1 /* 2 * Core routines and tables shareable across OS platforms. 3 * 4 * Copyright (c) 1994-2002 Justin T. Gibbs. 5 * Copyright (c) 2000-2002 Adaptec Inc. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions, and the following disclaimer, 13 * without modification. 14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 15 * substantially similar to the "NO WARRANTY" disclaimer below 16 * ("Disclaimer") and any redistribution must be conditioned upon 17 * including a substantially similar Disclaimer requirement for further 18 * binary redistribution. 19 * 3. Neither the names of the above-listed copyright holders nor the names 20 * of any contributors may be used to endorse or promote products derived 21 * from this software without specific prior written permission. 22 * 23 * Alternatively, this software may be distributed under the terms of the 24 * GNU General Public License ("GPL") version 2 as published by the Free 25 * Software Foundation. 26 * 27 * NO WARRANTY 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#80 $
 *
 * $FreeBSD$
 */

/*
 * OSM (OS-specific module) glue.  The Linux build uses local headers;
 * other platforms pull them from the shared source tree.
 */
#ifdef __linux__
#include "aic7xxx_osm.h"
#include "aic7xxx_inline.h"
#include "aicasm/aicasm_insformat.h"
#else
#include <dev/aic7xxx/aic7xxx_osm.h>
#include <dev/aic7xxx/aic7xxx_inline.h>
#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
#endif

/****************************** Softc Data ************************************/
/* Global list of all controller instances managed by this driver core. */
struct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq);

/***************************** Lookup Tables **********************************/
/*
 * Printable chip names, indexed by the chip-id portion of ahc->chip.
 */
char *ahc_chip_names[] =
{
	"NONE",
	"aic7770",
	"aic7850",
	"aic7855",
	"aic7859",
	"aic7860",
	"aic7870",
	"aic7880",
	"aic7895",
	"aic7895C",
	"aic7890/91",
	"aic7896/97",
	"aic7892",
	"aic7899"
};
static const u_int num_chip_names = NUM_ELEMENTS(ahc_chip_names);

/*
 * Hardware error codes.
 * Maps a bit position in the ERROR register to a human readable message.
 */
struct ahc_hard_error_entry {
	uint8_t errno;
	char *errmesg;
};

static struct ahc_hard_error_entry ahc_hard_errors[] = {
	{ ILLHADDR,	"Illegal Host Access" },
	{ ILLSADDR,	"Illegal Sequencer Address referrenced" },
	{ ILLOPCODE,	"Illegal Opcode in sequencer program" },
	{ SQPARERR,	"Sequencer Parity Error" },
	{ DPARERR,	"Data-path Parity Error" },
	{ MPARERR,	"Scratch or SCB Memory Parity Error" },
	{ PCIERRSTAT,	"PCI Error detected" },
	{ CIOPARERR,	"CIOBUS Parity Error" },
};
static const u_int num_errors = NUM_ELEMENTS(ahc_hard_errors);

/*
 * Bus phase table.  For each phase: the message to queue when a parity
 * error is detected in that phase, and a printable description.
 */
static struct ahc_phase_table_entry ahc_phase_table[] =
{
	{ P_DATAOUT,	MSG_NOOP,		"in Data-out phase"	},
	{ P_DATAIN,	MSG_INITIATOR_DET_ERR,	"in Data-in phase"	},
	{ P_DATAOUT_DT,	MSG_NOOP,		"in DT Data-out phase"	},
	{ P_DATAIN_DT,	MSG_INITIATOR_DET_ERR,	"in DT Data-in phase"	},
	{ P_COMMAND,	MSG_NOOP,		"in Command phase"	},
	{ P_MESGOUT,	MSG_NOOP,		"in Message-out phase"	},
	{ P_STATUS,	MSG_INITIATOR_DET_ERR,	"in Status phase"	},
	{ P_MESGIN,	MSG_PARITY_ERROR,	"in Message-in phase"	},
	{ P_BUSFREE,	MSG_NOOP,		"while idle"		},
	{ 0,		MSG_NOOP,		"in unknown phase"	}
};

/*
 * In most cases we only wish to iterate over real phases, so
 * exclude the last ("unknown phase") element from the count.
 */
static const u_int num_phases = NUM_ELEMENTS(ahc_phase_table) - 1;

/*
 * Valid SCSIRATE values.  (p. 3-17)
 * Provides a mapping of transfer periods in ns to the proper value to
 * stick in the scsixfer reg.
 */
static struct ahc_syncrate ahc_syncrates[] =
{
      /* ultra2    fast/ultra  period     rate */
	{ 0x42,      0x000,      9,      "80.0" },
	{ 0x03,      0x000,     10,      "40.0" },
	{ 0x04,      0x000,     11,      "33.0" },
	{ 0x05,      0x100,     12,      "20.0" },
	{ 0x06,      0x110,     15,      "16.0" },
	{ 0x07,      0x120,     18,      "13.4" },
	{ 0x08,      0x000,     25,      "10.0" },
	{ 0x19,      0x010,     31,      "8.0"  },
	{ 0x1a,      0x020,     37,      "6.67" },
	{ 0x1b,      0x030,     43,      "5.7"  },
	{ 0x1c,      0x040,     50,      "5.0"  },
	{ 0x00,      0x050,     56,      "4.4"  },
	{ 0x00,      0x060,     62,      "4.0"  },
	{ 0x00,      0x070,     68,      "3.6"  },
	{ 0x00,      0x000,      0,      NULL   }	/* list terminator */
};

/* Our Sequencer Program */
#include "aic7xxx_seq.h"

/**************************** Function Declarations ***************************/
static void		ahc_force_renegotiation(struct ahc_softc *ahc);
static struct ahc_tmode_tstate*
			ahc_alloc_tstate(struct ahc_softc *ahc,
					 u_int scsi_id, char channel);
#ifdef AHC_TARGET_MODE
static void		ahc_free_tstate(struct ahc_softc *ahc,
					u_int scsi_id, char channel, int force);
#endif
static struct ahc_syncrate*
			ahc_devlimited_syncrate(struct ahc_softc *ahc,
						struct ahc_initiator_tinfo *,
						u_int *period,
						u_int *ppr_options,
						role_t role);
static void		ahc_update_pending_scbs(struct ahc_softc *ahc);
static void		ahc_fetch_devinfo(struct ahc_softc *ahc,
					  struct ahc_devinfo *devinfo);
static void		ahc_print_devinfo(struct ahc_softc *ahc,
					  struct ahc_devinfo *devinfo);
static void		ahc_scb_devinfo(struct ahc_softc *ahc,
					struct ahc_devinfo *devinfo,
					struct scb *scb);
static void		ahc_assert_atn(struct ahc_softc *ahc);
static void		ahc_setup_initiator_msgout(struct ahc_softc *ahc,
						   struct ahc_devinfo *devinfo,
						   struct scb *scb);
static void		ahc_build_transfer_msg(struct ahc_softc *ahc,
					       struct ahc_devinfo *devinfo);
static void		ahc_construct_sdtr(struct ahc_softc *ahc,
					   struct ahc_devinfo *devinfo,
					   u_int period, u_int offset);
static void ahc_construct_wdtr(struct ahc_softc *ahc, 179 struct ahc_devinfo *devinfo, 180 u_int bus_width); 181 static void ahc_construct_ppr(struct ahc_softc *ahc, 182 struct ahc_devinfo *devinfo, 183 u_int period, u_int offset, 184 u_int bus_width, u_int ppr_options); 185 static void ahc_clear_msg_state(struct ahc_softc *ahc); 186 static void ahc_handle_message_phase(struct ahc_softc *ahc); 187 typedef enum { 188 AHCMSG_1B, 189 AHCMSG_2B, 190 AHCMSG_EXT 191 } ahc_msgtype; 192 static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, 193 u_int msgval, int full); 194 static int ahc_parse_msg(struct ahc_softc *ahc, 195 struct ahc_devinfo *devinfo); 196 static int ahc_handle_msg_reject(struct ahc_softc *ahc, 197 struct ahc_devinfo *devinfo); 198 static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc, 199 struct ahc_devinfo *devinfo); 200 static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc); 201 static void ahc_handle_devreset(struct ahc_softc *ahc, 202 struct ahc_devinfo *devinfo, 203 cam_status status, char *message, 204 int verbose_level); 205 #if AHC_TARGET_MODE 206 static void ahc_setup_target_msgin(struct ahc_softc *ahc, 207 struct ahc_devinfo *devinfo, 208 struct scb *scb); 209 #endif 210 211 static bus_dmamap_callback_t ahc_dmamap_cb; 212 static void ahc_build_free_scb_list(struct ahc_softc *ahc); 213 static int ahc_init_scbdata(struct ahc_softc *ahc); 214 static void ahc_fini_scbdata(struct ahc_softc *ahc); 215 static void ahc_qinfifo_requeue(struct ahc_softc *ahc, 216 struct scb *prev_scb, 217 struct scb *scb); 218 static int ahc_qinfifo_count(struct ahc_softc *ahc); 219 static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, 220 u_int prev, u_int scbptr); 221 static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc); 222 static u_int ahc_rem_wscb(struct ahc_softc *ahc, 223 u_int scbpos, u_int prev); 224 static void ahc_reset_current_bus(struct ahc_softc *ahc); 225 #ifdef AHC_DUMP_SEQ 226 static void ahc_dumpseq(struct 
ahc_softc *ahc); 227 #endif 228 static void ahc_loadseq(struct ahc_softc *ahc); 229 static int ahc_check_patch(struct ahc_softc *ahc, 230 struct patch **start_patch, 231 u_int start_instr, u_int *skip_addr); 232 static void ahc_download_instr(struct ahc_softc *ahc, 233 u_int instrptr, uint8_t *dconsts); 234 #ifdef AHC_TARGET_MODE 235 static void ahc_queue_lstate_event(struct ahc_softc *ahc, 236 struct ahc_tmode_lstate *lstate, 237 u_int initiator_id, 238 u_int event_type, 239 u_int event_arg); 240 static void ahc_update_scsiid(struct ahc_softc *ahc, 241 u_int targid_mask); 242 static int ahc_handle_target_cmd(struct ahc_softc *ahc, 243 struct target_cmd *cmd); 244 #endif 245 /************************* Sequencer Execution Control ************************/ 246 /* 247 * Restart the sequencer program from address zero 248 */ 249 void 250 ahc_restart(struct ahc_softc *ahc) 251 { 252 253 ahc_pause(ahc); 254 255 /* No more pending messages. */ 256 ahc_clear_msg_state(ahc); 257 258 ahc_outb(ahc, SCSISIGO, 0); /* De-assert BSY */ 259 ahc_outb(ahc, MSG_OUT, MSG_NOOP); /* No message to send */ 260 ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); 261 ahc_outb(ahc, LASTPHASE, P_BUSFREE); 262 ahc_outb(ahc, SAVED_SCSIID, 0xFF); 263 ahc_outb(ahc, SAVED_LUN, 0xFF); 264 265 /* 266 * Ensure that the sequencer's idea of TQINPOS 267 * matches our own. The sequencer increments TQINPOS 268 * only after it sees a DMA complete and a reset could 269 * occur before the increment leaving the kernel to believe 270 * the command arrived but the sequencer to not. 
271 */ 272 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); 273 274 /* Always allow reselection */ 275 ahc_outb(ahc, SCSISEQ, 276 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP)); 277 if ((ahc->features & AHC_CMD_CHAN) != 0) { 278 /* Ensure that no DMA operations are in progress */ 279 ahc_outb(ahc, CCSCBCNT, 0); 280 ahc_outb(ahc, CCSGCTL, 0); 281 ahc_outb(ahc, CCSCBCTL, 0); 282 } 283 /* 284 * If we were in the process of DMA'ing SCB data into 285 * an SCB, replace that SCB on the free list. This prevents 286 * an SCB leak. 287 */ 288 if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) { 289 ahc_add_curscb_to_free_list(ahc); 290 ahc_outb(ahc, SEQ_FLAGS2, 291 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA); 292 } 293 ahc_outb(ahc, MWI_RESIDUAL, 0); 294 ahc_outb(ahc, SEQCTL, FASTMODE); 295 ahc_outb(ahc, SEQADDR0, 0); 296 ahc_outb(ahc, SEQADDR1, 0); 297 ahc_unpause(ahc); 298 } 299 300 /************************* Input/Output Queues ********************************/ 301 void 302 ahc_run_qoutfifo(struct ahc_softc *ahc) 303 { 304 struct scb *scb; 305 u_int scb_index; 306 307 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); 308 while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) { 309 310 scb_index = ahc->qoutfifo[ahc->qoutfifonext]; 311 if ((ahc->qoutfifonext & 0x03) == 0x03) { 312 u_int modnext; 313 314 /* 315 * Clear 32bits of QOUTFIFO at a time 316 * so that we don't clobber an incoming 317 * byte DMA to the array on architectures 318 * that only support 32bit load and store 319 * operations. 
320 */ 321 modnext = ahc->qoutfifonext & ~0x3; 322 *((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL; 323 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, 324 ahc->shared_data_dmamap, 325 /*offset*/modnext, /*len*/4, 326 BUS_DMASYNC_PREREAD); 327 } 328 ahc->qoutfifonext++; 329 330 scb = ahc_lookup_scb(ahc, scb_index); 331 if (scb == NULL) { 332 printf("%s: WARNING no command for scb %d " 333 "(cmdcmplt)\nQOUTPOS = %d\n", 334 ahc_name(ahc), scb_index, 335 (ahc->qoutfifonext - 1) & 0xFF); 336 continue; 337 } 338 339 /* 340 * Save off the residual 341 * if there is one. 342 */ 343 ahc_update_residual(ahc, scb); 344 ahc_done(ahc, scb); 345 } 346 } 347 348 void 349 ahc_run_untagged_queues(struct ahc_softc *ahc) 350 { 351 int i; 352 353 for (i = 0; i < 16; i++) 354 ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]); 355 } 356 357 void 358 ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue) 359 { 360 struct scb *scb; 361 362 if (ahc->untagged_queue_lock != 0) 363 return; 364 365 if ((scb = TAILQ_FIRST(queue)) != NULL 366 && (scb->flags & SCB_ACTIVE) == 0) { 367 scb->flags |= SCB_ACTIVE; 368 ahc_queue_scb(ahc, scb); 369 } 370 } 371 372 /************************* Interrupt Handling *********************************/ 373 void 374 ahc_handle_brkadrint(struct ahc_softc *ahc) 375 { 376 /* 377 * We upset the sequencer :-( 378 * Lookup the error message 379 */ 380 int i; 381 int error; 382 383 error = ahc_inb(ahc, ERROR); 384 for (i = 0; error != 1 && i < num_errors; i++) 385 error >>= 1; 386 printf("%s: brkadrint, %s at seqaddr = 0x%x\n", 387 ahc_name(ahc), ahc_hard_errors[i].errmesg, 388 ahc_inb(ahc, SEQADDR0) | 389 (ahc_inb(ahc, SEQADDR1) << 8)); 390 391 ahc_dump_card_state(ahc); 392 393 /* Tell everyone that this HBA is no longer availible */ 394 ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS, 395 CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, 396 CAM_NO_HBA); 397 398 /* Disable all interrupt sources by resetting the controller */ 399 
ahc_shutdown(ahc); 400 } 401 402 void 403 ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat) 404 { 405 struct scb *scb; 406 struct ahc_devinfo devinfo; 407 408 ahc_fetch_devinfo(ahc, &devinfo); 409 410 /* 411 * Clear the upper byte that holds SEQINT status 412 * codes and clear the SEQINT bit. We will unpause 413 * the sequencer, if appropriate, after servicing 414 * the request. 415 */ 416 ahc_outb(ahc, CLRINT, CLRSEQINT); 417 switch (intstat & SEQINT_MASK) { 418 case BAD_STATUS: 419 { 420 u_int scb_index; 421 struct hardware_scb *hscb; 422 423 /* 424 * Set the default return value to 0 (don't 425 * send sense). The sense code will change 426 * this if needed. 427 */ 428 ahc_outb(ahc, RETURN_1, 0); 429 430 /* 431 * The sequencer will notify us when a command 432 * has an error that would be of interest to 433 * the kernel. This allows us to leave the sequencer 434 * running in the common case of command completes 435 * without error. The sequencer will already have 436 * dma'd the SCB back up to us, so we can reference 437 * the in kernel copy directly. 438 */ 439 scb_index = ahc_inb(ahc, SCB_TAG); 440 scb = ahc_lookup_scb(ahc, scb_index); 441 if (scb == NULL) { 442 ahc_print_devinfo(ahc, &devinfo); 443 printf("ahc_intr - referenced scb " 444 "not valid during seqint 0x%x scb(%d)\n", 445 intstat, scb_index); 446 ahc_dump_card_state(ahc); 447 panic("for safety"); 448 goto unpause; 449 } 450 451 hscb = scb->hscb; 452 453 /* Don't want to clobber the original sense code */ 454 if ((scb->flags & SCB_SENSE) != 0) { 455 /* 456 * Clear the SCB_SENSE Flag and have 457 * the sequencer do a normal command 458 * complete. 459 */ 460 scb->flags &= ~SCB_SENSE; 461 ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); 462 break; 463 } 464 ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); 465 /* Freeze the queue until the client sees the error. 
*/ 466 ahc_freeze_devq(ahc, scb); 467 ahc_freeze_scb(scb); 468 ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status); 469 switch (hscb->shared_data.status.scsi_status) { 470 case SCSI_STATUS_OK: 471 printf("%s: Interrupted for staus of 0???\n", 472 ahc_name(ahc)); 473 break; 474 case SCSI_STATUS_CMD_TERMINATED: 475 case SCSI_STATUS_CHECK_COND: 476 { 477 struct ahc_dma_seg *sg; 478 struct scsi_sense *sc; 479 struct ahc_initiator_tinfo *targ_info; 480 struct ahc_tmode_tstate *tstate; 481 struct ahc_transinfo *tinfo; 482 #ifdef AHC_DEBUG 483 if (ahc_debug & AHC_SHOW_SENSE) { 484 ahc_print_path(ahc, scb); 485 printf("SCB %d: requests Check Status\n", 486 scb->hscb->tag); 487 } 488 #endif 489 490 if (ahc_perform_autosense(scb) == 0) 491 break; 492 493 targ_info = ahc_fetch_transinfo(ahc, 494 devinfo.channel, 495 devinfo.our_scsiid, 496 devinfo.target, 497 &tstate); 498 tinfo = &targ_info->curr; 499 sg = scb->sg_list; 500 sc = (struct scsi_sense *)(&hscb->shared_data.cdb); 501 /* 502 * Save off the residual if there is one. 503 */ 504 ahc_update_residual(ahc, scb); 505 #ifdef AHC_DEBUG 506 if (ahc_debug & AHC_SHOW_SENSE) { 507 ahc_print_path(ahc, scb); 508 printf("Sending Sense\n"); 509 } 510 #endif 511 sg->addr = ahc_get_sense_bufaddr(ahc, scb); 512 sg->len = ahc_get_sense_bufsize(ahc, scb); 513 sg->len |= AHC_DMA_LAST_SEG; 514 515 /* Fixup byte order */ 516 sg->addr = ahc_htole32(sg->addr); 517 sg->len = ahc_htole32(sg->len); 518 519 sc->opcode = REQUEST_SENSE; 520 sc->byte2 = 0; 521 if (tinfo->protocol_version <= SCSI_REV_2 522 && SCB_GET_LUN(scb) < 8) 523 sc->byte2 = SCB_GET_LUN(scb) << 5; 524 sc->unused[0] = 0; 525 sc->unused[1] = 0; 526 sc->length = sg->len; 527 sc->control = 0; 528 529 /* 530 * We can't allow the target to disconnect. 531 * This will be an untagged transaction and 532 * having the target disconnect will make this 533 * transaction indestinguishable from outstanding 534 * tagged transactions. 
535 */ 536 hscb->control = 0; 537 538 /* 539 * This request sense could be because the 540 * the device lost power or in some other 541 * way has lost our transfer negotiations. 542 * Renegotiate if appropriate. Unit attention 543 * errors will be reported before any data 544 * phases occur. 545 */ 546 if (ahc_get_residual(scb) 547 == ahc_get_transfer_length(scb)) { 548 ahc_update_neg_request(ahc, &devinfo, 549 tstate, targ_info, 550 /*force*/TRUE); 551 } 552 if (tstate->auto_negotiate & devinfo.target_mask) { 553 hscb->control |= MK_MESSAGE; 554 scb->flags &= ~SCB_NEGOTIATE; 555 scb->flags |= SCB_AUTO_NEGOTIATE; 556 } 557 hscb->cdb_len = sizeof(*sc); 558 hscb->dataptr = sg->addr; 559 hscb->datacnt = sg->len; 560 hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID; 561 hscb->sgptr = ahc_htole32(hscb->sgptr); 562 scb->sg_count = 1; 563 scb->flags |= SCB_SENSE; 564 ahc_qinfifo_requeue_tail(ahc, scb); 565 ahc_outb(ahc, RETURN_1, SEND_SENSE); 566 #ifdef __FreeBSD__ 567 /* 568 * Ensure we have enough time to actually 569 * retrieve the sense. 
570 */ 571 untimeout(ahc_timeout, (caddr_t)scb, 572 scb->io_ctx->ccb_h.timeout_ch); 573 scb->io_ctx->ccb_h.timeout_ch = 574 timeout(ahc_timeout, (caddr_t)scb, 5 * hz); 575 #endif 576 break; 577 } 578 default: 579 break; 580 } 581 break; 582 } 583 case NO_MATCH: 584 { 585 /* Ensure we don't leave the selection hardware on */ 586 ahc_outb(ahc, SCSISEQ, 587 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 588 589 printf("%s:%c:%d: no active SCB for reconnecting " 590 "target - issuing BUS DEVICE RESET\n", 591 ahc_name(ahc), devinfo.channel, devinfo.target); 592 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 593 "ARG_1 == 0x%x ACCUM = 0x%x\n", 594 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), 595 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); 596 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 597 "SINDEX == 0x%x\n", 598 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), 599 ahc_index_busy_tcl(ahc, 600 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), 601 ahc_inb(ahc, SAVED_LUN))), 602 ahc_inb(ahc, SINDEX)); 603 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 604 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", 605 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), 606 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), 607 ahc_inb(ahc, SCB_CONTROL)); 608 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", 609 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); 610 printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0)); 611 printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL)); 612 ahc_dump_card_state(ahc); 613 ahc->msgout_buf[0] = MSG_BUS_DEV_RESET; 614 ahc->msgout_len = 1; 615 ahc->msgout_index = 0; 616 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 617 ahc_outb(ahc, MSG_OUT, HOST_MSG); 618 ahc_assert_atn(ahc); 619 break; 620 } 621 case SEND_REJECT: 622 { 623 u_int rejbyte = ahc_inb(ahc, ACCUM); 624 printf("%s:%c:%d: Warning - unknown message received from " 625 "target (0x%x). 
Rejecting\n", 626 ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte); 627 break; 628 } 629 case NO_IDENT: 630 { 631 /* 632 * The reconnecting target either did not send an identify 633 * message, or did, but we didn't find an SCB to match and 634 * before it could respond to our ATN/abort, it hit a dataphase. 635 * The only safe thing to do is to blow it away with a bus 636 * reset. 637 */ 638 int found; 639 640 printf("%s:%c:%d: Target did not send an IDENTIFY message. " 641 "LASTPHASE = 0x%x, SAVED_SCSIID == 0x%x\n", 642 ahc_name(ahc), devinfo.channel, devinfo.target, 643 ahc_inb(ahc, LASTPHASE), ahc_inb(ahc, SAVED_SCSIID)); 644 found = ahc_reset_channel(ahc, devinfo.channel, 645 /*initiate reset*/TRUE); 646 printf("%s: Issued Channel %c Bus Reset. " 647 "%d SCBs aborted\n", ahc_name(ahc), devinfo.channel, 648 found); 649 return; 650 } 651 case IGN_WIDE_RES: 652 ahc_handle_ign_wide_residue(ahc, &devinfo); 653 break; 654 case PDATA_REINIT: 655 ahc_reinitialize_dataptrs(ahc); 656 break; 657 case BAD_PHASE: 658 { 659 u_int lastphase; 660 661 lastphase = ahc_inb(ahc, LASTPHASE); 662 printf("%s:%c:%d: unknown scsi bus phase %x, " 663 "lastphase = 0x%x. Attempting to continue\n", 664 ahc_name(ahc), devinfo.channel, devinfo.target, 665 lastphase, ahc_inb(ahc, SCSISIGI)); 666 break; 667 } 668 case MISSED_BUSFREE: 669 { 670 u_int lastphase; 671 672 lastphase = ahc_inb(ahc, LASTPHASE); 673 printf("%s:%c:%d: Missed busfree. " 674 "Lastphase = 0x%x, Curphase = 0x%x\n", 675 ahc_name(ahc), devinfo.channel, devinfo.target, 676 lastphase, ahc_inb(ahc, SCSISIGI)); 677 ahc_restart(ahc); 678 return; 679 } 680 case HOST_MSG_LOOP: 681 { 682 /* 683 * The sequencer has encountered a message phase 684 * that requires host assistance for completion. 685 * While handling the message phase(s), we will be 686 * notified by the sequencer after each byte is 687 * transfered so we can track bus phase changes. 
688 * 689 * If this is the first time we've seen a HOST_MSG_LOOP 690 * interrupt, initialize the state of the host message 691 * loop. 692 */ 693 if (ahc->msg_type == MSG_TYPE_NONE) { 694 struct scb *scb; 695 u_int scb_index; 696 u_int bus_phase; 697 698 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 699 if (bus_phase != P_MESGIN 700 && bus_phase != P_MESGOUT) { 701 printf("ahc_intr: HOST_MSG_LOOP bad " 702 "phase 0x%x\n", 703 bus_phase); 704 /* 705 * Probably transitioned to bus free before 706 * we got here. Just punt the message. 707 */ 708 ahc_clear_intstat(ahc); 709 ahc_restart(ahc); 710 return; 711 } 712 713 scb_index = ahc_inb(ahc, SCB_TAG); 714 scb = ahc_lookup_scb(ahc, scb_index); 715 if (devinfo.role == ROLE_INITIATOR) { 716 if (scb == NULL) 717 panic("HOST_MSG_LOOP with " 718 "invalid SCB %x\n", scb_index); 719 720 if (bus_phase == P_MESGOUT) 721 ahc_setup_initiator_msgout(ahc, 722 &devinfo, 723 scb); 724 else { 725 ahc->msg_type = 726 MSG_TYPE_INITIATOR_MSGIN; 727 ahc->msgin_index = 0; 728 } 729 } 730 #if AHC_TARGET_MODE 731 else { 732 if (bus_phase == P_MESGOUT) { 733 ahc->msg_type = 734 MSG_TYPE_TARGET_MSGOUT; 735 ahc->msgin_index = 0; 736 } 737 else 738 ahc_setup_target_msgin(ahc, 739 &devinfo, 740 scb); 741 } 742 #endif 743 } 744 745 ahc_handle_message_phase(ahc); 746 break; 747 } 748 case PERR_DETECTED: 749 { 750 /* 751 * If we've cleared the parity error interrupt 752 * but the sequencer still believes that SCSIPERR 753 * is true, it must be that the parity error is 754 * for the currently presented byte on the bus, 755 * and we are not in a phase (data-in) where we will 756 * eventually ack this byte. Ack the byte and 757 * throw it away in the hope that the target will 758 * take us to message out to deliver the appropriate 759 * error message. 
760 */ 761 if ((intstat & SCSIINT) == 0 762 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) { 763 764 if ((ahc->features & AHC_DT) == 0) { 765 u_int curphase; 766 767 /* 768 * The hardware will only let you ack bytes 769 * if the expected phase in SCSISIGO matches 770 * the current phase. Make sure this is 771 * currently the case. 772 */ 773 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 774 ahc_outb(ahc, LASTPHASE, curphase); 775 ahc_outb(ahc, SCSISIGO, curphase); 776 } 777 ahc_inb(ahc, SCSIDATL); 778 } 779 break; 780 } 781 case DATA_OVERRUN: 782 { 783 /* 784 * When the sequencer detects an overrun, it 785 * places the controller in "BITBUCKET" mode 786 * and allows the target to complete its transfer. 787 * Unfortunately, none of the counters get updated 788 * when the controller is in this mode, so we have 789 * no way of knowing how large the overrun was. 790 */ 791 u_int scbindex = ahc_inb(ahc, SCB_TAG); 792 u_int lastphase = ahc_inb(ahc, LASTPHASE); 793 u_int i; 794 795 scb = ahc_lookup_scb(ahc, scbindex); 796 for (i = 0; i < num_phases; i++) { 797 if (lastphase == ahc_phase_table[i].phase) 798 break; 799 } 800 ahc_print_path(ahc, scb); 801 printf("data overrun detected %s." 802 " Tag == 0x%x.\n", 803 ahc_phase_table[i].phasemsg, 804 scb->hscb->tag); 805 ahc_print_path(ahc, scb); 806 printf("%s seen Data Phase. Length = %ld. NumSGs = %d.\n", 807 ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't", 808 ahc_get_transfer_length(scb), scb->sg_count); 809 if (scb->sg_count > 0) { 810 for (i = 0; i < scb->sg_count; i++) { 811 812 printf("sg[%d] - Addr 0x%x%x : Length %d\n", 813 i, 814 (ahc_le32toh(scb->sg_list[i].len) >> 24 815 & SG_HIGH_ADDR_BITS), 816 ahc_le32toh(scb->sg_list[i].addr), 817 ahc_le32toh(scb->sg_list[i].len) 818 & AHC_SG_LEN_MASK); 819 } 820 } 821 /* 822 * Set this and it will take effect when the 823 * target does a command complete. 
824 */ 825 ahc_freeze_devq(ahc, scb); 826 if ((scb->flags & SCB_SENSE) == 0) { 827 ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR); 828 } else { 829 scb->flags &= ~SCB_SENSE; 830 ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); 831 } 832 ahc_freeze_scb(scb); 833 834 if ((ahc->features & AHC_ULTRA2) != 0) { 835 /* 836 * Clear the channel in case we return 837 * to data phase later. 838 */ 839 ahc_outb(ahc, SXFRCTL0, 840 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); 841 ahc_outb(ahc, SXFRCTL0, 842 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); 843 } 844 if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { 845 u_int dscommand1; 846 847 /* Ensure HHADDR is 0 for future DMA operations. */ 848 dscommand1 = ahc_inb(ahc, DSCOMMAND1); 849 ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); 850 ahc_outb(ahc, HADDR, 0); 851 ahc_outb(ahc, DSCOMMAND1, dscommand1); 852 } 853 break; 854 } 855 case MKMSG_FAILED: 856 { 857 u_int scbindex; 858 859 printf("%s:%c:%d:%d: Attempt to issue message failed\n", 860 ahc_name(ahc), devinfo.channel, devinfo.target, 861 devinfo.lun); 862 scbindex = ahc_inb(ahc, SCB_TAG); 863 scb = ahc_lookup_scb(ahc, scbindex); 864 if (scb != NULL 865 && (scb->flags & SCB_RECOVERY_SCB) != 0) 866 /* 867 * Ensure that we didn't put a second instance of this 868 * SCB into the QINFIFO. 869 */ 870 ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), 871 SCB_GET_CHANNEL(ahc, scb), 872 SCB_GET_LUN(scb), scb->hscb->tag, 873 ROLE_INITIATOR, /*status*/0, 874 SEARCH_REMOVE); 875 break; 876 } 877 case NO_FREE_SCB: 878 { 879 printf("%s: No free or disconnected SCBs\n", ahc_name(ahc)); 880 ahc_dump_card_state(ahc); 881 panic("for safety"); 882 break; 883 } 884 case SCB_MISMATCH: 885 { 886 u_int scbptr; 887 888 scbptr = ahc_inb(ahc, SCBPTR); 889 printf("Bogus TAG after DMA. 
SCBPTR %d, tag %d, our tag %d\n", 890 scbptr, ahc_inb(ahc, ARG_1), 891 ahc->scb_data->hscbs[scbptr].tag); 892 ahc_dump_card_state(ahc); 893 panic("for saftey"); 894 break; 895 } 896 case OUT_OF_RANGE: 897 { 898 printf("%s: BTT calculation out of range\n", ahc_name(ahc)); 899 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 900 "ARG_1 == 0x%x ACCUM = 0x%x\n", 901 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), 902 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); 903 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 904 "SINDEX == 0x%x\n, A == 0x%x\n", 905 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), 906 ahc_index_busy_tcl(ahc, 907 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), 908 ahc_inb(ahc, SAVED_LUN))), 909 ahc_inb(ahc, SINDEX), 910 ahc_inb(ahc, ACCUM)); 911 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 912 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", 913 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), 914 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), 915 ahc_inb(ahc, SCB_CONTROL)); 916 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", 917 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); 918 ahc_dump_card_state(ahc); 919 panic("for safety"); 920 break; 921 } 922 default: 923 printf("ahc_intr: seqint, " 924 "intstat == 0x%x, scsisigi = 0x%x\n", 925 intstat, ahc_inb(ahc, SCSISIGI)); 926 break; 927 } 928 unpause: 929 /* 930 * The sequencer is paused immediately on 931 * a SEQINT, so we should restart it when 932 * we're done. 933 */ 934 ahc_unpause(ahc); 935 } 936 937 void 938 ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) 939 { 940 u_int scb_index; 941 u_int status0; 942 u_int status; 943 struct scb *scb; 944 char cur_channel; 945 char intr_channel; 946 947 /* Make sure the sequencer is in a safe location. 
*/ 948 ahc_clear_critical_section(ahc); 949 950 if ((ahc->features & AHC_TWIN) != 0 951 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0)) 952 cur_channel = 'B'; 953 else 954 cur_channel = 'A'; 955 intr_channel = cur_channel; 956 957 if ((ahc->features & AHC_ULTRA2) != 0) 958 status0 = ahc_inb(ahc, SSTAT0) & IOERR; 959 else 960 status0 = 0; 961 status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 962 if (status == 0 && status0 == 0) { 963 if ((ahc->features & AHC_TWIN) != 0) { 964 /* Try the other channel */ 965 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 966 status = ahc_inb(ahc, SSTAT1) 967 & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 968 intr_channel = (cur_channel == 'A') ? 'B' : 'A'; 969 } 970 if (status == 0) { 971 printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc)); 972 ahc_outb(ahc, CLRINT, CLRSCSIINT); 973 ahc_unpause(ahc); 974 return; 975 } 976 } 977 978 scb_index = ahc_inb(ahc, SCB_TAG); 979 scb = ahc_lookup_scb(ahc, scb_index); 980 if (scb != NULL 981 && (ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) == 0) 982 scb = NULL; 983 984 if ((ahc->features & AHC_ULTRA2) != 0 985 && (status0 & IOERR) != 0) { 986 int now_lvd; 987 988 now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40; 989 printf("%s: Transceiver State Has Changed to %s mode\n", 990 ahc_name(ahc), now_lvd ? "LVD" : "SE"); 991 ahc_outb(ahc, CLRSINT0, CLRIOERR); 992 /* 993 * When transitioning to SE mode, the reset line 994 * glitches, triggering an arbitration bug in some 995 * Ultra2 controllers. This bug is cleared when we 996 * assert the reset line. Since a reset glitch has 997 * already occurred with this transition and a 998 * transceiver state change is handled just like 999 * a bus reset anyway, asserting the reset line 1000 * ourselves is safe. 
1001 */ 1002 ahc_reset_channel(ahc, intr_channel, 1003 /*Initiate Reset*/now_lvd == 0); 1004 } else if ((status & SCSIRSTI) != 0) { 1005 printf("%s: Someone reset channel %c\n", 1006 ahc_name(ahc), intr_channel); 1007 if (intr_channel != cur_channel) 1008 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 1009 ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE); 1010 } else if ((status & SCSIPERR) != 0) { 1011 /* 1012 * Determine the bus phase and queue an appropriate message. 1013 * SCSIPERR is latched true as soon as a parity error 1014 * occurs. If the sequencer acked the transfer that 1015 * caused the parity error and the currently presented 1016 * transfer on the bus has correct parity, SCSIPERR will 1017 * be cleared by CLRSCSIPERR. Use this to determine if 1018 * we should look at the last phase the sequencer recorded, 1019 * or the current phase presented on the bus. 1020 */ 1021 u_int mesg_out; 1022 u_int curphase; 1023 u_int errorphase; 1024 u_int lastphase; 1025 u_int scsirate; 1026 u_int i; 1027 u_int sstat2; 1028 1029 lastphase = ahc_inb(ahc, LASTPHASE); 1030 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 1031 sstat2 = ahc_inb(ahc, SSTAT2); 1032 ahc_outb(ahc, CLRSINT1, CLRSCSIPERR); 1033 /* 1034 * For all phases save DATA, the sequencer won't 1035 * automatically ack a byte that has a parity error 1036 * in it. So the only way that the current phase 1037 * could be 'data-in' is if the parity error is for 1038 * an already acked byte in the data phase. During 1039 * synchronous data-in transfers, we may actually 1040 * ack bytes before latching the current phase in 1041 * LASTPHASE, leading to the discrepancy between 1042 * curphase and lastphase. 
1043 */ 1044 if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0 1045 || curphase == P_DATAIN || curphase == P_DATAIN_DT) 1046 errorphase = curphase; 1047 else 1048 errorphase = lastphase; 1049 1050 for (i = 0; i < num_phases; i++) { 1051 if (errorphase == ahc_phase_table[i].phase) 1052 break; 1053 } 1054 mesg_out = ahc_phase_table[i].mesg_out; 1055 if (scb != NULL) 1056 ahc_print_path(ahc, scb); 1057 else 1058 printf("%s:%c:%d: ", ahc_name(ahc), intr_channel, 1059 SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID))); 1060 scsirate = ahc_inb(ahc, SCSIRATE); 1061 printf("parity error detected %s. " 1062 "SEQADDR(0x%x) SCSIRATE(0x%x)\n", 1063 ahc_phase_table[i].phasemsg, 1064 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8), 1065 scsirate); 1066 1067 if ((ahc->features & AHC_DT) != 0) { 1068 1069 if ((sstat2 & CRCVALERR) != 0) 1070 printf("\tCRC Value Mismatch\n"); 1071 if ((sstat2 & CRCENDERR) != 0) 1072 printf("\tNo terminal CRC packet recevied\n"); 1073 if ((sstat2 & CRCREQERR) != 0) 1074 printf("\tIllegal CRC packet request\n"); 1075 if ((sstat2 & DUAL_EDGE_ERR) != 0) { 1076 printf("\tUnexpected %sDT Data Phase\n", 1077 (scsirate & SINGLE_EDGE) ? "" : "non-"); 1078 /* 1079 * This error applies regardless of 1080 * data direction, so ignore the value 1081 * in the phase table. 1082 */ 1083 mesg_out = MSG_INITIATOR_DET_ERR; 1084 } 1085 } 1086 1087 /* 1088 * We've set the hardware to assert ATN if we 1089 * get a parity error on "in" phases, so all we 1090 * need to do is stuff the message buffer with 1091 * the appropriate message. "In" phases have set 1092 * mesg_out to something other than MSG_NOP. 1093 */ 1094 if (mesg_out != MSG_NOOP) { 1095 if (ahc->msg_type != MSG_TYPE_NONE) 1096 ahc->send_msg_perror = TRUE; 1097 else 1098 ahc_outb(ahc, MSG_OUT, mesg_out); 1099 } 1100 /* 1101 * Force a renegotiation with this target just in 1102 * case we are out of sync for some external reason 1103 * unknown (or unreported) by the target. 
1104 */ 1105 ahc_force_renegotiation(ahc); 1106 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1107 ahc_unpause(ahc); 1108 } else if ((status & SELTO) != 0) { 1109 u_int scbptr; 1110 1111 /* Stop the selection */ 1112 ahc_outb(ahc, SCSISEQ, 0); 1113 1114 /* No more pending messages */ 1115 ahc_clear_msg_state(ahc); 1116 1117 /* Clear interrupt state */ 1118 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 1119 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR); 1120 1121 /* 1122 * Although the driver does not care about the 1123 * 'Selection in Progress' status bit, the busy 1124 * LED does. SELINGO is only cleared by a sucessfull 1125 * selection, so we must manually clear it to insure 1126 * the LED turns off just incase no future successful 1127 * selections occur (e.g. no devices on the bus). 1128 */ 1129 ahc_outb(ahc, CLRSINT0, CLRSELINGO); 1130 1131 scbptr = ahc_inb(ahc, WAITING_SCBH); 1132 ahc_outb(ahc, SCBPTR, scbptr); 1133 scb_index = ahc_inb(ahc, SCB_TAG); 1134 1135 scb = ahc_lookup_scb(ahc, scb_index); 1136 if (scb == NULL) { 1137 printf("%s: ahc_intr - referenced scb not " 1138 "valid during SELTO scb(%d, %d)\n", 1139 ahc_name(ahc), scbptr, scb_index); 1140 } else { 1141 ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT); 1142 ahc_freeze_devq(ahc, scb); 1143 #ifdef AHC_DEBUG 1144 if ((ahc_debug & AHC_SHOW_SELTO) != 0) { 1145 ahc_print_path(ahc, scb); 1146 printf("Saw Selection Timeout for SCB 0x%x\n", 1147 scb_index); 1148 } 1149 #endif 1150 } 1151 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1152 /* 1153 * Force a renegotiation with this target just in 1154 * case the cable was pulled and will later be 1155 * re-attached. The target may forget its negotiation 1156 * settings with us should it attempt to reselect 1157 * during the interruption. The target will not issue 1158 * a unit attention in this case, so we must always 1159 * renegotiate. 
1160 */ 1161 ahc_force_renegotiation(ahc); 1162 ahc_restart(ahc); 1163 } else if ((status & BUSFREE) != 0 1164 && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) { 1165 u_int lastphase; 1166 u_int saved_scsiid; 1167 u_int saved_lun; 1168 u_int target; 1169 u_int initiator_role_id; 1170 char channel; 1171 int printerror; 1172 1173 /* 1174 * Clear our selection hardware as soon as possible. 1175 * We may have an entry in the waiting Q for this target, 1176 * that is affected by this busfree and we don't want to 1177 * go about selecting the target while we handle the event. 1178 */ 1179 ahc_outb(ahc, SCSISEQ, 1180 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 1181 1182 /* 1183 * Disable busfree interrupts and clear the busfree 1184 * interrupt status. We do this here so that several 1185 * bus transactions occur prior to clearing the SCSIINT 1186 * latch. It can take a bit for the clearing to take effect. 1187 */ 1188 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 1189 ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR); 1190 1191 /* 1192 * Look at what phase we were last in. 1193 * If its message out, chances are pretty good 1194 * that the busfree was in response to one of 1195 * our abort requests. 
1196 */ 1197 lastphase = ahc_inb(ahc, LASTPHASE); 1198 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); 1199 saved_lun = ahc_inb(ahc, SAVED_LUN); 1200 target = SCSIID_TARGET(ahc, saved_scsiid); 1201 initiator_role_id = SCSIID_OUR_ID(saved_scsiid); 1202 channel = SCSIID_CHANNEL(ahc, saved_scsiid); 1203 printerror = 1; 1204 1205 if (lastphase == P_MESGOUT) { 1206 struct ahc_devinfo devinfo; 1207 u_int tag; 1208 1209 ahc_fetch_devinfo(ahc, &devinfo); 1210 tag = SCB_LIST_NULL; 1211 if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE) 1212 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) { 1213 if (ahc->msgout_buf[ahc->msgout_index - 1] 1214 == MSG_ABORT_TAG) 1215 tag = scb->hscb->tag; 1216 ahc_print_path(ahc, scb); 1217 printf("SCB %d - Abort%s Completed.\n", 1218 scb->hscb->tag, tag == SCB_LIST_NULL ? 1219 "" : " Tag"); 1220 ahc_abort_scbs(ahc, target, channel, 1221 saved_lun, tag, 1222 ROLE_INITIATOR, 1223 CAM_REQ_ABORTED); 1224 printerror = 0; 1225 } else if (ahc_sent_msg(ahc, AHCMSG_1B, 1226 MSG_BUS_DEV_RESET, TRUE)) { 1227 #ifdef __FreeBSD__ 1228 /* 1229 * Don't mark the user's request for this BDR 1230 * as completing with CAM_BDR_SENT. CAM3 1231 * specifies CAM_REQ_CMP. 1232 */ 1233 if (scb != NULL 1234 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV 1235 && ahc_match_scb(ahc, scb, target, channel, 1236 CAM_LUN_WILDCARD, 1237 SCB_LIST_NULL, 1238 ROLE_INITIATOR)) { 1239 ahc_set_transaction_status(scb, CAM_REQ_CMP); 1240 } 1241 #endif 1242 ahc_compile_devinfo(&devinfo, 1243 initiator_role_id, 1244 target, 1245 CAM_LUN_WILDCARD, 1246 channel, 1247 ROLE_INITIATOR); 1248 ahc_handle_devreset(ahc, &devinfo, 1249 CAM_BDR_SENT, 1250 "Bus Device Reset", 1251 /*verbose_level*/0); 1252 printerror = 0; 1253 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1254 MSG_EXT_PPR, FALSE)) { 1255 struct ahc_initiator_tinfo *tinfo; 1256 struct ahc_tmode_tstate *tstate; 1257 1258 /* 1259 * PPR Rejected. Try non-ppr negotiation 1260 * and retry command. 
1261 */ 1262 tinfo = ahc_fetch_transinfo(ahc, 1263 devinfo.channel, 1264 devinfo.our_scsiid, 1265 devinfo.target, 1266 &tstate); 1267 tinfo->curr.transport_version = 2; 1268 tinfo->goal.transport_version = 2; 1269 tinfo->goal.ppr_options = 0; 1270 ahc_qinfifo_requeue_tail(ahc, scb); 1271 printerror = 0; 1272 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1273 MSG_EXT_WDTR, FALSE) 1274 || ahc_sent_msg(ahc, AHCMSG_EXT, 1275 MSG_EXT_SDTR, FALSE)) { 1276 /* 1277 * Negotiation Rejected. Go-async and 1278 * retry command. 1279 */ 1280 ahc_set_width(ahc, &devinfo, 1281 MSG_EXT_WDTR_BUS_8_BIT, 1282 AHC_TRANS_CUR|AHC_TRANS_GOAL, 1283 /*paused*/TRUE); 1284 ahc_set_syncrate(ahc, &devinfo, 1285 /*syncrate*/NULL, 1286 /*period*/0, /*offset*/0, 1287 /*ppr_options*/0, 1288 AHC_TRANS_CUR|AHC_TRANS_GOAL, 1289 /*paused*/TRUE); 1290 ahc_qinfifo_requeue_tail(ahc, scb); 1291 printerror = 0; 1292 } 1293 } 1294 if (printerror != 0) { 1295 u_int i; 1296 1297 if (scb != NULL) { 1298 u_int tag; 1299 1300 if ((scb->hscb->control & TAG_ENB) != 0) 1301 tag = scb->hscb->tag; 1302 else 1303 tag = SCB_LIST_NULL; 1304 ahc_print_path(ahc, scb); 1305 ahc_abort_scbs(ahc, target, channel, 1306 SCB_GET_LUN(scb), tag, 1307 ROLE_INITIATOR, 1308 CAM_UNEXP_BUSFREE); 1309 } else { 1310 /* 1311 * We had not fully identified this connection, 1312 * so we cannot abort anything. 1313 */ 1314 printf("%s: ", ahc_name(ahc)); 1315 } 1316 for (i = 0; i < num_phases; i++) { 1317 if (lastphase == ahc_phase_table[i].phase) 1318 break; 1319 } 1320 /* 1321 * Renegotiate with this device at the 1322 * next oportunity just in case this busfree 1323 * is due to a negotiation mismatch with the 1324 * device. 
1325 */ 1326 ahc_force_renegotiation(ahc); 1327 printf("Unexpected busfree %s\n" 1328 "SEQADDR == 0x%x\n", 1329 ahc_phase_table[i].phasemsg, 1330 ahc_inb(ahc, SEQADDR0) 1331 | (ahc_inb(ahc, SEQADDR1) << 8)); 1332 } 1333 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1334 ahc_restart(ahc); 1335 } else { 1336 printf("%s: Missing case in ahc_handle_scsiint. status = %x\n", 1337 ahc_name(ahc), status); 1338 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1339 } 1340 } 1341 1342 /* 1343 * Force renegotiation to occur the next time we initiate 1344 * a command to the current device. 1345 */ 1346 static void 1347 ahc_force_renegotiation(struct ahc_softc *ahc) 1348 { 1349 struct ahc_devinfo devinfo; 1350 struct ahc_initiator_tinfo *targ_info; 1351 struct ahc_tmode_tstate *tstate; 1352 1353 ahc_fetch_devinfo(ahc, &devinfo); 1354 targ_info = ahc_fetch_transinfo(ahc, 1355 devinfo.channel, 1356 devinfo.our_scsiid, 1357 devinfo.target, 1358 &tstate); 1359 ahc_update_neg_request(ahc, &devinfo, tstate, 1360 targ_info, /*force*/TRUE); 1361 } 1362 1363 #define AHC_MAX_STEPS 2000 1364 void 1365 ahc_clear_critical_section(struct ahc_softc *ahc) 1366 { 1367 int stepping; 1368 int steps; 1369 u_int simode0; 1370 u_int simode1; 1371 1372 if (ahc->num_critical_sections == 0) 1373 return; 1374 1375 stepping = FALSE; 1376 steps = 0; 1377 simode0 = 0; 1378 simode1 = 0; 1379 for (;;) { 1380 struct cs *cs; 1381 u_int seqaddr; 1382 u_int i; 1383 1384 seqaddr = ahc_inb(ahc, SEQADDR0) 1385 | (ahc_inb(ahc, SEQADDR1) << 8); 1386 1387 /* 1388 * Seqaddr represents the next instruction to execute, 1389 * so we are really executing the instruction just 1390 * before it. 
1391 */ 1392 if (seqaddr != 0) 1393 seqaddr -= 1; 1394 cs = ahc->critical_sections; 1395 for (i = 0; i < ahc->num_critical_sections; i++, cs++) { 1396 1397 if (cs->begin < seqaddr && cs->end >= seqaddr) 1398 break; 1399 } 1400 1401 if (i == ahc->num_critical_sections) 1402 break; 1403 1404 if (steps > AHC_MAX_STEPS) { 1405 printf("%s: Infinite loop in critical section\n", 1406 ahc_name(ahc)); 1407 ahc_dump_card_state(ahc); 1408 panic("critical section loop"); 1409 } 1410 1411 steps++; 1412 if (stepping == FALSE) { 1413 1414 /* 1415 * Disable all interrupt sources so that the 1416 * sequencer will not be stuck by a pausing 1417 * interrupt condition while we attempt to 1418 * leave a critical section. 1419 */ 1420 simode0 = ahc_inb(ahc, SIMODE0); 1421 ahc_outb(ahc, SIMODE0, 0); 1422 simode1 = ahc_inb(ahc, SIMODE1); 1423 ahc_outb(ahc, SIMODE1, 0); 1424 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1425 ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) | STEP); 1426 stepping = TRUE; 1427 } 1428 ahc_outb(ahc, HCNTRL, ahc->unpause); 1429 while (!ahc_is_paused(ahc)) 1430 ahc_delay(200); 1431 } 1432 if (stepping) { 1433 ahc_outb(ahc, SIMODE0, simode0); 1434 ahc_outb(ahc, SIMODE1, simode1); 1435 ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) & ~STEP); 1436 } 1437 } 1438 1439 /* 1440 * Clear any pending interrupt status. 
1441 */ 1442 void 1443 ahc_clear_intstat(struct ahc_softc *ahc) 1444 { 1445 /* Clear any interrupt conditions this may have caused */ 1446 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI 1447 |CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG| 1448 CLRREQINIT); 1449 ahc_flush_device_writes(ahc); 1450 ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO); 1451 ahc_flush_device_writes(ahc); 1452 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1453 ahc_flush_device_writes(ahc); 1454 } 1455 1456 /**************************** Debugging Routines ******************************/ 1457 #ifdef AHC_DEBUG 1458 uint32_t ahc_debug = AHC_DEBUG_OPTS; 1459 #endif 1460 1461 void 1462 ahc_print_scb(struct scb *scb) 1463 { 1464 int i; 1465 1466 struct hardware_scb *hscb = scb->hscb; 1467 1468 printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", 1469 (void *)scb, 1470 hscb->control, 1471 hscb->scsiid, 1472 hscb->lun, 1473 hscb->cdb_len); 1474 printf("Shared Data: "); 1475 for (i = 0; i < sizeof(hscb->shared_data.cdb); i++) 1476 printf("%#02x", hscb->shared_data.cdb[i]); 1477 printf(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n", 1478 ahc_le32toh(hscb->dataptr), 1479 ahc_le32toh(hscb->datacnt), 1480 ahc_le32toh(hscb->sgptr), 1481 hscb->tag); 1482 if (scb->sg_count > 0) { 1483 for (i = 0; i < scb->sg_count; i++) { 1484 printf("sg[%d] - Addr 0x%x%x : Length %d\n", 1485 i, 1486 (ahc_le32toh(scb->sg_list[i].len) >> 24 1487 & SG_HIGH_ADDR_BITS), 1488 ahc_le32toh(scb->sg_list[i].addr), 1489 ahc_le32toh(scb->sg_list[i].len)); 1490 } 1491 } 1492 } 1493 1494 /************************* Transfer Negotiation *******************************/ 1495 /* 1496 * Allocate per target mode instance (ID we respond to as a target) 1497 * transfer negotiation data structures. 
1498 */ 1499 static struct ahc_tmode_tstate * 1500 ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel) 1501 { 1502 struct ahc_tmode_tstate *master_tstate; 1503 struct ahc_tmode_tstate *tstate; 1504 int i; 1505 1506 master_tstate = ahc->enabled_targets[ahc->our_id]; 1507 if (channel == 'B') { 1508 scsi_id += 8; 1509 master_tstate = ahc->enabled_targets[ahc->our_id_b + 8]; 1510 } 1511 if (ahc->enabled_targets[scsi_id] != NULL 1512 && ahc->enabled_targets[scsi_id] != master_tstate) 1513 panic("%s: ahc_alloc_tstate - Target already allocated", 1514 ahc_name(ahc)); 1515 tstate = (struct ahc_tmode_tstate*)malloc(sizeof(*tstate), 1516 M_DEVBUF, M_NOWAIT); 1517 if (tstate == NULL) 1518 return (NULL); 1519 1520 /* 1521 * If we have allocated a master tstate, copy user settings from 1522 * the master tstate (taken from SRAM or the EEPROM) for this 1523 * channel, but reset our current and goal settings to async/narrow 1524 * until an initiator talks to us. 1525 */ 1526 if (master_tstate != NULL) { 1527 memcpy(tstate, master_tstate, sizeof(*tstate)); 1528 memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); 1529 tstate->ultraenb = 0; 1530 for (i = 0; i < AHC_NUM_TARGETS; i++) { 1531 memset(&tstate->transinfo[i].curr, 0, 1532 sizeof(tstate->transinfo[i].curr)); 1533 memset(&tstate->transinfo[i].goal, 0, 1534 sizeof(tstate->transinfo[i].goal)); 1535 } 1536 } else 1537 memset(tstate, 0, sizeof(*tstate)); 1538 ahc->enabled_targets[scsi_id] = tstate; 1539 return (tstate); 1540 } 1541 1542 #ifdef AHC_TARGET_MODE 1543 /* 1544 * Free per target mode instance (ID we respond to as a target) 1545 * transfer negotiation data structures. 1546 */ 1547 static void 1548 ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force) 1549 { 1550 struct ahc_tmode_tstate *tstate; 1551 1552 /* 1553 * Don't clean up our "master" tstate. 1554 * It has our default user settings. 
1555 */ 1556 if (((channel == 'B' && scsi_id == ahc->our_id_b) 1557 || (channel == 'A' && scsi_id == ahc->our_id)) 1558 && force == FALSE) 1559 return; 1560 1561 if (channel == 'B') 1562 scsi_id += 8; 1563 tstate = ahc->enabled_targets[scsi_id]; 1564 if (tstate != NULL) 1565 free(tstate, M_DEVBUF); 1566 ahc->enabled_targets[scsi_id] = NULL; 1567 } 1568 #endif 1569 1570 /* 1571 * Called when we have an active connection to a target on the bus, 1572 * this function finds the nearest syncrate to the input period limited 1573 * by the capabilities of the bus connectivity of and sync settings for 1574 * the target. 1575 */ 1576 static struct ahc_syncrate * 1577 ahc_devlimited_syncrate(struct ahc_softc *ahc, 1578 struct ahc_initiator_tinfo *tinfo, 1579 u_int *period, u_int *ppr_options, role_t role) 1580 { 1581 struct ahc_transinfo *transinfo; 1582 u_int maxsync; 1583 1584 if ((ahc->features & AHC_ULTRA2) != 0) { 1585 if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0 1586 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) { 1587 maxsync = AHC_SYNCRATE_DT; 1588 } else { 1589 maxsync = AHC_SYNCRATE_ULTRA; 1590 /* Can't do DT on an SE bus */ 1591 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1592 } 1593 } else if ((ahc->features & AHC_ULTRA) != 0) { 1594 maxsync = AHC_SYNCRATE_ULTRA; 1595 } else { 1596 maxsync = AHC_SYNCRATE_FAST; 1597 } 1598 /* 1599 * Never allow a value higher than our current goal 1600 * period otherwise we may allow a target initiated 1601 * negotiation to go above the limit as set by the 1602 * user. In the case of an initiator initiated 1603 * sync negotiation, we limit based on the user 1604 * setting. This allows the system to still accept 1605 * incoming negotiations even if target initiated 1606 * negotiation is not performed. 
1607 */ 1608 if (role == ROLE_TARGET) 1609 transinfo = &tinfo->user; 1610 else 1611 transinfo = &tinfo->goal; 1612 *ppr_options &= transinfo->ppr_options; 1613 if (transinfo->period == 0) { 1614 *period = 0; 1615 *ppr_options = 0; 1616 return (NULL); 1617 } 1618 *period = MAX(*period, transinfo->period); 1619 return (ahc_find_syncrate(ahc, period, ppr_options, maxsync)); 1620 } 1621 1622 /* 1623 * Look up the valid period to SCSIRATE conversion in our table. 1624 * Return the period and offset that should be sent to the target 1625 * if this was the beginning of an SDTR. 1626 */ 1627 struct ahc_syncrate * 1628 ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, 1629 u_int *ppr_options, u_int maxsync) 1630 { 1631 struct ahc_syncrate *syncrate; 1632 1633 if ((ahc->features & AHC_DT) == 0) 1634 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1635 1636 /* Skip all DT only entries if DT is not available */ 1637 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0 1638 && maxsync < AHC_SYNCRATE_ULTRA2) 1639 maxsync = AHC_SYNCRATE_ULTRA2; 1640 1641 for (syncrate = &ahc_syncrates[maxsync]; 1642 syncrate->rate != NULL; 1643 syncrate++) { 1644 1645 /* 1646 * The Ultra2 table doesn't go as low 1647 * as for the Fast/Ultra cards. 1648 */ 1649 if ((ahc->features & AHC_ULTRA2) != 0 1650 && (syncrate->sxfr_u2 == 0)) 1651 break; 1652 1653 if (*period <= syncrate->period) { 1654 /* 1655 * When responding to a target that requests 1656 * sync, the requested rate may fall between 1657 * two rates that we can output, but still be 1658 * a rate that we can receive. Because of this, 1659 * we want to respond to the target with 1660 * the same rate that it sent to us even 1661 * if the period we use to send data to it 1662 * is lower. Only lower the response period 1663 * if we must. 1664 */ 1665 if (syncrate == &ahc_syncrates[maxsync]) 1666 *period = syncrate->period; 1667 1668 /* 1669 * At some speeds, we only support 1670 * ST transfers. 
1671 */ 1672 if ((syncrate->sxfr_u2 & ST_SXFR) != 0) 1673 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1674 break; 1675 } 1676 } 1677 1678 if ((*period == 0) 1679 || (syncrate->rate == NULL) 1680 || ((ahc->features & AHC_ULTRA2) != 0 1681 && (syncrate->sxfr_u2 == 0))) { 1682 /* Use asynchronous transfers. */ 1683 *period = 0; 1684 syncrate = NULL; 1685 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1686 } 1687 return (syncrate); 1688 } 1689 1690 /* 1691 * Convert from an entry in our syncrate table to the SCSI equivalent 1692 * sync "period" factor. 1693 */ 1694 u_int 1695 ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync) 1696 { 1697 struct ahc_syncrate *syncrate; 1698 1699 if ((ahc->features & AHC_ULTRA2) != 0) 1700 scsirate &= SXFR_ULTRA2; 1701 else 1702 scsirate &= SXFR; 1703 1704 syncrate = &ahc_syncrates[maxsync]; 1705 while (syncrate->rate != NULL) { 1706 1707 if ((ahc->features & AHC_ULTRA2) != 0) { 1708 if (syncrate->sxfr_u2 == 0) 1709 break; 1710 else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2)) 1711 return (syncrate->period); 1712 } else if (scsirate == (syncrate->sxfr & SXFR)) { 1713 return (syncrate->period); 1714 } 1715 syncrate++; 1716 } 1717 return (0); /* async */ 1718 } 1719 1720 /* 1721 * Truncate the given synchronous offset to a value the 1722 * current adapter type and syncrate are capable of. 
1723 */ 1724 void 1725 ahc_validate_offset(struct ahc_softc *ahc, 1726 struct ahc_initiator_tinfo *tinfo, 1727 struct ahc_syncrate *syncrate, 1728 u_int *offset, int wide, role_t role) 1729 { 1730 u_int maxoffset; 1731 1732 /* Limit offset to what we can do */ 1733 if (syncrate == NULL) { 1734 maxoffset = 0; 1735 } else if ((ahc->features & AHC_ULTRA2) != 0) { 1736 maxoffset = MAX_OFFSET_ULTRA2; 1737 } else { 1738 if (wide) 1739 maxoffset = MAX_OFFSET_16BIT; 1740 else 1741 maxoffset = MAX_OFFSET_8BIT; 1742 } 1743 *offset = MIN(*offset, maxoffset); 1744 if (tinfo != NULL) { 1745 if (role == ROLE_TARGET) 1746 *offset = MIN(*offset, tinfo->user.offset); 1747 else 1748 *offset = MIN(*offset, tinfo->goal.offset); 1749 } 1750 } 1751 1752 /* 1753 * Truncate the given transfer width parameter to a value the 1754 * current adapter type is capable of. 1755 */ 1756 void 1757 ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, 1758 u_int *bus_width, role_t role) 1759 { 1760 switch (*bus_width) { 1761 default: 1762 if (ahc->features & AHC_WIDE) { 1763 /* Respond Wide */ 1764 *bus_width = MSG_EXT_WDTR_BUS_16_BIT; 1765 break; 1766 } 1767 /* FALLTHROUGH */ 1768 case MSG_EXT_WDTR_BUS_8_BIT: 1769 *bus_width = MSG_EXT_WDTR_BUS_8_BIT; 1770 break; 1771 } 1772 if (tinfo != NULL) { 1773 if (role == ROLE_TARGET) 1774 *bus_width = MIN(tinfo->user.width, *bus_width); 1775 else 1776 *bus_width = MIN(tinfo->goal.width, *bus_width); 1777 } 1778 } 1779 1780 /* 1781 * Update the bitmask of targets for which the controller should 1782 * negotiate with at the next convenient oportunity. This currently 1783 * means the next time we send the initial identify messages for 1784 * a new transaction. 
1785 */ 1786 int 1787 ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1788 struct ahc_tmode_tstate *tstate, 1789 struct ahc_initiator_tinfo *tinfo, int force) 1790 { 1791 u_int auto_negotiate_orig; 1792 1793 auto_negotiate_orig = tstate->auto_negotiate; 1794 if (tinfo->curr.period != tinfo->goal.period 1795 || tinfo->curr.width != tinfo->goal.width 1796 || tinfo->curr.offset != tinfo->goal.offset 1797 || tinfo->curr.ppr_options != tinfo->goal.ppr_options 1798 || (force 1799 && (tinfo->goal.offset != 0 1800 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT 1801 || tinfo->goal.ppr_options != 0))) 1802 tstate->auto_negotiate |= devinfo->target_mask; 1803 else 1804 tstate->auto_negotiate &= ~devinfo->target_mask; 1805 1806 return (auto_negotiate_orig != tstate->auto_negotiate); 1807 } 1808 1809 /* 1810 * Update the user/goal/curr tables of synchronous negotiation 1811 * parameters as well as, in the case of a current or active update, 1812 * any data structures on the host controller. In the case of an 1813 * active update, the specified target is currently talking to us on 1814 * the bus, so the transfer parameter update must take effect 1815 * immediately. 
1816 */ 1817 void 1818 ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1819 struct ahc_syncrate *syncrate, u_int period, 1820 u_int offset, u_int ppr_options, u_int type, int paused) 1821 { 1822 struct ahc_initiator_tinfo *tinfo; 1823 struct ahc_tmode_tstate *tstate; 1824 u_int old_period; 1825 u_int old_offset; 1826 u_int old_ppr; 1827 int active; 1828 int update_needed; 1829 1830 active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; 1831 update_needed = 0; 1832 1833 if (syncrate == NULL) { 1834 period = 0; 1835 offset = 0; 1836 } 1837 1838 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 1839 devinfo->target, &tstate); 1840 1841 if ((type & AHC_TRANS_USER) != 0) { 1842 tinfo->user.period = period; 1843 tinfo->user.offset = offset; 1844 tinfo->user.ppr_options = ppr_options; 1845 } 1846 1847 if ((type & AHC_TRANS_GOAL) != 0) { 1848 tinfo->goal.period = period; 1849 tinfo->goal.offset = offset; 1850 tinfo->goal.ppr_options = ppr_options; 1851 } 1852 1853 old_period = tinfo->curr.period; 1854 old_offset = tinfo->curr.offset; 1855 old_ppr = tinfo->curr.ppr_options; 1856 1857 if ((type & AHC_TRANS_CUR) != 0 1858 && (old_period != period 1859 || old_offset != offset 1860 || old_ppr != ppr_options)) { 1861 u_int scsirate; 1862 1863 update_needed++; 1864 scsirate = tinfo->scsirate; 1865 if ((ahc->features & AHC_ULTRA2) != 0) { 1866 1867 scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC); 1868 if (syncrate != NULL) { 1869 scsirate |= syncrate->sxfr_u2; 1870 if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) 1871 scsirate |= ENABLE_CRC; 1872 else 1873 scsirate |= SINGLE_EDGE; 1874 } 1875 } else { 1876 1877 scsirate &= ~(SXFR|SOFS); 1878 /* 1879 * Ensure Ultra mode is set properly for 1880 * this target. 
1881 */ 1882 tstate->ultraenb &= ~devinfo->target_mask; 1883 if (syncrate != NULL) { 1884 if (syncrate->sxfr & ULTRA_SXFR) { 1885 tstate->ultraenb |= 1886 devinfo->target_mask; 1887 } 1888 scsirate |= syncrate->sxfr & SXFR; 1889 scsirate |= offset & SOFS; 1890 } 1891 if (active) { 1892 u_int sxfrctl0; 1893 1894 sxfrctl0 = ahc_inb(ahc, SXFRCTL0); 1895 sxfrctl0 &= ~FAST20; 1896 if (tstate->ultraenb & devinfo->target_mask) 1897 sxfrctl0 |= FAST20; 1898 ahc_outb(ahc, SXFRCTL0, sxfrctl0); 1899 } 1900 } 1901 if (active) { 1902 ahc_outb(ahc, SCSIRATE, scsirate); 1903 if ((ahc->features & AHC_ULTRA2) != 0) 1904 ahc_outb(ahc, SCSIOFFSET, offset); 1905 } 1906 1907 tinfo->scsirate = scsirate; 1908 tinfo->curr.period = period; 1909 tinfo->curr.offset = offset; 1910 tinfo->curr.ppr_options = ppr_options; 1911 1912 ahc_send_async(ahc, devinfo->channel, devinfo->target, 1913 CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); 1914 if (bootverbose) { 1915 if (offset != 0) { 1916 printf("%s: target %d synchronous at %sMHz%s, " 1917 "offset = 0x%x\n", ahc_name(ahc), 1918 devinfo->target, syncrate->rate, 1919 (ppr_options & MSG_EXT_PPR_DT_REQ) 1920 ? " DT" : "", offset); 1921 } else { 1922 printf("%s: target %d using " 1923 "asynchronous transfers\n", 1924 ahc_name(ahc), devinfo->target); 1925 } 1926 } 1927 } 1928 1929 update_needed += ahc_update_neg_request(ahc, devinfo, tstate, 1930 tinfo, /*force*/FALSE); 1931 1932 if (update_needed) 1933 ahc_update_pending_scbs(ahc); 1934 } 1935 1936 /* 1937 * Update the user/goal/curr tables of wide negotiation 1938 * parameters as well as, in the case of a current or active update, 1939 * any data structures on the host controller. In the case of an 1940 * active update, the specified target is currently talking to us on 1941 * the bus, so the transfer parameter update must take effect 1942 * immediately. 
1943 */ 1944 void 1945 ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1946 u_int width, u_int type, int paused) 1947 { 1948 struct ahc_initiator_tinfo *tinfo; 1949 struct ahc_tmode_tstate *tstate; 1950 u_int oldwidth; 1951 int active; 1952 int update_needed; 1953 1954 active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; 1955 update_needed = 0; 1956 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 1957 devinfo->target, &tstate); 1958 1959 if ((type & AHC_TRANS_USER) != 0) 1960 tinfo->user.width = width; 1961 1962 if ((type & AHC_TRANS_GOAL) != 0) 1963 tinfo->goal.width = width; 1964 1965 oldwidth = tinfo->curr.width; 1966 if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) { 1967 u_int scsirate; 1968 1969 update_needed++; 1970 scsirate = tinfo->scsirate; 1971 scsirate &= ~WIDEXFER; 1972 if (width == MSG_EXT_WDTR_BUS_16_BIT) 1973 scsirate |= WIDEXFER; 1974 1975 tinfo->scsirate = scsirate; 1976 1977 if (active) 1978 ahc_outb(ahc, SCSIRATE, scsirate); 1979 1980 tinfo->curr.width = width; 1981 1982 ahc_send_async(ahc, devinfo->channel, devinfo->target, 1983 CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); 1984 if (bootverbose) { 1985 printf("%s: target %d using %dbit transfers\n", 1986 ahc_name(ahc), devinfo->target, 1987 8 * (0x01 << width)); 1988 } 1989 } 1990 1991 update_needed += ahc_update_neg_request(ahc, devinfo, tstate, 1992 tinfo, /*force*/FALSE); 1993 if (update_needed) 1994 ahc_update_pending_scbs(ahc); 1995 } 1996 1997 /* 1998 * Update the current state of tagged queuing for a given target. 
 */
/*
 * Apply a new tagged-queuing algorithm for the device described by
 * devinfo: let the platform layer record it, then post an
 * AC_TRANSFER_NEG async event (with the chosen algorithm as argument)
 * so upper layers learn of the change.
 */
void
ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
	     ahc_queue_alg alg)
{
	ahc_platform_set_tags(ahc, devinfo, alg);
	ahc_send_async(ahc, devinfo->channel, devinfo->target,
		       devinfo->lun, AC_TRANSFER_NEG, &alg);
}

/*
 * When the transfer settings for a connection change, update any
 * in-transit SCBs to contain the new data so the hardware will
 * be set correctly during future (re)selections.
 */
static void
ahc_update_pending_scbs(struct ahc_softc *ahc)
{
	struct	scb *pending_scb;
	int	pending_scb_count;
	int	i;
	int	paused;
	u_int	saved_scbptr;

	/*
	 * Traverse the pending SCB list and ensure that all of the
	 * SCBs there have the proper settings.
	 */
	pending_scb_count = 0;
	LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
		struct ahc_devinfo devinfo;
		struct hardware_scb *pending_hscb;
		struct ahc_initiator_tinfo *tinfo;
		struct ahc_tmode_tstate *tstate;

		ahc_scb_devinfo(ahc, &devinfo, pending_scb);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		pending_hscb = pending_scb->hscb;
		/* Refresh the ultra-enable bit from the current tstate. */
		pending_hscb->control &= ~ULTRAENB;
		if ((tstate->ultraenb & devinfo.target_mask) != 0)
			pending_hscb->control |= ULTRAENB;
		pending_hscb->scsirate = tinfo->scsirate;
		pending_hscb->scsioffset = tinfo->curr.offset;
		/*
		 * If auto-negotiation has been turned off for this
		 * target, drop any pending negotiation request so the
		 * sequencer is not asked to send a message.
		 */
		if ((tstate->auto_negotiate & devinfo.target_mask) == 0
		 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
			pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
			pending_hscb->control &= ~MK_MESSAGE;
		}
		ahc_sync_scb(ahc, pending_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		pending_scb_count++;
	}

	if (pending_scb_count == 0)
		return;

	/* Pause the sequencer if it isn't already; remember who paused it. */
	if (ahc_is_paused(ahc)) {
		paused = 1;
	} else {
		paused = 0;
		ahc_pause(ahc);
	}

	saved_scbptr = ahc_inb(ahc, SCBPTR);
	/* Ensure that the hscbs down on the card match the new information */
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		struct hardware_scb *pending_hscb;
		u_int control;
		u_int scb_tag;

		ahc_outb(ahc, SCBPTR, i);
		scb_tag = ahc_inb(ahc, SCB_TAG);
		pending_scb = ahc_lookup_scb(ahc, scb_tag);
		if (pending_scb == NULL)
			continue;

		pending_hscb = pending_scb->hscb;
		control = ahc_inb(ahc, SCB_CONTROL);
		control &= ~(ULTRAENB|MK_MESSAGE);
		control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE);
		ahc_outb(ahc, SCB_CONTROL, control);
		ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate);
		ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset);
	}
	/* Restore the sequencer's SCB pointer before resuming. */
	ahc_outb(ahc, SCBPTR, saved_scbptr);

	if (paused == 0)
		ahc_unpause(ahc);
}

/**************************** Pathing Information *****************************/
/*
 * Determine the identity (role, our id, target, lun, channel) of the
 * connection currently active on the bus by reading controller state,
 * and compile it into *devinfo.
 */
static void
ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	u_int	saved_scsiid;
	role_t	role;
	int	our_id;

	if (ahc_inb(ahc, SSTAT0) & TARGET)
		role = ROLE_TARGET;
	else
		role = ROLE_INITIATOR;

	if (role == ROLE_TARGET
	 && (ahc->features & AHC_MULTI_TID) != 0
	 && (ahc_inb(ahc, SEQ_FLAGS)
	   & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) {
		/* We were selected, so pull our id from TARGIDIN */
		our_id = ahc_inb(ahc, TARGIDIN) & OID;
	} else if ((ahc->features & AHC_ULTRA2) != 0)
		our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
	else
		our_id = ahc_inb(ahc, SCSIID) & OID;

	saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
	ahc_compile_devinfo(devinfo,
			    our_id,
			    SCSIID_TARGET(ahc, saved_scsiid),
			    ahc_inb(ahc, SAVED_LUN),
			    SCSIID_CHANNEL(ahc, saved_scsiid),
			    role);
}

/*
 * Look up the phase table entry for the given bus phase.  If no entry
 * matches, the entry just past the table (index num_phases) is returned
 * as the default.
 */
struct ahc_phase_table_entry*
ahc_lookup_phase_entry(int phase)
{
	struct ahc_phase_table_entry *entry;
	struct ahc_phase_table_entry *last_entry;

	/*
	 * num_phases doesn't include the default entry which
	 * will be returned if the phase doesn't match.
	 */
	last_entry = &ahc_phase_table[num_phases];
	for (entry = ahc_phase_table; entry < last_entry; entry++) {
		if (phase == entry->phase)
			break;
	}
	return (entry);
}

/*
 * Fill out a devinfo structure from its component parts.  Channel 'B'
 * targets occupy offsets 8-15 in per-target arrays, hence the +8
 * adjustment of target_offset; target_mask is the corresponding
 * single-bit mask.
 */
void
ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
		    u_int lun, char channel, role_t role)
{
	devinfo->our_scsiid = our_id;
	devinfo->target = target;
	devinfo->lun = lun;
	devinfo->target_offset = target;
	devinfo->channel = channel;
	devinfo->role = role;
	if (channel == 'B')
		devinfo->target_offset += 8;
	devinfo->target_mask = (0x01 << devinfo->target_offset);
}

/*
 * Print a "name:channel:target:lun:" prefix for diagnostic output.
 * No trailing newline is emitted; callers append to the line.
 */
static void
ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	printf("%s:%c:%d:%d:", ahc_name(ahc), devinfo->channel,
	       devinfo->target, devinfo->lun);
}

/*
 * Compile a devinfo structure describing the connection associated
 * with the given SCB.  The role is taken from the SCB's TARGET_SCB
 * control bit.
 */
static void
ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		struct scb *scb)
{
	role_t	role;
	int	our_id;

	our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
	role = ROLE_INITIATOR;
	if ((scb->hscb->control & TARGET_SCB) != 0)
		role = ROLE_TARGET;
	ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
			    SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role);
}


/************************ Message Phase Processing ****************************/
/*
 * Assert ATN on the bus.  On non-DT controllers the currently latched
 * input signals are OR'd into the write — NOTE(review): presumably so
 * the other signal lines are not disturbed by the SCSISIGO write;
 * confirm against the chip documentation.
 */
static void
ahc_assert_atn(struct ahc_softc *ahc)
{
	u_int scsisigo;

	scsisigo = ATNO;
	if ((ahc->features & AHC_DT) == 0)
		scsisigo |= ahc_inb(ahc, SCSISIGI);
	ahc_outb(ahc, SCSISIGO, scsisigo);
}

/*
 * When an initiator transaction with the MK_MESSAGE flag either reconnects
 * or enters the initial message out phase,
we are interrupted.  Fill our
 * outgoing message buffer with the appropriate message and begin handling
 * the message phase(s) manually.
 */
static void
ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
			   struct scb *scb)
{
	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahc->msgout_index = 0;
	ahc->msgout_len = 0;

	/*
	 * Start with an identify message (plus optional tag message pair)
	 * unless this is a device reset, in which case the reset message
	 * stands alone.
	 */
	if ((scb->flags & SCB_DEVICE_RESET) == 0
	 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
		u_int identify_msg;

		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
		if ((scb->hscb->control & DISCENB) != 0)
			identify_msg |= MSG_IDENTIFY_DISCFLAG;
		ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
		ahc->msgout_len++;

		if ((scb->hscb->control & TAG_ENB) != 0) {
			/* Two-byte queue tag message: type, then tag id. */
			ahc->msgout_buf[ahc->msgout_index++] =
			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
			ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
			ahc->msgout_len += 2;
		}
	}

	if (scb->flags & SCB_DEVICE_RESET) {
		ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printf("Bus Device Reset Message Sent\n");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else if ((scb->flags & SCB_ABORT) != 0) {
		if ((scb->hscb->control & TAG_ENB) != 0)
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
		else
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printf("Abort%s Message Sent\n",
		       (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
		ahc_build_transfer_msg(ahc, devinfo);
	} else {
		printf("ahc_intr: AWAITING_MSG for an SCB that "
		       "does not have a waiting message\n");
		printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
		       devinfo->target_mask);
		panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
		      "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
		      ahc_inb(ahc, MSG_OUT), scb->flags);
	}

	/*
	 * Clear the MK_MESSAGE flag from the SCB so we aren't
	 * asked to send this message again.
	 */
	ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
	scb->hscb->control &= ~MK_MESSAGE;
	ahc->msgout_index = 0;
	ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
}

/*
 * Build an appropriate transfer negotiation message for the
 * currently active target.
 */
static void
ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	/*
	 * We need to initiate transfer negotiations.
	 * If our current and goal settings are identical,
	 * we want to renegotiate due to a check condition.
	 */
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	struct	ahc_syncrate *rate;
	int	dowide;
	int	dosync;
	int	doppr;
	int	use_ppr;
	u_int	period;
	u_int	ppr_options;
	u_int	offset;

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	/*
	 * Filter our period based on the current connection.
	 * If we can't perform DT transfers on this segment (not in LVD
	 * mode for instance), then our decision to issue a PPR message
	 * may change.
	 */
	period = tinfo->goal.period;
	ppr_options = tinfo->goal.ppr_options;
	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		ppr_options = 0;
	rate = ahc_devlimited_syncrate(ahc, tinfo, &period,
				       &ppr_options, devinfo->role);
	dowide = tinfo->curr.width != tinfo->goal.width;
	dosync = tinfo->curr.period != period;
	doppr = tinfo->curr.ppr_options != ppr_options;

	/*
	 * Current already matches goal (e.g. a renegotiation after a
	 * check condition): renegotiate whatever the goal asks for.
	 */
	if (!dowide && !dosync && !doppr) {
		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
		dosync = tinfo->goal.offset != 0;
		doppr = tinfo->goal.ppr_options != 0;
	}

	if (!dowide && !dosync && !doppr) {
		panic("ahc_intr: AWAITING_MSG for negotiation, "
		      "but no negotiation needed\n");
	}

	use_ppr = (tinfo->curr.transport_version >= 3) || doppr;
	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		use_ppr = 0;

	/*
	 * Both the PPR message and SDTR message require the
	 * goal syncrate to be limited to what the target device
	 * is capable of handling (based on whether an LVD->SE
	 * expander is on the bus), so combine these two cases.
	 * Regardless, guarantee that if we are using WDTR and SDTR
	 * messages that WDTR comes first.
	 */
	if (use_ppr || (dosync && !dowide)) {

		offset = tinfo->goal.offset;
		ahc_validate_offset(ahc, tinfo, rate, &offset,
				    use_ppr ? tinfo->goal.width
					    : tinfo->curr.width,
				    devinfo->role);
		if (use_ppr) {
			ahc_construct_ppr(ahc, devinfo, period, offset,
					  tinfo->goal.width, ppr_options);
		} else {
			ahc_construct_sdtr(ahc, devinfo, period, offset);
		}
	} else {
		ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width);
	}
}

/*
 * Build a synchronous negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		   u_int period, u_int offset)
{
	/* 5-byte extended message: header, length, code, period, offset. */
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR;
	ahc->msgout_buf[ahc->msgout_index++] = period;
	ahc->msgout_buf[ahc->msgout_index++] = offset;
	ahc->msgout_len += 5;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       devinfo->lun, period, offset);
	}
}

/*
 * Build a wide negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		   u_int bus_width)
{
	/* 4-byte extended message: header, length, code, width. */
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR;
	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
	ahc->msgout_len += 4;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending WDTR %x\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       devinfo->lun, bus_width);
	}
}

/*
 * Build a parallel protocol request message in our message
 * buffer based on the input parameters.
 */
static void
ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		  u_int period, u_int offset, u_int bus_width,
		  u_int ppr_options)
{
	/*
	 * 8-byte extended message: header, length, code, period,
	 * reserved (0), offset, width, protocol options.
	 */
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR;
	ahc->msgout_buf[ahc->msgout_index++] = period;
	ahc->msgout_buf[ahc->msgout_index++] = 0;
	ahc->msgout_buf[ahc->msgout_index++] = offset;
	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
	ahc->msgout_buf[ahc->msgout_index++] = ppr_options;
	ahc->msgout_len += 8;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
		       "offset %x, ppr_options %x\n", ahc_name(ahc),
		       devinfo->channel, devinfo->target, devinfo->lun,
		       bus_width, period, offset, ppr_options);
	}
}

/*
 * Clear any active message state.
 */
static void
ahc_clear_msg_state(struct ahc_softc *ahc)
{
	ahc->msgout_len = 0;
	ahc->msgin_index = 0;
	ahc->msg_type = MSG_TYPE_NONE;
	if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) {
		/*
		 * The target didn't care to respond to our
		 * message request, so clear ATN.
		 */
		ahc_outb(ahc, CLRSINT1, CLRATNO);
	}
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
	ahc_outb(ahc, SEQ_FLAGS2,
		 ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
}

/*
 * Manual message loop handler.  Driven once per REQ while the host is
 * hand-carrying a message phase; ahc->msg_type selects which of the
 * four initiator/target in/out states we are in.  On exit, RETURN_1
 * tells the sequencer whether to continue or exit the message loop.
 */
static void
ahc_handle_message_phase(struct ahc_softc *ahc)
{
	struct	ahc_devinfo devinfo;
	u_int	bus_phase;
	int	end_session;

	ahc_fetch_devinfo(ahc, &devinfo);
	end_session = FALSE;
	bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;

reswitch:
	switch (ahc->msg_type) {
	case MSG_TYPE_INITIATOR_MSGOUT:
	{
		int lastbyte;
		int phasemis;
		int msgdone;

		if (ahc->msgout_len == 0)
			panic("HOST_MSG_LOOP interrupt with no active message");

#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
			ahc_print_devinfo(ahc, &devinfo);
			printf("INITIATOR_MSG_OUT");
		}
#endif
		phasemis = bus_phase != P_MESGOUT;
		if (phasemis) {
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
				printf(" PHASEMIS %s\n",
				       ahc_lookup_phase_entry(bus_phase)
							     ->phasemsg);
			}
#endif
			if (bus_phase == P_MESGIN) {
				/*
				 * Change gears and see if
				 * this message is of interest to
				 * us or should be passed back to
				 * the sequencer.
				 */
				ahc_outb(ahc, CLRSINT1, CLRATNO);
				ahc->send_msg_perror = FALSE;
				ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN;
				ahc->msgin_index = 0;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		/* A pending parity-error report takes priority. */
		if (ahc->send_msg_perror) {
			ahc_outb(ahc, CLRSINT1, CLRATNO);
			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
				printf(" byte 0x%x\n", ahc->send_msg_perror);
#endif
			ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
			break;
		}

		msgdone	= ahc->msgout_index == ahc->msgout_len;
		if (msgdone) {
			/*
			 * The target has requested a retry.
			 * Re-assert ATN, reset our message index to
			 * 0, and try again.
			 */
			ahc->msgout_index = 0;
			ahc_assert_atn(ahc);
		}

		lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
		if (lastbyte) {
			/* Last byte is signified by dropping ATN */
			ahc_outb(ahc, CLRSINT1, CLRATNO);
		}

		/*
		 * Clear our interrupt status and present
		 * the next byte on the bus.
		 */
		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
			printf(" byte 0x%x\n",
			       ahc->msgout_buf[ahc->msgout_index]);
#endif
		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
		break;
	}
	case MSG_TYPE_INITIATOR_MSGIN:
	{
		int phasemis;
		int message_done;

#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
			ahc_print_devinfo(ahc, &devinfo);
			printf("INITIATOR_MSG_IN");
		}
#endif
		phasemis = bus_phase != P_MESGIN;
		if (phasemis) {
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
				printf(" PHASEMIS %s\n",
				       ahc_lookup_phase_entry(bus_phase)
							     ->phasemsg);
			}
#endif
			ahc->msgin_index = 0;
			/*
			 * Switch to message-out if we owe the target a
			 * parity-error report or have an unsent reply.
			 */
			if (bus_phase == P_MESGOUT
			 && (ahc->send_msg_perror == TRUE
			  || (ahc->msgout_len != 0
			   && ahc->msgout_index == 0))) {
				ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		/* Pull the byte in without acking it */
		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);
#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
			printf(" byte 0x%x\n",
			       ahc->msgin_buf[ahc->msgin_index]);
#endif

		message_done = ahc_parse_msg(ahc, &devinfo);

		if (message_done) {
			/*
			 * Clear our incoming message buffer in case there
			 * is another message following this one.
			 */
			ahc->msgin_index = 0;

			/*
			 * If this message elicited a response,
			 * assert ATN so the target takes us to the
			 * message out phase.
			 */
			if (ahc->msgout_len != 0)
				ahc_assert_atn(ahc);
		} else
			ahc->msgin_index++;

		if (message_done == MSGLOOP_TERMINATED) {
			end_session = TRUE;
		} else {
			/* Ack the byte */
			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
			ahc_inb(ahc, SCSIDATL);
		}
		break;
	}
	case MSG_TYPE_TARGET_MSGIN:
	{
		int msgdone;
		int msgout_request;

		if (ahc->msgout_len == 0)
			panic("Target MSGIN with no active message");

		/*
		 * If we interrupted a mesgout session, the initiator
		 * will not know this until our first REQ.  So, we
		 * only honor mesgout requests after we've sent our
		 * first byte.
		 */
		if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
		 && ahc->msgout_index > 0)
			msgout_request = TRUE;
		else
			msgout_request = FALSE;

		if (msgout_request) {

			/*
			 * Change gears and see if
			 * this message is of interest to
			 * us or should be passed back to
			 * the sequencer.
			 */
			ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
			ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
			ahc->msgin_index = 0;
			/* Dummy read to REQ for first byte */
			ahc_inb(ahc, SCSIDATL);
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
			break;
		}

		msgdone = ahc->msgout_index == ahc->msgout_len;
		if (msgdone) {
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
			end_session = TRUE;
			break;
		}

		/*
		 * Present the next byte on the bus.
		 */
		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
		break;
	}
	case MSG_TYPE_TARGET_MSGOUT:
	{
		int lastbyte;
		int msgdone;

		/*
		 * The initiator signals that this is
		 * the last byte by dropping ATN.
		 */
		lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;

		/*
		 * Read the latched byte, but turn off SPIOEN first
		 * so that we don't inadvertently cause a REQ for the
		 * next byte.
		 */
		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
		msgdone = ahc_parse_msg(ahc, &devinfo);
		if (msgdone == MSGLOOP_TERMINATED) {
			/*
			 * The message is *really* done in that it caused
			 * us to go to bus free.  The sequencer has already
			 * been reset at this point, so pull the ejection
			 * handle.
			 */
			return;
		}

		ahc->msgin_index++;

		/*
		 * XXX Read spec about initiator dropping ATN too soon
		 * and use msgdone to detect it.
		 */
		if (msgdone == MSGLOOP_MSGCOMPLETE) {
			ahc->msgin_index = 0;

			/*
			 * If this message elicited a response, transition
			 * to the Message in phase and send it.
			 */
			if (ahc->msgout_len != 0) {
				ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
				ahc_outb(ahc, SXFRCTL0,
					 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
				ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
				ahc->msgin_index = 0;
				break;
			}
		}

		if (lastbyte)
			end_session = TRUE;
		else {
			/* Ask for the next byte. */
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
		}

		break;
	}
	default:
		panic("Unknown REQINIT message type");
	}

	if (end_session) {
		ahc_clear_msg_state(ahc);
		ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
	} else
		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
}

/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, return true only if the target saw the full
 * message.  If "full" is false, return true if the target saw at
 * least the first byte of the message.
 */
static int
ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
{
	int found;
	u_int index;

	found = FALSE;
	index = 0;

	/*
	 * Walk msgout_buf, skipping over each complete message, and
	 * compare msgout_index (how far we actually got on the wire)
	 * against the span of the message being searched for.
	 */
	while (index < ahc->msgout_len) {
		if (ahc->msgout_buf[index] == MSG_EXTENDED) {
			u_int end_index;

			/* Extended message: byte after header is its length. */
			end_index = index + 1 + ahc->msgout_buf[index + 1];
			if (ahc->msgout_buf[index+2] == msgval
			 && type == AHCMSG_EXT) {

				if (full) {
					if (ahc->msgout_index > end_index)
						found = TRUE;
				} else if (ahc->msgout_index > index)
					found = TRUE;
			}
			index = end_index;
		} else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK
			&& ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {

			/* Skip tag type and tag id or residue param*/
			index += 2;
		} else {
			/* Single byte message */
			if (type == AHCMSG_1B
			 && ahc->msgout_buf[index] == msgval
			 && ahc->msgout_index > index)
				found = TRUE;
			index++;
		}

		if (found)
			break;
	}
	return (found);
}

/*
 * Wait for a complete incoming message, parse it, and respond accordingly.
 */
static int
ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	int	reject;
	int	done;
	int	response;
	u_int	targ_scsirate;

	done = MSGLOOP_IN_PROG;
	response = FALSE;
	reject = FALSE;
	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	targ_scsirate = tinfo->scsirate;

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it.  When
	 * the entire message is available and has been
	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
	 * that we have parsed an entire message.
	 *
	 * In the case of extended messages, we accept the length
	 * byte outright and perform more checking once we know the
	 * extended message type.
	 */
	switch (ahc->msgin_buf[0]) {
	case MSG_DISCONNECT:
	case MSG_SAVEDATAPOINTER:
	case MSG_CMDCOMPLETE:
	case MSG_RESTOREPOINTERS:
	case MSG_IGN_WIDE_RESIDUE:
		/*
		 * End our message loop as these are messages
		 * the sequencer handles on its own.
		 */
		done = MSGLOOP_TERMINATED;
		break;
	case MSG_MESSAGE_REJECT:
		response = ahc_handle_msg_reject(ahc, devinfo);
		/* FALLTHROUGH */
	case MSG_NOOP:
		done = MSGLOOP_MSGCOMPLETE;
		break;
	case MSG_EXTENDED:
	{
		/* Wait for enough of the message to begin validation */
		if (ahc->msgin_index < 2)
			break;
		switch (ahc->msgin_buf[2]) {
		case MSG_EXT_SDTR:
		{
			struct	 ahc_syncrate *syncrate;
			u_int	 period;
			u_int	 ppr_options;
			u_int	 offset;
			u_int	 saved_offset;

			if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have both args before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_SDTR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1))
				break;

			period = ahc->msgin_buf[3];
			ppr_options = 0;
			saved_offset = offset = ahc->msgin_buf[4];
			/* Clamp the request to what we can actually do. */
			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
							   &ppr_options,
							   devinfo->role);
			ahc_validate_offset(ahc, tinfo, syncrate, &offset,
					    targ_scsirate & WIDEXFER,
					    devinfo->role);
			if (bootverbose) {
				printf("(%s:%c:%d:%d): Received "
				       "SDTR period %x, offset %x\n\t"
				       "Filtered to period %x, offset %x\n",
				       ahc_name(ahc), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       ahc->msgin_buf[3], saved_offset,
				       period, offset);
			}
			ahc_set_syncrate(ahc, devinfo,
					 syncrate, period,
					 offset, ppr_options,
					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
					 /*paused*/TRUE);

			/*
			 * See if we initiated Sync Negotiation
			 * and didn't have to fall down to async
			 * transfers.
			 */
			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) {
				/* We started it */
				if (saved_offset != offset) {
					/* Went too low - force async */
					reject = TRUE;
				}
			} else {
				/*
				 * Send our own SDTR in reply
				 */
				if (bootverbose
				 && devinfo->role == ROLE_INITIATOR) {
					printf("(%s:%c:%d:%d): Target "
					       "Initiated SDTR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				}
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_construct_sdtr(ahc, devinfo,
						   period, offset);
				ahc->msgout_index = 0;
				response = TRUE;
			}
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		case MSG_EXT_WDTR:
		{
			u_int bus_width;
			u_int saved_width;
			u_int sending_reply;

			sending_reply = FALSE;
			if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have our arg before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_WDTR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1))
				break;

			bus_width = ahc->msgin_buf[3];
			saved_width = bus_width;
			ahc_validate_width(ahc, tinfo, &bus_width,
					   devinfo->role);
			if (bootverbose) {
				printf("(%s:%c:%d:%d): Received WDTR "
				       "%x filtered to %x\n",
				       ahc_name(ahc), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       saved_width, bus_width);
			}

			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) {
				/*
				 * Don't send a WDTR back to the
				 * target, since we asked first.
				 * If the width went higher than our
				 * request, reject it.
				 */
				if (saved_width > bus_width) {
					reject = TRUE;
					printf("(%s:%c:%d:%d): requested %dBit "
					       "transfers.  Rejecting...\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun,
					       8 * (0x01 << bus_width));
					bus_width = 0;
				}
			} else {
				/*
				 * Send our own WDTR in reply
				 */
				if (bootverbose
				 && devinfo->role == ROLE_INITIATOR) {
					printf("(%s:%c:%d:%d): Target "
					       "Initiated WDTR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				}
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_construct_wdtr(ahc, devinfo, bus_width);
				ahc->msgout_index = 0;
				response = TRUE;
				sending_reply = TRUE;
			}
			ahc_set_width(ahc, devinfo, bus_width,
				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
				      /*paused*/TRUE);
			/* After a wide message, we are async */
			ahc_set_syncrate(ahc, devinfo,
					 /*syncrate*/NULL, /*period*/0,
					 /*offset*/0, /*ppr_options*/0,
					 AHC_TRANS_ACTIVE, /*paused*/TRUE);
			if (sending_reply == FALSE && reject == FALSE) {

				/*
				 * We were the WDTR initiator; follow up
				 * with sync negotiation if our goal asks
				 * for an offset.
				 */
				if (tinfo->goal.offset) {
					ahc->msgout_index = 0;
					ahc->msgout_len = 0;
					ahc_build_transfer_msg(ahc, devinfo);
					ahc->msgout_index = 0;
					response = TRUE;
				}
			}
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		case MSG_EXT_PPR:
		{
			struct	ahc_syncrate *syncrate;
			u_int	period;
			u_int	offset;
			u_int	bus_width;
			u_int	ppr_options;
			u_int	saved_width;
			u_int	saved_offset;
			u_int	saved_ppr_options;

			if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have all args before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_PPR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1))
				break;

			period = ahc->msgin_buf[3];
			offset = ahc->msgin_buf[5];
			bus_width = ahc->msgin_buf[6];
			saved_width = bus_width;
			ppr_options = ahc->msgin_buf[7];
			/*
			 * According to the spec, a DT only
			 * period factor with no DT option
			 * set implies async.
			 */
			if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
			 && period == 9)
				offset = 0;
			saved_ppr_options = ppr_options;
			saved_offset = offset;

			/*
			 * Mask out any options we don't support
			 * on any controller.  Transfer options are
			 * only available if we are negotiating wide.
			 */
			ppr_options &= MSG_EXT_PPR_DT_REQ;
			if (bus_width == 0)
				ppr_options = 0;

			ahc_validate_width(ahc, tinfo, &bus_width,
					   devinfo->role);
			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
							   &ppr_options,
							   devinfo->role);
			ahc_validate_offset(ahc, tinfo, syncrate,
					    &offset, bus_width,
					    devinfo->role);

			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) {
				/*
				 * If we are unable to do any of the
				 * requested options (we went too low),
				 * then we'll have to reject the message.
				 */
				if (saved_width > bus_width
				 || saved_offset != offset
				 || saved_ppr_options != ppr_options) {
					reject = TRUE;
					period = 0;
					offset = 0;
					bus_width = 0;
					ppr_options = 0;
					syncrate = NULL;
				}
			} else {
				if (devinfo->role != ROLE_TARGET)
					printf("(%s:%c:%d:%d): Target "
					       "Initiated PPR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				else
					printf("(%s:%c:%d:%d): Initiator "
					       "Initiated PPR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_construct_ppr(ahc, devinfo, period, offset,
						  bus_width, ppr_options);
				ahc->msgout_index = 0;
				response = TRUE;
			}
			if (bootverbose) {
				printf("(%s:%c:%d:%d): Received PPR width %x, "
				       "period %x, offset %x,options %x\n"
				       "\tFiltered to width %x, period %x, "
				       "offset %x, options %x\n",
				       ahc_name(ahc), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       saved_width, ahc->msgin_buf[3],
				       saved_offset, saved_ppr_options,
				       bus_width, period, offset, ppr_options);
			}
			ahc_set_width(ahc, devinfo, bus_width,
				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
				      /*paused*/TRUE);
			ahc_set_syncrate(ahc, devinfo,
					 syncrate, period,
					 offset, ppr_options,
					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
					 /*paused*/TRUE);
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		default:
			/* Unknown extended message.  Reject it. */
			reject = TRUE;
			break;
		}
		break;
	}
#ifdef AHC_TARGET_MODE
	case MSG_BUS_DEV_RESET:
		ahc_handle_devreset(ahc, devinfo,
				    CAM_BDR_SENT,
				    "Bus Device Reset Received",
				    /*verbose_level*/0);
		ahc_restart(ahc);
		done = MSGLOOP_TERMINATED;
		break;
	case MSG_ABORT_TAG:
	case MSG_ABORT:
	case MSG_CLEAR_QUEUE:
	{
		int tag;

		/* Target mode messages */
		if (devinfo->role != ROLE_TARGET) {
			reject = TRUE;
			break;
		}
		tag = SCB_LIST_NULL;
		if (ahc->msgin_buf[0] == MSG_ABORT_TAG)
			tag = ahc_inb(ahc, INITIATOR_TAG);
		ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
			       devinfo->lun, tag, ROLE_TARGET,
			       CAM_REQ_ABORTED);

		tstate = ahc->enabled_targets[devinfo->our_scsiid];
		if (tstate != NULL) {
			struct ahc_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[devinfo->lun];
			if (lstate != NULL) {
				ahc_queue_lstate_event(ahc, lstate,
						       devinfo->our_scsiid,
						       ahc->msgin_buf[0],
						       /*arg*/tag);
				ahc_send_lstate_events(ahc, lstate);
			}
		}
		ahc_restart(ahc);
		done = MSGLOOP_TERMINATED;
		break;
	}
#endif
	case MSG_TERM_IO_PROC:
	default:
		reject = TRUE;
		break;
	}

	if (reject) {
		/*
		 * Setup to reject the message.
		 */
		ahc->msgout_index = 0;
		ahc->msgout_len = 1;
		ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
		done = MSGLOOP_MSGCOMPLETE;
		response = TRUE;
	}

	if (done != MSGLOOP_IN_PROG && !response)
		/* Clear the outgoing message buffer */
		ahc->msgout_len = 0;

	return (done);
}

/*
 * Process a message reject message.
 */
static int
ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	/*
	 * What we care about here is if we had an
	 * outstanding SDTR or WDTR message for this
	 * target.  If we did, this is a signal that
	 * the target is refusing negotiation.
	 */
	struct scb *scb;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;
	u_int scb_index;
	u_int last_msg;
	int   response = 0;

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
				    devinfo->our_scsiid,
				    devinfo->target, &tstate);
	/* Might be necessary */
	last_msg = ahc_inb(ahc, LAST_MSG);

	if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
		/*
		 * Target does not support the PPR message.
		 * Attempt to negotiate SPI-2 style.
		 */
		if (bootverbose) {
			printf("(%s:%c:%d:%d): PPR Rejected. "
			       "Trying WDTR/SDTR\n",
			       ahc_name(ahc), devinfo->channel,
			       devinfo->target, devinfo->lun);
		}
		tinfo->goal.ppr_options = 0;
		tinfo->curr.transport_version = 2;
		tinfo->goal.transport_version = 2;
		ahc->msgout_index = 0;
		ahc->msgout_len = 0;
		ahc_build_transfer_msg(ahc, devinfo);
		ahc->msgout_index = 0;
		response = 1;
	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {

		/* note 8bit xfers */
		printf("(%s:%c:%d:%d): refuses WIDE negotiation.  Using "
		       "8bit transfers\n", ahc_name(ahc),
		       devinfo->channel, devinfo->target, devinfo->lun);
		ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
			      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
			      /*paused*/TRUE);
		/*
		 * No need to clear the sync rate.  If the target
		 * did not accept the command, our syncrate is
		 * unaffected.  If the target started the negotiation,
		 * but rejected our response, we already cleared the
		 * sync rate before sending our WDTR.
		 */
		if (tinfo->goal.offset) {

			/* Start the sync negotiation */
			ahc->msgout_index = 0;
			ahc->msgout_len = 0;
			ahc_build_transfer_msg(ahc, devinfo);
			ahc->msgout_index = 0;
			response = 1;
		}
	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
		/* note asynch xfers and clear flag */
		ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0,
				 /*offset*/0, /*ppr_options*/0,
				 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
				 /*paused*/TRUE);
		printf("(%s:%c:%d:%d): refuses synchronous negotiation. "
		       "Using asynchronous transfers\n",
		       ahc_name(ahc), devinfo->channel,
		       devinfo->target, devinfo->lun);
	} else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
		int tag_type;
		int mask;

		tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);

		if (tag_type == MSG_SIMPLE_TASK) {
			printf("(%s:%c:%d:%d): refuses tagged commands.  "
			       "Performing non-tagged I/O\n", ahc_name(ahc),
			       devinfo->channel, devinfo->target, devinfo->lun);
			ahc_set_tags(ahc, devinfo, AHC_QUEUE_NONE);
			mask = ~0x23;
		} else {
			printf("(%s:%c:%d:%d): refuses %s tagged commands.  "
			       "Performing simple queue tagged I/O only\n",
			       ahc_name(ahc), devinfo->channel, devinfo->target,
			       devinfo->lun, tag_type == MSG_ORDERED_TASK
			       ? "ordered" : "head of queue");
			ahc_set_tags(ahc, devinfo, AHC_QUEUE_BASIC);
			mask = ~0x03;
		}

		/*
		 * Resend the identify for this CCB as the target
		 * may believe that the selection is invalid otherwise.
		 */
		ahc_outb(ahc, SCB_CONTROL,
			 ahc_inb(ahc, SCB_CONTROL) & mask);
	 	scb->hscb->control &= mask;
		ahc_set_transaction_tag(scb, /*enabled*/FALSE,
					/*type*/MSG_SIMPLE_TASK);
		ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
		ahc_assert_atn(ahc);

		/*
		 * This transaction is now at the head of
		 * the untagged queue for this target.
		 */
		if ((ahc->flags & AHC_SCB_BTT) == 0) {
			struct scb_tailq *untagged_q;

			untagged_q =
			    &(ahc->untagged_queues[devinfo->target_offset]);
			TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe);
			scb->flags |= SCB_UNTAGGEDQ;
		}
		ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
			     scb->hscb->tag);

		/*
		 * Requeue all tagged commands for this target
		 * currently in our possession so they can be
		 * converted to untagged commands.
		 */
		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
				   SCB_GET_CHANNEL(ahc, scb),
				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
				   SEARCH_COMPLETE);
	} else {
		/*
		 * Otherwise, we ignore it.
		 */
		printf("%s:%c:%d: Message reject for %x -- ignored\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       last_msg);
	}
	return (response);
}

/*
 * Process an ignore wide residue message.
 */
static void
ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	u_int scb_index;
	struct scb *scb;

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	/*
	 * XXX Actually check data direction in the sequencer?
	 * Perhaps add datadir to some spare bits in the hscb?
	 */
	if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
	 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) {
		/*
		 * Ignore the message if we haven't
		 * seen an appropriate data phase yet.
		 */
	} else {
		/*
		 * If the residual occurred on the last
		 * transfer and the transfer request was
		 * expected to end on an odd count, do
		 * nothing.  Otherwise, subtract a byte
		 * and update the residual count accordingly.
3388 */ 3389 uint32_t sgptr; 3390 3391 sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR); 3392 if ((sgptr & SG_LIST_NULL) != 0 3393 && ahc_inb(ahc, DATA_COUNT_ODD) == 1) { 3394 /* 3395 * If the residual occurred on the last 3396 * transfer and the transfer request was 3397 * expected to end on an odd count, do 3398 * nothing. 3399 */ 3400 } else { 3401 struct ahc_dma_seg *sg; 3402 uint32_t data_cnt; 3403 uint32_t data_addr; 3404 uint32_t sglen; 3405 3406 /* Pull in the rest of the sgptr */ 3407 sgptr |= (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24) 3408 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16) 3409 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8); 3410 sgptr &= SG_PTR_MASK; 3411 data_cnt = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+3) << 24) 3412 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+2) << 16) 3413 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+1) << 8) 3414 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT)); 3415 3416 data_addr = (ahc_inb(ahc, SHADDR + 3) << 24) 3417 | (ahc_inb(ahc, SHADDR + 2) << 16) 3418 | (ahc_inb(ahc, SHADDR + 1) << 8) 3419 | (ahc_inb(ahc, SHADDR)); 3420 3421 data_cnt += 1; 3422 data_addr -= 1; 3423 3424 sg = ahc_sg_bus_to_virt(scb, sgptr); 3425 /* 3426 * The residual sg ptr points to the next S/G 3427 * to load so we must go back one. 3428 */ 3429 sg--; 3430 sglen = ahc_le32toh(sg->len) & AHC_SG_LEN_MASK; 3431 if (sg != scb->sg_list 3432 && sglen < (data_cnt & AHC_SG_LEN_MASK)) { 3433 3434 sg--; 3435 sglen = ahc_le32toh(sg->len); 3436 /* 3437 * Preserve High Address and SG_LIST bits 3438 * while setting the count to 1. 3439 */ 3440 data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK)); 3441 data_addr = ahc_le32toh(sg->addr) 3442 + (sglen & AHC_SG_LEN_MASK) - 1; 3443 3444 /* 3445 * Increment sg so it points to the 3446 * "next" sg. 
3447 */ 3448 sg++; 3449 sgptr = ahc_sg_virt_to_bus(scb, sg); 3450 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 3, 3451 sgptr >> 24); 3452 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 2, 3453 sgptr >> 16); 3454 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 1, 3455 sgptr >> 8); 3456 ahc_outb(ahc, SCB_RESIDUAL_SGPTR, sgptr); 3457 } 3458 3459 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 3, data_cnt >> 24); 3460 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 2, data_cnt >> 16); 3461 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 1, data_cnt >> 8); 3462 ahc_outb(ahc, SCB_RESIDUAL_DATACNT, data_cnt); 3463 } 3464 } 3465 } 3466 3467 3468 /* 3469 * Reinitialize the data pointers for the active transfer 3470 * based on its current residual. 3471 */ 3472 static void 3473 ahc_reinitialize_dataptrs(struct ahc_softc *ahc) 3474 { 3475 struct scb *scb; 3476 struct ahc_dma_seg *sg; 3477 u_int scb_index; 3478 uint32_t sgptr; 3479 uint32_t resid; 3480 uint32_t dataptr; 3481 3482 scb_index = ahc_inb(ahc, SCB_TAG); 3483 scb = ahc_lookup_scb(ahc, scb_index); 3484 sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24) 3485 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16) 3486 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8) 3487 | ahc_inb(ahc, SCB_RESIDUAL_SGPTR); 3488 3489 sgptr &= SG_PTR_MASK; 3490 sg = ahc_sg_bus_to_virt(scb, sgptr); 3491 3492 /* The residual sg_ptr always points to the next sg */ 3493 sg--; 3494 3495 resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16) 3496 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8) 3497 | ahc_inb(ahc, SCB_RESIDUAL_DATACNT); 3498 3499 dataptr = ahc_le32toh(sg->addr) 3500 + (ahc_le32toh(sg->len) & AHC_SG_LEN_MASK) 3501 - resid; 3502 if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { 3503 u_int dscommand1; 3504 3505 dscommand1 = ahc_inb(ahc, DSCOMMAND1); 3506 ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); 3507 ahc_outb(ahc, HADDR, 3508 (ahc_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS); 3509 ahc_outb(ahc, DSCOMMAND1, dscommand1); 3510 } 3511 ahc_outb(ahc, HADDR + 3, dataptr >> 24); 3512 ahc_outb(ahc, HADDR + 
2, dataptr >> 16); 3513 ahc_outb(ahc, HADDR + 1, dataptr >> 8); 3514 ahc_outb(ahc, HADDR, dataptr); 3515 ahc_outb(ahc, HCNT + 2, resid >> 16); 3516 ahc_outb(ahc, HCNT + 1, resid >> 8); 3517 ahc_outb(ahc, HCNT, resid); 3518 if ((ahc->features & AHC_ULTRA2) == 0) { 3519 ahc_outb(ahc, STCNT + 2, resid >> 16); 3520 ahc_outb(ahc, STCNT + 1, resid >> 8); 3521 ahc_outb(ahc, STCNT, resid); 3522 } 3523 } 3524 3525 /* 3526 * Handle the effects of issuing a bus device reset message. 3527 */ 3528 static void 3529 ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 3530 cam_status status, char *message, int verbose_level) 3531 { 3532 #ifdef AHC_TARGET_MODE 3533 struct ahc_tmode_tstate* tstate; 3534 u_int lun; 3535 #endif 3536 int found; 3537 3538 found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, 3539 CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role, 3540 status); 3541 3542 #ifdef AHC_TARGET_MODE 3543 /* 3544 * Send an immediate notify ccb to all target mord peripheral 3545 * drivers affected by this action. 3546 */ 3547 tstate = ahc->enabled_targets[devinfo->our_scsiid]; 3548 if (tstate != NULL) { 3549 for (lun = 0; lun < AHC_NUM_LUNS; lun++) { 3550 struct ahc_tmode_lstate* lstate; 3551 3552 lstate = tstate->enabled_luns[lun]; 3553 if (lstate == NULL) 3554 continue; 3555 3556 ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid, 3557 MSG_BUS_DEV_RESET, /*arg*/0); 3558 ahc_send_lstate_events(ahc, lstate); 3559 } 3560 } 3561 #endif 3562 3563 /* 3564 * Go back to async/narrow transfers and renegotiate. 3565 */ 3566 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 3567 AHC_TRANS_CUR, /*paused*/TRUE); 3568 ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, 3569 /*period*/0, /*offset*/0, /*ppr_options*/0, 3570 AHC_TRANS_CUR, /*paused*/TRUE); 3571 3572 ahc_send_async(ahc, devinfo->channel, devinfo->target, 3573 CAM_LUN_WILDCARD, AC_SENT_BDR, NULL); 3574 3575 if (message != NULL 3576 && (verbose_level <= bootverbose)) 3577 printf("%s: %s on %c:%d. 
%d SCBs aborted\n", ahc_name(ahc), 3578 message, devinfo->channel, devinfo->target, found); 3579 } 3580 3581 #ifdef AHC_TARGET_MODE 3582 static void 3583 ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 3584 struct scb *scb) 3585 { 3586 3587 /* 3588 * To facilitate adding multiple messages together, 3589 * each routine should increment the index and len 3590 * variables instead of setting them explicitly. 3591 */ 3592 ahc->msgout_index = 0; 3593 ahc->msgout_len = 0; 3594 3595 if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) 3596 ahc_build_transfer_msg(ahc, devinfo); 3597 else 3598 panic("ahc_intr: AWAITING target message with no message"); 3599 3600 ahc->msgout_index = 0; 3601 ahc->msg_type = MSG_TYPE_TARGET_MSGIN; 3602 } 3603 #endif 3604 /**************************** Initialization **********************************/ 3605 /* 3606 * Allocate a controller structure for a new device 3607 * and perform initial initializion. 3608 */ 3609 struct ahc_softc * 3610 ahc_alloc(void *platform_arg, char *name) 3611 { 3612 struct ahc_softc *ahc; 3613 int i; 3614 3615 #ifndef __FreeBSD__ 3616 ahc = malloc(sizeof(*ahc), M_DEVBUF, M_NOWAIT); 3617 if (!ahc) { 3618 printf("aic7xxx: cannot malloc softc!\n"); 3619 free(name, M_DEVBUF); 3620 return NULL; 3621 } 3622 #else 3623 ahc = device_get_softc((device_t)platform_arg); 3624 #endif 3625 memset(ahc, 0, sizeof(*ahc)); 3626 ahc->seep_config = malloc(sizeof(*ahc->seep_config), 3627 M_DEVBUF, M_NOWAIT); 3628 if (ahc->seep_config == NULL) { 3629 #ifndef __FreeBSD__ 3630 free(ahc, M_DEVBUF); 3631 #endif 3632 free(name, M_DEVBUF); 3633 return (NULL); 3634 } 3635 LIST_INIT(&ahc->pending_scbs); 3636 /* We don't know our unit number until the OSM sets it */ 3637 ahc->name = name; 3638 ahc->unit = -1; 3639 ahc->description = NULL; 3640 ahc->channel = 'A'; 3641 ahc->channel_b = 'B'; 3642 ahc->chip = AHC_NONE; 3643 ahc->features = AHC_FENONE; 3644 ahc->bugs = AHC_BUGNONE; 3645 ahc->flags = AHC_FNONE; 3646 3647 
	for (i = 0; i < AHC_NUM_TARGETS; i++)
		TAILQ_INIT(&ahc->untagged_queues[i]);
	if (ahc_platform_alloc(ahc, platform_arg) != 0) {
		/* ahc_free releases name/seep_config for us */
		ahc_free(ahc);
		ahc = NULL;
	}
	return (ahc);
}

/*
 * Perform chip-independent softc setup: record pause/unpause
 * values and allocate the shared scb_data if none was provided.
 * Returns 0 on success or ENOMEM.
 */
int
ahc_softc_init(struct ahc_softc *ahc)
{

	/* The IRQMS bit is only valid on VL and EISA chips */
	if ((ahc->chip & AHC_PCI) == 0)
		ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS;
	else
		ahc->unpause = 0;
	ahc->pause = ahc->unpause | PAUSE;
	/* XXX The shared scb data stuff should be deprecated */
	if (ahc->scb_data == NULL) {
		ahc->scb_data = malloc(sizeof(*ahc->scb_data),
				       M_DEVBUF, M_NOWAIT);
		if (ahc->scb_data == NULL)
			return (ENOMEM);
		memset(ahc->scb_data, 0, sizeof(*ahc->scb_data));
	}

	return (0);
}

/*
 * Add a fully configured controller to the global list of
 * controllers, keeping the list sorted via ahc_softc_comp().
 */
void
ahc_softc_insert(struct ahc_softc *ahc)
{
	struct ahc_softc *list_ahc;

#if AHC_PCI_CONFIG > 0
	/*
	 * Second Function PCI devices need to inherit some
	 * settings from function 0.
	 */
	if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI
	 && (ahc->features & AHC_MULTI_FUNC) != 0) {
		TAILQ_FOREACH(list_ahc, &ahc_tailq, links) {
			ahc_dev_softc_t list_pci;
			ahc_dev_softc_t pci;

			list_pci = list_ahc->dev_softc;
			pci = ahc->dev_softc;
			/* Same physical card if bus and slot match */
			if (ahc_get_pci_slot(list_pci) == ahc_get_pci_slot(pci)
			 && ahc_get_pci_bus(list_pci) == ahc_get_pci_bus(pci)) {
				struct ahc_softc *master;
				struct ahc_softc *slave;

				/* Function 0 carries the BIOS settings */
				if (ahc_get_pci_function(list_pci) == 0) {
					master = list_ahc;
					slave = ahc;
				} else {
					master = ahc;
					slave = list_ahc;
				}
				slave->flags &= ~AHC_BIOS_ENABLED;
				slave->flags |=
				    master->flags & AHC_BIOS_ENABLED;
				slave->flags &= ~AHC_PRIMARY_CHANNEL;
				slave->flags |=
				    master->flags & AHC_PRIMARY_CHANNEL;
				break;
			}
		}
	}
#endif

	/*
	 * Insertion sort into our list of softcs.
	 */
	list_ahc = TAILQ_FIRST(&ahc_tailq);
	while (list_ahc != NULL
	    && ahc_softc_comp(list_ahc, ahc) <= 0)
		list_ahc = TAILQ_NEXT(list_ahc, links);
	if (list_ahc != NULL)
		TAILQ_INSERT_BEFORE(list_ahc, ahc, links);
	else
		TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links);
	ahc->init_level++;
}

/*
 * Verify that the passed in softc pointer is for a
 * controller that is still configured.
 */
struct ahc_softc *
ahc_find_softc(struct ahc_softc *ahc)
{
	struct ahc_softc *list_ahc;

	TAILQ_FOREACH(list_ahc, &ahc_tailq, links) {
		if (list_ahc == ahc)
			return (ahc);
	}
	return (NULL);
}

/* Record the OSM-assigned unit number for this controller. */
void
ahc_set_unit(struct ahc_softc *ahc, int unit)
{
	ahc->unit = unit;
}

/*
 * Replace the controller's name.  Takes ownership of "name";
 * any previous name is released.
 */
void
ahc_set_name(struct ahc_softc *ahc, char *name)
{
	if (ahc->name != NULL)
		free(ahc->name, M_DEVBUF);
	ahc->name = name;
}

/*
 * Tear down a controller.  The init_level switch releases only
 * the resources acquired so far; each case falls through to the
 * teardown steps for the lower levels.
 */
void
ahc_free(struct ahc_softc *ahc)
{
	int i;

	ahc_fini_scbdata(ahc);
	switch (ahc->init_level) {
	default:
	case 5:
		ahc_shutdown(ahc);
		TAILQ_REMOVE(&ahc_tailq, ahc, links);
		/* FALLTHROUGH */
	case 4:
		ahc_dmamap_unload(ahc, ahc->shared_data_dmat,
				  ahc->shared_data_dmamap);
		/* FALLTHROUGH */
	case 3:
		ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo,
				ahc->shared_data_dmamap);
		ahc_dmamap_destroy(ahc, ahc->shared_data_dmat,
				   ahc->shared_data_dmamap);
		/* FALLTHROUGH */
	case 2:
		ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
		/* FALLTHROUGH */
	case 1:
#ifndef __linux__
		ahc_dma_tag_destroy(ahc, ahc->buffer_dmat);
#endif
		break;
	case 0:
		break;
	}

#ifndef __linux__
	ahc_dma_tag_destroy(ahc, ahc->parent_dmat);
#endif
	ahc_platform_free(ahc);
	for (i = 0; i < AHC_NUM_TARGETS; i++) {
		struct ahc_tmode_tstate *tstate;

		tstate = ahc->enabled_targets[i];
3805 if (tstate != NULL) { 3806 #if AHC_TARGET_MODE 3807 int j; 3808 3809 for (j = 0; j < AHC_NUM_LUNS; j++) { 3810 struct ahc_tmode_lstate *lstate; 3811 3812 lstate = tstate->enabled_luns[j]; 3813 if (lstate != NULL) { 3814 xpt_free_path(lstate->path); 3815 free(lstate, M_DEVBUF); 3816 } 3817 } 3818 #endif 3819 free(tstate, M_DEVBUF); 3820 } 3821 } 3822 #if AHC_TARGET_MODE 3823 if (ahc->black_hole != NULL) { 3824 xpt_free_path(ahc->black_hole->path); 3825 free(ahc->black_hole, M_DEVBUF); 3826 } 3827 #endif 3828 if (ahc->name != NULL) 3829 free(ahc->name, M_DEVBUF); 3830 if (ahc->seep_config != NULL) 3831 free(ahc->seep_config, M_DEVBUF); 3832 #ifndef __FreeBSD__ 3833 free(ahc, M_DEVBUF); 3834 #endif 3835 return; 3836 } 3837 3838 void 3839 ahc_shutdown(void *arg) 3840 { 3841 struct ahc_softc *ahc; 3842 int i; 3843 3844 ahc = (struct ahc_softc *)arg; 3845 3846 /* This will reset most registers to 0, but not all */ 3847 ahc_reset(ahc); 3848 ahc_outb(ahc, SCSISEQ, 0); 3849 ahc_outb(ahc, SXFRCTL0, 0); 3850 ahc_outb(ahc, DSPCISTATUS, 0); 3851 3852 for (i = TARG_SCSIRATE; i < SCSICONF; i++) 3853 ahc_outb(ahc, i, 0); 3854 } 3855 3856 /* 3857 * Reset the controller and record some information about it 3858 * that is only available just after a reset. 3859 */ 3860 int 3861 ahc_reset(struct ahc_softc *ahc) 3862 { 3863 u_int sblkctl; 3864 u_int sxfrctl1_a, sxfrctl1_b; 3865 int wait; 3866 3867 /* 3868 * Preserve the value of the SXFRCTL1 register for all channels. 3869 * It contains settings that affect termination and we don't want 3870 * to disturb the integrity of the bus. 3871 */ 3872 ahc_pause(ahc); 3873 sxfrctl1_b = 0; 3874 if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) { 3875 u_int sblkctl; 3876 3877 /* 3878 * Save channel B's settings in case this chip 3879 * is setup for TWIN channel operation. 
3880 */ 3881 sblkctl = ahc_inb(ahc, SBLKCTL); 3882 ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); 3883 sxfrctl1_b = ahc_inb(ahc, SXFRCTL1); 3884 ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); 3885 } 3886 sxfrctl1_a = ahc_inb(ahc, SXFRCTL1); 3887 3888 ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause); 3889 3890 /* 3891 * Ensure that the reset has finished. We delay 1000us 3892 * prior to reading the register to make sure the chip 3893 * has sufficiently completed its reset to handle register 3894 * accesses. 3895 */ 3896 wait = 1000; 3897 do { 3898 ahc_delay(1000); 3899 } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK)); 3900 3901 if (wait == 0) { 3902 printf("%s: WARNING - Failed chip reset! " 3903 "Trying to initialize anyway.\n", ahc_name(ahc)); 3904 } 3905 ahc_outb(ahc, HCNTRL, ahc->pause); 3906 3907 /* Determine channel configuration */ 3908 sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE); 3909 /* No Twin Channel PCI cards */ 3910 if ((ahc->chip & AHC_PCI) != 0) 3911 sblkctl &= ~SELBUSB; 3912 switch (sblkctl) { 3913 case 0: 3914 /* Single Narrow Channel */ 3915 break; 3916 case 2: 3917 /* Wide Channel */ 3918 ahc->features |= AHC_WIDE; 3919 break; 3920 case 8: 3921 /* Twin Channel */ 3922 ahc->features |= AHC_TWIN; 3923 break; 3924 default: 3925 printf(" Unsupported adapter type. Ignoring\n"); 3926 return(-1); 3927 } 3928 3929 /* 3930 * Reload sxfrctl1. 3931 * 3932 * We must always initialize STPWEN to 1 before we 3933 * restore the saved values. STPWEN is initialized 3934 * to a tri-state condition which can only be cleared 3935 * by turning it on. 
3936 */ 3937 if ((ahc->features & AHC_TWIN) != 0) { 3938 u_int sblkctl; 3939 3940 sblkctl = ahc_inb(ahc, SBLKCTL); 3941 ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); 3942 ahc_outb(ahc, SXFRCTL1, sxfrctl1_b); 3943 ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); 3944 } 3945 ahc_outb(ahc, SXFRCTL1, sxfrctl1_a); 3946 3947 #ifdef AHC_DUMP_SEQ 3948 if (ahc->init_level == 0) 3949 ahc_dumpseq(ahc); 3950 #endif 3951 3952 return (0); 3953 } 3954 3955 /* 3956 * Determine the number of SCBs available on the controller 3957 */ 3958 int 3959 ahc_probe_scbs(struct ahc_softc *ahc) { 3960 int i; 3961 3962 for (i = 0; i < AHC_SCB_MAX; i++) { 3963 3964 ahc_outb(ahc, SCBPTR, i); 3965 ahc_outb(ahc, SCB_BASE, i); 3966 if (ahc_inb(ahc, SCB_BASE) != i) 3967 break; 3968 ahc_outb(ahc, SCBPTR, 0); 3969 if (ahc_inb(ahc, SCB_BASE) != 0) 3970 break; 3971 } 3972 return (i); 3973 } 3974 3975 static void 3976 ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 3977 { 3978 bus_addr_t *baddr; 3979 3980 baddr = (bus_addr_t *)arg; 3981 *baddr = segs->ds_addr; 3982 } 3983 3984 static void 3985 ahc_build_free_scb_list(struct ahc_softc *ahc) 3986 { 3987 int scbsize; 3988 int i; 3989 3990 scbsize = 32; 3991 if ((ahc->flags & AHC_LSCBS_ENABLED) != 0) 3992 scbsize = 64; 3993 3994 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 3995 int j; 3996 3997 ahc_outb(ahc, SCBPTR, i); 3998 3999 /* 4000 * Touch all SCB bytes to avoid parity errors 4001 * should one of our debugging routines read 4002 * an otherwise uninitiatlized byte. 4003 */ 4004 for (j = 0; j < scbsize; j++) 4005 ahc_outb(ahc, SCB_BASE+j, 0xFF); 4006 4007 /* Clear the control byte. 
		 */
		ahc_outb(ahc, SCB_CONTROL, 0);

		/* Set the next pointer */
		if ((ahc->flags & AHC_PAGESCBS) != 0)
			ahc_outb(ahc, SCB_NEXT, i+1);
		else
			ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);

		/* Make the tag number, SCSIID, and lun invalid */
		ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
		ahc_outb(ahc, SCB_SCSIID, 0xFF);
		ahc_outb(ahc, SCB_LUN, 0xFF);
	}

	/* Make sure that the last SCB terminates the free list */
	ahc_outb(ahc, SCBPTR, i-1);
	ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
}

/*
 * Allocate and initialize all per-controller SCB bookkeeping:
 * the software scb array, hardware SCBs, sense buffers, S/G
 * lists and their DMA tags/maps.  Returns 0 on success or an
 * errno; partial allocations are released by ahc_fini_scbdata()
 * based on scb_data->init_level.
 */
static int
ahc_init_scbdata(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;

	scb_data = ahc->scb_data;
	SLIST_INIT(&scb_data->free_scbs);
	SLIST_INIT(&scb_data->sg_maps);

	/* Allocate SCB resources */
	scb_data->scbarray =
	    (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC,
				 M_DEVBUF, M_NOWAIT);
	if (scb_data->scbarray == NULL)
		return (ENOMEM);
	memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC);

	/* Determine the number of hardware SCBs and initialize them */

	scb_data->maxhscbs = ahc_probe_scbs(ahc);
	if ((ahc->flags & AHC_PAGESCBS) != 0) {
		/* SCB 0 heads the free list */
		ahc_outb(ahc, FREE_SCBH, 0);
	} else {
		ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
	}

	if (ahc->scb_data->maxhscbs == 0) {
		printf("%s: No SCB space found\n", ahc_name(ahc));
		return (ENXIO);
	}

	ahc_build_free_scb_list(ahc);

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 */

	/* DMA tag for our hardware scb structures */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocation for our hscbs */
	if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat,
			     (void **)&scb_data->hscbs,
			     BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them */
	ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap,
			scb_data->hscbs,
			AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
			ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0);

	scb_data->init_level++;

	/* DMA tag for our sense buffers */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sense_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocate them */
	if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat,
			     (void **)&scb_data->sense,
			     BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them */
	ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap,
			scb_data->sense,
			AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
			ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0);

	scb_data->init_level++;

	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/8,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sg_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Perform initial CCB allocation */
	memset(scb_data->hscbs, 0,
	       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb));
	ahc_alloc_scbs(ahc);

	if (scb_data->numscbs == 0) {
		printf("%s: ahc_init_scbdata - "
		       "Unable to allocate initial scbs\n",
		       ahc_name(ahc));
		goto error_exit;
	}

	/*
	 * Tell the sequencer which SCB will be the next one it receives.
	 */
	ahc->next_queued_scb = ahc_get_scb(ahc);
	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);

	/*
	 * Note that we were successful
	 */
	return (0); 

error_exit:

	return (ENOMEM);
}

/*
 * Release SCB resources in the reverse of the order in which
 * ahc_init_scbdata() acquired them; init_level selects where
 * to start and each case falls through to the next.
 */
static void
ahc_fini_scbdata(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;

	scb_data = ahc->scb_data;
	if (scb_data == NULL)
		return;

	switch (scb_data->init_level) {
	default:
	case 7:
	{
		struct sg_map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
			ahc_dmamap_unload(ahc, scb_data->sg_dmat,
					  sg_map->sg_dmamap);
			ahc_dmamem_free(ahc, scb_data->sg_dmat,
					sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
	}
	/* FALLTHROUGH */
	case 6:
		ahc_dmamap_unload(ahc, scb_data->sense_dmat,
				  scb_data->sense_dmamap);
	/*
	 * Tail of ahc_fini_scbdata(): teardown ordered by init_level.
	 * Each case deliberately falls through so that level N undoes
	 * its own allocation and then everything the lower levels built.
	 */
	case 5:
		ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
				scb_data->sense_dmamap);
		ahc_dmamap_destroy(ahc, scb_data->sense_dmat,
				   scb_data->sense_dmamap);
		/* FALLTHROUGH */
	case 4:
		ahc_dma_tag_destroy(ahc, scb_data->sense_dmat);
		/* FALLTHROUGH */
	case 3:
		ahc_dmamap_unload(ahc, scb_data->hscb_dmat,
				  scb_data->hscb_dmamap);
		/* FALLTHROUGH */
	case 2:
		ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
				scb_data->hscb_dmamap);
		ahc_dmamap_destroy(ahc, scb_data->hscb_dmat,
				   scb_data->hscb_dmamap);
		/* FALLTHROUGH */
	case 1:
		ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat);
		break;
	case 0:
		break;
	}
	if (scb_data->scbarray != NULL)
		free(scb_data->scbarray, M_DEVBUF);
}

/*
 * Grow the pool of SCBs (up to AHC_SCB_MAX_ALLOC) by carving one more
 * page of S/G list space into per-SCB segments.  Allocation failures
 * are not fatal: the routine simply stops early, leaving numscbs at
 * however many SCBs were successfully set up.
 */
void
ahc_alloc_scbs(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;
	struct scb *next_scb;
	struct sg_map_node *sg_map;
	bus_addr_t physaddr;
	struct ahc_dma_seg *segs;
	int newcount;
	int i;

	scb_data = ahc->scb_data;
	if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC)
		/* Can't allocate any more */
		return;

	next_scb = &scb_data->scbarray[scb_data->numscbs];

	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

	if (sg_map == NULL)
		return;

	/* Allocate S/G space for the next batch of SCBS */
	if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat,
			     (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		free(sg_map, M_DEVBUF);
		return;
	}

	SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);

	ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap,
			sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb,
			&sg_map->sg_physaddr, /*flags*/0);

	segs = sg_map->sg_vaddr;
	physaddr = sg_map->sg_physaddr;

	/* How many AHC_NSEG-sized S/G lists fit in one page? */
	newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
	newcount = MIN(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs));
	for (i = 0; i < newcount; i++) {
		struct scb_platform_data *pdata;
#ifndef __linux__
		int error;
#endif
		pdata = (struct scb_platform_data *)malloc(sizeof(*pdata),
							   M_DEVBUF, M_NOWAIT);
		if (pdata == NULL)
			break;
		next_scb->platform_data = pdata;
		next_scb->sg_map = sg_map;
		next_scb->sg_list = segs;
		/*
		 * The sequencer always starts with the second entry.
		 * The first entry is embedded in the scb.
		 */
		next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
		next_scb->ahc_softc = ahc;
		next_scb->flags = SCB_FREE;
#ifndef __linux__
		error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
					  &next_scb->dmamap);
		if (error != 0)
			break;
#endif
		next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
		next_scb->hscb->tag = ahc->scb_data->numscbs;
		SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
				  next_scb, links.sle);
		segs += AHC_NSEG;
		physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
		next_scb++;
		ahc->scb_data->numscbs++;
	}
}

/*
 * Format a human readable description of this controller into buf.
 * The caller must provide a buffer large enough for the longest
 * description (chip name, channel/id info, and SCB counts).
 */
void
ahc_controller_info(struct ahc_softc *ahc, char *buf)
{
	int len;

	len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
	buf += len;
	if ((ahc->features & AHC_TWIN) != 0)
		len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
			      "B SCSI Id=%d, primary %c, ",
			      ahc->our_id, ahc->our_id_b,
			      (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A');
	else {
		const char *speed;
		const char *type;

		speed = "";
		if ((ahc->features & AHC_ULTRA) != 0) {
			speed = "Ultra ";
		} else if ((ahc->features & AHC_DT) != 0) {
			speed = "Ultra160 ";
		} else if ((ahc->features & AHC_ULTRA2) != 0) {
			speed = "Ultra2 ";
		}
		if ((ahc->features & AHC_WIDE) != 0) {
			type = "Wide";
		} else {
			type = "Single";
		}
		len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ",
			      speed, type, ahc->channel, ahc->our_id);
	}
	buf +=
len; 4342 4343 if ((ahc->flags & AHC_PAGESCBS) != 0) 4344 sprintf(buf, "%d/%d SCBs", 4345 ahc->scb_data->maxhscbs, AHC_MAX_QUEUE); 4346 else 4347 sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs); 4348 } 4349 4350 /* 4351 * Start the board, ready for normal operation 4352 */ 4353 int 4354 ahc_init(struct ahc_softc *ahc) 4355 { 4356 int max_targ; 4357 int i; 4358 int term; 4359 u_int scsi_conf; 4360 u_int scsiseq_template; 4361 u_int ultraenb; 4362 u_int discenable; 4363 u_int tagenable; 4364 size_t driver_data_size; 4365 uint32_t physaddr; 4366 4367 #ifdef AHC_DEBUG_SEQUENCER 4368 ahc->flags |= AHC_SEQUENCER_DEBUG; 4369 #endif 4370 4371 #ifdef AHC_PRINT_SRAM 4372 printf("Scratch Ram:"); 4373 for (i = 0x20; i < 0x5f; i++) { 4374 if (((i % 8) == 0) && (i != 0)) { 4375 printf ("\n "); 4376 } 4377 printf (" 0x%x", ahc_inb(ahc, i)); 4378 } 4379 if ((ahc->features & AHC_MORE_SRAM) != 0) { 4380 for (i = 0x70; i < 0x7f; i++) { 4381 if (((i % 8) == 0) && (i != 0)) { 4382 printf ("\n "); 4383 } 4384 printf (" 0x%x", ahc_inb(ahc, i)); 4385 } 4386 } 4387 printf ("\n"); 4388 /* 4389 * Reading uninitialized scratch ram may 4390 * generate parity errors. 4391 */ 4392 ahc_outb(ahc, CLRINT, CLRPARERR); 4393 ahc_outb(ahc, CLRINT, CLRBRKADRINT); 4394 #endif 4395 max_targ = 15; 4396 4397 /* 4398 * Assume we have a board at this stage and it has been reset. 4399 */ 4400 if ((ahc->flags & AHC_USEDEFAULTS) != 0) 4401 ahc->our_id = ahc->our_id_b = 7; 4402 4403 /* 4404 * Default to allowing initiator operations. 4405 */ 4406 ahc->flags |= AHC_INITIATORROLE; 4407 4408 /* 4409 * Only allow target mode features if this unit has them enabled. 4410 */ 4411 if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0) 4412 ahc->features &= ~AHC_TARGETMODE; 4413 4414 #ifndef __linux__ 4415 /* DMA tag for mapping buffers into device visible space. 
*/ 4416 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 4417 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 4418 /*lowaddr*/BUS_SPACE_MAXADDR, 4419 /*highaddr*/BUS_SPACE_MAXADDR, 4420 /*filter*/NULL, /*filterarg*/NULL, 4421 /*maxsize*/MAXBSIZE, /*nsegments*/AHC_NSEG, 4422 /*maxsegsz*/AHC_MAXTRANSFER_SIZE, 4423 /*flags*/BUS_DMA_ALLOCNOW, 4424 &ahc->buffer_dmat) != 0) { 4425 return (ENOMEM); 4426 } 4427 #endif 4428 4429 ahc->init_level++; 4430 4431 /* 4432 * DMA tag for our command fifos and other data in system memory 4433 * the card's sequencer must be able to access. For initiator 4434 * roles, we need to allocate space for the qinfifo and qoutfifo. 4435 * The qinfifo and qoutfifo are composed of 256 1 byte elements. 4436 * When providing for the target mode role, we must additionally 4437 * provide space for the incoming target command fifo and an extra 4438 * byte to deal with a dma bug in some chip versions. 4439 */ 4440 driver_data_size = 2 * 256 * sizeof(uint8_t); 4441 if ((ahc->features & AHC_TARGETMODE) != 0) 4442 driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd) 4443 + /*DMA WideOdd Bug Buffer*/1; 4444 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 4445 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 4446 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 4447 /*highaddr*/BUS_SPACE_MAXADDR, 4448 /*filter*/NULL, /*filterarg*/NULL, 4449 driver_data_size, 4450 /*nsegments*/1, 4451 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 4452 /*flags*/0, &ahc->shared_data_dmat) != 0) { 4453 return (ENOMEM); 4454 } 4455 4456 ahc->init_level++; 4457 4458 /* Allocation of driver data */ 4459 if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat, 4460 (void **)&ahc->qoutfifo, 4461 BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) { 4462 return (ENOMEM); 4463 } 4464 4465 ahc->init_level++; 4466 4467 /* And permanently map it in */ 4468 ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, 4469 ahc->qoutfifo, driver_data_size, ahc_dmamap_cb, 4470 
&ahc->shared_data_busaddr, /*flags*/0); 4471 4472 if ((ahc->features & AHC_TARGETMODE) != 0) { 4473 ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo; 4474 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS]; 4475 ahc->dma_bug_buf = ahc->shared_data_busaddr 4476 + driver_data_size - 1; 4477 /* All target command blocks start out invalid. */ 4478 for (i = 0; i < AHC_TMODE_CMDS; i++) 4479 ahc->targetcmds[i].cmd_valid = 0; 4480 ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD); 4481 ahc->tqinfifonext = 1; 4482 ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1); 4483 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); 4484 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256]; 4485 } 4486 ahc->qinfifo = &ahc->qoutfifo[256]; 4487 4488 ahc->init_level++; 4489 4490 /* Allocate SCB data now that buffer_dmat is initialized */ 4491 if (ahc->scb_data->maxhscbs == 0) 4492 if (ahc_init_scbdata(ahc) != 0) 4493 return (ENOMEM); 4494 4495 /* 4496 * Allocate a tstate to house information for our 4497 * initiator presence on the bus as well as the user 4498 * data for any target mode initiator. 4499 */ 4500 if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) { 4501 printf("%s: unable to allocate ahc_tmode_tstate. " 4502 "Failing attach\n", ahc_name(ahc)); 4503 return (ENOMEM); 4504 } 4505 4506 if ((ahc->features & AHC_TWIN) != 0) { 4507 if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) { 4508 printf("%s: unable to allocate ahc_tmode_tstate. 
" 4509 "Failing attach\n", ahc_name(ahc)); 4510 return (ENOMEM); 4511 } 4512 } 4513 4514 ahc_outb(ahc, SEQ_FLAGS, 0); 4515 ahc_outb(ahc, SEQ_FLAGS2, 0); 4516 4517 if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) { 4518 ahc->flags |= AHC_PAGESCBS; 4519 } else { 4520 ahc->flags &= ~AHC_PAGESCBS; 4521 } 4522 4523 #ifdef AHC_DEBUG 4524 if (ahc_debug & AHC_SHOW_MISC) { 4525 printf("%s: hardware scb %d bytes; kernel scb %d bytes; " 4526 "ahc_dma %d bytes\n", 4527 ahc_name(ahc), 4528 sizeof(struct hardware_scb), 4529 sizeof(struct scb), 4530 sizeof(struct ahc_dma_seg)); 4531 } 4532 #endif /* AHC_DEBUG */ 4533 4534 /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/ 4535 if (ahc->features & AHC_TWIN) { 4536 4537 /* 4538 * The device is gated to channel B after a chip reset, 4539 * so set those values first 4540 */ 4541 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); 4542 term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0; 4543 ahc_outb(ahc, SCSIID, ahc->our_id_b); 4544 scsi_conf = ahc_inb(ahc, SCSICONF + 1); 4545 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4546 |term|ahc->seltime_b|ENSTIMER|ACTNEGEN); 4547 if ((ahc->features & AHC_ULTRA2) != 0) 4548 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); 4549 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4550 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4551 4552 if ((scsi_conf & RESET_SCSI) != 0 4553 && (ahc->flags & AHC_INITIATORROLE) != 0) 4554 ahc->flags |= AHC_RESET_BUS_B; 4555 4556 /* Select Channel A */ 4557 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 4558 } 4559 term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? 
STPWEN : 0; 4560 if ((ahc->features & AHC_ULTRA2) != 0) 4561 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); 4562 else 4563 ahc_outb(ahc, SCSIID, ahc->our_id); 4564 scsi_conf = ahc_inb(ahc, SCSICONF); 4565 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4566 |term|ahc->seltime 4567 |ENSTIMER|ACTNEGEN); 4568 if ((ahc->features & AHC_ULTRA2) != 0) 4569 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); 4570 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4571 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4572 4573 if ((scsi_conf & RESET_SCSI) != 0 4574 && (ahc->flags & AHC_INITIATORROLE) != 0) 4575 ahc->flags |= AHC_RESET_BUS_A; 4576 4577 /* 4578 * Look at the information that board initialization or 4579 * the board bios has left us. 4580 */ 4581 ultraenb = 0; 4582 tagenable = ALL_TARGETS_MASK; 4583 4584 /* Grab the disconnection disable table and invert it for our needs */ 4585 if ((ahc->flags & AHC_USEDEFAULTS) != 0) { 4586 printf("%s: Host Adapter Bios disabled. Using default SCSI " 4587 "device parameters\n", ahc_name(ahc)); 4588 ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B| 4589 AHC_TERM_ENB_A|AHC_TERM_ENB_B; 4590 discenable = ALL_TARGETS_MASK; 4591 if ((ahc->features & AHC_ULTRA) != 0) 4592 ultraenb = ALL_TARGETS_MASK; 4593 } else { 4594 discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8) 4595 | ahc_inb(ahc, DISC_DSB)); 4596 if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0) 4597 ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8) 4598 | ahc_inb(ahc, ULTRA_ENB); 4599 } 4600 4601 if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) 4602 max_targ = 7; 4603 4604 for (i = 0; i <= max_targ; i++) { 4605 struct ahc_initiator_tinfo *tinfo; 4606 struct ahc_tmode_tstate *tstate; 4607 u_int our_id; 4608 u_int target_id; 4609 char channel; 4610 4611 channel = 'A'; 4612 our_id = ahc->our_id; 4613 target_id = i; 4614 if (i > 7 && (ahc->features & AHC_TWIN) != 0) { 4615 channel = 'B'; 4616 our_id = ahc->our_id_b; 4617 target_id = i % 8; 4618 } 4619 tinfo = 
ahc_fetch_transinfo(ahc, channel, our_id, 4620 target_id, &tstate); 4621 /* Default to async narrow across the board */ 4622 memset(tinfo, 0, sizeof(*tinfo)); 4623 if (ahc->flags & AHC_USEDEFAULTS) { 4624 if ((ahc->features & AHC_WIDE) != 0) 4625 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4626 4627 /* 4628 * These will be truncated when we determine the 4629 * connection type we have with the target. 4630 */ 4631 tinfo->user.period = ahc_syncrates->period; 4632 tinfo->user.offset = ~0; 4633 } else { 4634 u_int scsirate; 4635 uint16_t mask; 4636 4637 /* Take the settings leftover in scratch RAM. */ 4638 scsirate = ahc_inb(ahc, TARG_SCSIRATE + i); 4639 mask = (0x01 << i); 4640 if ((ahc->features & AHC_ULTRA2) != 0) { 4641 u_int offset; 4642 u_int maxsync; 4643 4644 if ((scsirate & SOFS) == 0x0F) { 4645 /* 4646 * Haven't negotiated yet, 4647 * so the format is different. 4648 */ 4649 scsirate = (scsirate & SXFR) >> 4 4650 | (ultraenb & mask) 4651 ? 0x08 : 0x0 4652 | (scsirate & WIDEXFER); 4653 offset = MAX_OFFSET_ULTRA2; 4654 } else 4655 offset = ahc_inb(ahc, TARG_OFFSET + i); 4656 if ((scsirate & ~WIDEXFER) == 0 && offset != 0) 4657 /* Set to the lowest sync rate, 5MHz */ 4658 scsirate |= 0x1c; 4659 maxsync = AHC_SYNCRATE_ULTRA2; 4660 if ((ahc->features & AHC_DT) != 0) 4661 maxsync = AHC_SYNCRATE_DT; 4662 tinfo->user.period = 4663 ahc_find_period(ahc, scsirate, maxsync); 4664 if (offset == 0) 4665 tinfo->user.period = 0; 4666 else 4667 tinfo->user.offset = ~0; 4668 if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/ 4669 && (ahc->features & AHC_DT) != 0) 4670 tinfo->user.ppr_options = 4671 MSG_EXT_PPR_DT_REQ; 4672 } else if ((scsirate & SOFS) != 0) { 4673 if ((scsirate & SXFR) == 0x40 4674 && (ultraenb & mask) != 0) { 4675 /* Treat 10MHz as a non-ultra speed */ 4676 scsirate &= ~SXFR; 4677 ultraenb &= ~mask; 4678 } 4679 tinfo->user.period = 4680 ahc_find_period(ahc, scsirate, 4681 (ultraenb & mask) 4682 ? 
AHC_SYNCRATE_ULTRA 4683 : AHC_SYNCRATE_FAST); 4684 if (tinfo->user.period != 0) 4685 tinfo->user.offset = ~0; 4686 } 4687 if (tinfo->user.period == 0) 4688 tinfo->user.offset = 0; 4689 if ((scsirate & WIDEXFER) != 0 4690 && (ahc->features & AHC_WIDE) != 0) 4691 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4692 tinfo->user.protocol_version = 4; 4693 if ((ahc->features & AHC_DT) != 0) 4694 tinfo->user.transport_version = 3; 4695 else 4696 tinfo->user.transport_version = 2; 4697 tinfo->goal.protocol_version = 2; 4698 tinfo->goal.transport_version = 2; 4699 tinfo->curr.protocol_version = 2; 4700 tinfo->curr.transport_version = 2; 4701 } 4702 tstate->ultraenb = ultraenb; 4703 } 4704 ahc->user_discenable = discenable; 4705 ahc->user_tagenable = tagenable; 4706 4707 /* There are no untagged SCBs active yet. */ 4708 for (i = 0; i < 16; i++) { 4709 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0)); 4710 if ((ahc->flags & AHC_SCB_BTT) != 0) { 4711 int lun; 4712 4713 /* 4714 * The SCB based BTT allows an entry per 4715 * target and lun pair. 4716 */ 4717 for (lun = 1; lun < AHC_NUM_LUNS; lun++) 4718 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun)); 4719 } 4720 } 4721 4722 /* All of our queues are empty */ 4723 for (i = 0; i < 256; i++) 4724 ahc->qoutfifo[i] = SCB_LIST_NULL; 4725 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD); 4726 4727 for (i = 0; i < 256; i++) 4728 ahc->qinfifo[i] = SCB_LIST_NULL; 4729 4730 if ((ahc->features & AHC_MULTI_TID) != 0) { 4731 ahc_outb(ahc, TARGID, 0); 4732 ahc_outb(ahc, TARGID + 1, 0); 4733 } 4734 4735 /* 4736 * Tell the sequencer where it can find our arrays in memory. 
4737 */ 4738 physaddr = ahc->scb_data->hscb_busaddr; 4739 ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF); 4740 ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF); 4741 ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF); 4742 ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF); 4743 4744 physaddr = ahc->shared_data_busaddr; 4745 ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF); 4746 ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF); 4747 ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF); 4748 ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF); 4749 4750 /* 4751 * Initialize the group code to command length table. 4752 * This overrides the values in TARG_SCSIRATE, so only 4753 * setup the table after we have processed that information. 4754 */ 4755 ahc_outb(ahc, CMDSIZE_TABLE, 5); 4756 ahc_outb(ahc, CMDSIZE_TABLE + 1, 9); 4757 ahc_outb(ahc, CMDSIZE_TABLE + 2, 9); 4758 ahc_outb(ahc, CMDSIZE_TABLE + 3, 0); 4759 ahc_outb(ahc, CMDSIZE_TABLE + 4, 15); 4760 ahc_outb(ahc, CMDSIZE_TABLE + 5, 11); 4761 ahc_outb(ahc, CMDSIZE_TABLE + 6, 0); 4762 ahc_outb(ahc, CMDSIZE_TABLE + 7, 0); 4763 4764 /* Tell the sequencer of our initial queue positions */ 4765 ahc_outb(ahc, KERNEL_QINPOS, 0); 4766 ahc_outb(ahc, QINPOS, 0); 4767 ahc_outb(ahc, QOUTPOS, 0); 4768 4769 /* 4770 * Use the built in queue management registers 4771 * if they are available. 4772 */ 4773 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 4774 ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256); 4775 ahc_outb(ahc, SDSCB_QOFF, 0); 4776 ahc_outb(ahc, SNSCB_QOFF, 0); 4777 ahc_outb(ahc, HNSCB_QOFF, 0); 4778 } 4779 4780 4781 /* We don't have any waiting selections */ 4782 ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL); 4783 4784 /* Our disconnection list is empty too */ 4785 ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL); 4786 4787 /* Message out buffer starts empty */ 4788 ahc_outb(ahc, MSG_OUT, MSG_NOOP); 4789 4790 /* 4791 * Setup the allowed SCSI Sequences based on operational mode. 
4792 * If we are a target, we'll enalbe select in operations once 4793 * we've had a lun enabled. 4794 */ 4795 scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP; 4796 if ((ahc->flags & AHC_INITIATORROLE) != 0) 4797 scsiseq_template |= ENRSELI; 4798 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template); 4799 4800 /* 4801 * Load the Sequencer program and Enable the adapter 4802 * in "fast" mode. 4803 */ 4804 if (bootverbose) 4805 printf("%s: Downloading Sequencer Program...", 4806 ahc_name(ahc)); 4807 4808 ahc_loadseq(ahc); 4809 4810 if ((ahc->features & AHC_ULTRA2) != 0) { 4811 int wait; 4812 4813 /* 4814 * Wait for up to 500ms for our transceivers 4815 * to settle. If the adapter does not have 4816 * a cable attached, the tranceivers may 4817 * never settle, so don't complain if we 4818 * fail here. 4819 */ 4820 ahc_pause(ahc); 4821 for (wait = 5000; 4822 (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait; 4823 wait--) 4824 ahc_delay(100); 4825 ahc_unpause(ahc); 4826 } 4827 return (0); 4828 } 4829 4830 void 4831 ahc_intr_enable(struct ahc_softc *ahc, int enable) 4832 { 4833 u_int hcntrl; 4834 4835 hcntrl = ahc_inb(ahc, HCNTRL); 4836 hcntrl &= ~INTEN; 4837 ahc->pause &= ~INTEN; 4838 ahc->unpause &= ~INTEN; 4839 if (enable) { 4840 hcntrl |= INTEN; 4841 ahc->pause |= INTEN; 4842 ahc->unpause |= INTEN; 4843 } 4844 ahc_outb(ahc, HCNTRL, hcntrl); 4845 } 4846 4847 /* 4848 * Ensure that the card is paused in a location 4849 * outside of all critical sections and that all 4850 * pending work is completed prior to returning. 4851 * This routine should only be called from outside 4852 * an interrupt context. 
4853 */ 4854 void 4855 ahc_pause_and_flushwork(struct ahc_softc *ahc) 4856 { 4857 int intstat; 4858 int maxloops; 4859 int paused; 4860 4861 maxloops = 1000; 4862 ahc->flags |= AHC_ALL_INTERRUPTS; 4863 intstat = 0; 4864 paused = FALSE; 4865 do { 4866 if (paused) 4867 ahc_unpause(ahc); 4868 ahc_intr(ahc); 4869 ahc_pause(ahc); 4870 paused = TRUE; 4871 ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO); 4872 ahc_clear_critical_section(ahc); 4873 if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) 4874 break; 4875 } while (--maxloops 4876 && (((intstat = ahc_inb(ahc, INTSTAT)) & INT_PEND) != 0 4877 || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)))); 4878 if (maxloops == 0) { 4879 printf("Infinite interrupt loop, INTSTAT = %x", 4880 ahc_inb(ahc, INTSTAT)); 4881 } 4882 ahc_platform_flushwork(ahc); 4883 ahc->flags &= ~AHC_ALL_INTERRUPTS; 4884 } 4885 4886 int 4887 ahc_suspend(struct ahc_softc *ahc) 4888 { 4889 uint8_t *ptr; 4890 int i; 4891 4892 ahc_pause_and_flushwork(ahc); 4893 4894 if (LIST_FIRST(&ahc->pending_scbs) != NULL) 4895 return (EBUSY); 4896 4897 #if AHC_TARGET_MODE 4898 /* 4899 * XXX What about ATIOs that have not yet been serviced? 4900 * Perhaps we should just refuse to be suspended if we 4901 * are acting in a target role. 
4902 */ 4903 if (ahc->pending_device != NULL) 4904 return (EBUSY); 4905 #endif 4906 4907 /* Save volatile registers */ 4908 if ((ahc->features & AHC_TWIN) != 0) { 4909 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); 4910 ahc->suspend_state.channel[1].scsiseq = ahc_inb(ahc, SCSISEQ); 4911 ahc->suspend_state.channel[1].sxfrctl0 = ahc_inb(ahc, SXFRCTL0); 4912 ahc->suspend_state.channel[1].sxfrctl1 = ahc_inb(ahc, SXFRCTL1); 4913 ahc->suspend_state.channel[1].simode0 = ahc_inb(ahc, SIMODE0); 4914 ahc->suspend_state.channel[1].simode1 = ahc_inb(ahc, SIMODE1); 4915 ahc->suspend_state.channel[1].seltimer = ahc_inb(ahc, SELTIMER); 4916 ahc->suspend_state.channel[1].seqctl = ahc_inb(ahc, SEQCTL); 4917 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 4918 } 4919 ahc->suspend_state.channel[0].scsiseq = ahc_inb(ahc, SCSISEQ); 4920 ahc->suspend_state.channel[0].sxfrctl0 = ahc_inb(ahc, SXFRCTL0); 4921 ahc->suspend_state.channel[0].sxfrctl1 = ahc_inb(ahc, SXFRCTL1); 4922 ahc->suspend_state.channel[0].simode0 = ahc_inb(ahc, SIMODE0); 4923 ahc->suspend_state.channel[0].simode1 = ahc_inb(ahc, SIMODE1); 4924 ahc->suspend_state.channel[0].seltimer = ahc_inb(ahc, SELTIMER); 4925 ahc->suspend_state.channel[0].seqctl = ahc_inb(ahc, SEQCTL); 4926 4927 if ((ahc->chip & AHC_PCI) != 0) { 4928 ahc->suspend_state.dscommand0 = ahc_inb(ahc, DSCOMMAND0); 4929 ahc->suspend_state.dspcistatus = ahc_inb(ahc, DSPCISTATUS); 4930 } 4931 4932 if ((ahc->features & AHC_DT) != 0) { 4933 u_int sfunct; 4934 4935 sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; 4936 ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); 4937 ahc->suspend_state.optionmode = ahc_inb(ahc, OPTIONMODE); 4938 ahc_outb(ahc, SFUNCT, sfunct); 4939 ahc->suspend_state.crccontrol1 = ahc_inb(ahc, CRCCONTROL1); 4940 } 4941 4942 if ((ahc->features & AHC_MULTI_FUNC) != 0) 4943 ahc->suspend_state.scbbaddr = ahc_inb(ahc, SCBBADDR); 4944 4945 if ((ahc->features & AHC_ULTRA2) != 0) 4946 ahc->suspend_state.dff_thrsh = ahc_inb(ahc, DFF_THRSH); 4947 
4948 ptr = ahc->suspend_state.scratch_ram; 4949 for (i = 0; i < 64; i++) 4950 *ptr++ = ahc_inb(ahc, SRAM_BASE + i); 4951 4952 if ((ahc->features & AHC_MORE_SRAM) != 0) { 4953 for (i = 0; i < 16; i++) 4954 *ptr++ = ahc_inb(ahc, TARG_OFFSET + i); 4955 } 4956 4957 ptr = ahc->suspend_state.btt; 4958 if ((ahc->flags & AHC_SCB_BTT) != 0) { 4959 for (i = 0;i < AHC_NUM_TARGETS; i++) { 4960 int j; 4961 4962 for (j = 0;j < AHC_NUM_LUNS; j++) { 4963 u_int tcl; 4964 4965 tcl = BUILD_TCL(i << 4, j); 4966 *ptr = ahc_index_busy_tcl(ahc, tcl); 4967 } 4968 } 4969 } 4970 ahc_shutdown(ahc); 4971 return (0); 4972 } 4973 4974 int 4975 ahc_resume(struct ahc_softc *ahc) 4976 { 4977 uint8_t *ptr; 4978 int i; 4979 4980 ahc_reset(ahc); 4981 4982 ahc_build_free_scb_list(ahc); 4983 4984 /* Restore volatile registers */ 4985 if ((ahc->features & AHC_TWIN) != 0) { 4986 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); 4987 ahc_outb(ahc, SCSIID, ahc->our_id); 4988 ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[1].scsiseq); 4989 ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[1].sxfrctl0); 4990 ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[1].sxfrctl1); 4991 ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[1].simode0); 4992 ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[1].simode1); 4993 ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[1].seltimer); 4994 ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[1].seqctl); 4995 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 4996 } 4997 ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[0].scsiseq); 4998 ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[0].sxfrctl0); 4999 ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[0].sxfrctl1); 5000 ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[0].simode0); 5001 ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[0].simode1); 5002 ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[0].seltimer); 5003 ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[0].seqctl); 5004 if 
((ahc->features & AHC_ULTRA2) != 0) 5005 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); 5006 else 5007 ahc_outb(ahc, SCSIID, ahc->our_id); 5008 5009 if ((ahc->chip & AHC_PCI) != 0) { 5010 ahc_outb(ahc, DSCOMMAND0, ahc->suspend_state.dscommand0); 5011 ahc_outb(ahc, DSPCISTATUS, ahc->suspend_state.dspcistatus); 5012 } 5013 5014 if ((ahc->features & AHC_DT) != 0) { 5015 u_int sfunct; 5016 5017 sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; 5018 ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); 5019 ahc_outb(ahc, OPTIONMODE, ahc->suspend_state.optionmode); 5020 ahc_outb(ahc, SFUNCT, sfunct); 5021 ahc_outb(ahc, CRCCONTROL1, ahc->suspend_state.crccontrol1); 5022 } 5023 5024 if ((ahc->features & AHC_MULTI_FUNC) != 0) 5025 ahc_outb(ahc, SCBBADDR, ahc->suspend_state.scbbaddr); 5026 5027 if ((ahc->features & AHC_ULTRA2) != 0) 5028 ahc_outb(ahc, DFF_THRSH, ahc->suspend_state.dff_thrsh); 5029 5030 ptr = ahc->suspend_state.scratch_ram; 5031 for (i = 0; i < 64; i++) 5032 ahc_outb(ahc, SRAM_BASE + i, *ptr++); 5033 5034 if ((ahc->features & AHC_MORE_SRAM) != 0) { 5035 for (i = 0; i < 16; i++) 5036 ahc_outb(ahc, TARG_OFFSET + i, *ptr++); 5037 } 5038 5039 ptr = ahc->suspend_state.btt; 5040 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5041 for (i = 0;i < AHC_NUM_TARGETS; i++) { 5042 int j; 5043 5044 for (j = 0;j < AHC_NUM_LUNS; j++) { 5045 u_int tcl; 5046 5047 tcl = BUILD_TCL(i << 4, j); 5048 ahc_busy_tcl(ahc, tcl, *ptr); 5049 } 5050 } 5051 } 5052 return (0); 5053 } 5054 5055 /************************** Busy Target Table *********************************/ 5056 /* 5057 * Return the untagged transaction id for a given target/channel lun. 5058 * Optionally, clear the entry. 
5059 */ 5060 u_int 5061 ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl) 5062 { 5063 u_int scbid; 5064 u_int target_offset; 5065 5066 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5067 u_int saved_scbptr; 5068 5069 saved_scbptr = ahc_inb(ahc, SCBPTR); 5070 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); 5071 scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl)); 5072 ahc_outb(ahc, SCBPTR, saved_scbptr); 5073 } else { 5074 target_offset = TCL_TARGET_OFFSET(tcl); 5075 scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset); 5076 } 5077 5078 return (scbid); 5079 } 5080 5081 void 5082 ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl) 5083 { 5084 u_int target_offset; 5085 5086 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5087 u_int saved_scbptr; 5088 5089 saved_scbptr = ahc_inb(ahc, SCBPTR); 5090 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); 5091 ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL); 5092 ahc_outb(ahc, SCBPTR, saved_scbptr); 5093 } else { 5094 target_offset = TCL_TARGET_OFFSET(tcl); 5095 ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL); 5096 } 5097 } 5098 5099 void 5100 ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid) 5101 { 5102 u_int target_offset; 5103 5104 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5105 u_int saved_scbptr; 5106 5107 saved_scbptr = ahc_inb(ahc, SCBPTR); 5108 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); 5109 ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid); 5110 ahc_outb(ahc, SCBPTR, saved_scbptr); 5111 } else { 5112 target_offset = TCL_TARGET_OFFSET(tcl); 5113 ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid); 5114 } 5115 } 5116 5117 /************************** SCB and SCB queue management **********************/ 5118 int 5119 ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target, 5120 char channel, int lun, u_int tag, role_t role) 5121 { 5122 int targ = SCB_GET_TARGET(ahc, scb); 5123 char chan = SCB_GET_CHANNEL(ahc, scb); 5124 int slun = SCB_GET_LUN(scb); 5125 int match; 5126 5127 match = ((chan == channel) || (channel == 
ALL_CHANNELS)); 5128 if (match != 0) 5129 match = ((targ == target) || (target == CAM_TARGET_WILDCARD)); 5130 if (match != 0) 5131 match = ((lun == slun) || (lun == CAM_LUN_WILDCARD)); 5132 if (match != 0) { 5133 #if AHC_TARGET_MODE 5134 int group; 5135 5136 group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code); 5137 if (role == ROLE_INITIATOR) { 5138 match = (group != XPT_FC_GROUP_TMODE) 5139 && ((tag == scb->hscb->tag) 5140 || (tag == SCB_LIST_NULL)); 5141 } else if (role == ROLE_TARGET) { 5142 match = (group == XPT_FC_GROUP_TMODE) 5143 && ((tag == scb->io_ctx->csio.tag_id) 5144 || (tag == SCB_LIST_NULL)); 5145 } 5146 #else /* !AHC_TARGET_MODE */ 5147 match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL)); 5148 #endif /* AHC_TARGET_MODE */ 5149 } 5150 5151 return match; 5152 } 5153 5154 void 5155 ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb) 5156 { 5157 int target; 5158 char channel; 5159 int lun; 5160 5161 target = SCB_GET_TARGET(ahc, scb); 5162 lun = SCB_GET_LUN(scb); 5163 channel = SCB_GET_CHANNEL(ahc, scb); 5164 5165 ahc_search_qinfifo(ahc, target, channel, lun, 5166 /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN, 5167 CAM_REQUEUE_REQ, SEARCH_COMPLETE); 5168 5169 ahc_platform_freeze_devq(ahc, scb); 5170 } 5171 5172 void 5173 ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb) 5174 { 5175 struct scb *prev_scb; 5176 5177 prev_scb = NULL; 5178 if (ahc_qinfifo_count(ahc) != 0) { 5179 u_int prev_tag; 5180 uint8_t prev_pos; 5181 5182 prev_pos = ahc->qinfifonext - 1; 5183 prev_tag = ahc->qinfifo[prev_pos]; 5184 prev_scb = ahc_lookup_scb(ahc, prev_tag); 5185 } 5186 ahc_qinfifo_requeue(ahc, prev_scb, scb); 5187 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 5188 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); 5189 } else { 5190 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); 5191 } 5192 } 5193 5194 static void 5195 ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb, 5196 struct scb *scb) 5197 { 5198 if (prev_scb == NULL) { 5199 ahc_outb(ahc, 
			 NEXT_QUEUED_SCB, scb->hscb->tag);
	} else {
		prev_scb->hscb->next = scb->hscb->tag;
		ahc_sync_scb(ahc, prev_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	/*
	 * Append at the tail; the new tail's "next" is chained to the
	 * reserved next_queued SCB, and the hscb is synced before the
	 * controller can DMA it.
	 */
	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
	scb->hscb->next = ahc->next_queued_scb->hscb->tag;
	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}

/*
 * Return the number of SCBs currently resident in the qinfifo: the
 * (mod 256, via uint8_t arithmetic) distance between the driver's
 * producer index (ahc->qinfifonext) and the chip's consumer index
 * (SNSCB_QOFF on chips with queue registers, QINPOS otherwise).
 * NOTE(review): SNSCB_QOFF is written back with the value just read --
 * presumably the read has side effects the write-back undoes (same
 * idiom as ahc_search_qinfifo below); confirm against the register
 * reference.
 */
static int
ahc_qinfifo_count(struct ahc_softc *ahc)
{
	uint8_t qinpos;
	uint8_t diff;

	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		qinpos = ahc_inb(ahc, SNSCB_QOFF);
		ahc_outb(ahc, SNSCB_QOFF, qinpos);
	} else
		qinpos = ahc_inb(ahc, QINPOS);
	diff = ahc->qinfifonext - qinpos;
	return (diff);
}

/*
 * Search the qinfifo, the sequencer's waiting-for-selection list, and
 * (via ahc_search_untagged_queues) the driver's untagged queues for
 * SCBs matching target/channel/lun/tag/role.  "action" selects what
 * happens to each match: SEARCH_COUNT only tallies, SEARCH_REMOVE
 * drops the SCB from its queue, SEARCH_COMPLETE additionally finishes
 * the command with "status" via ahc_done().  Returns the number of
 * matches.  Manipulates SCBPTR and the queue registers directly, so
 * the sequencer must be paused by the caller (see ahc_abort_scbs).
 */
int
ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
		   int lun, u_int tag, role_t role, uint32_t status,
		   ahc_search_action action)
{
	struct	scb *scb;
	struct	scb *prev_scb;
	uint8_t qinstart;
	uint8_t qinpos;
	uint8_t qintail;
	uint8_t next;
	uint8_t prev;
	uint8_t curscbptr;
	int	found;
	int	have_qregs;

	qintail = ahc->qinfifonext;
	have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0;
	if (have_qregs) {
		qinstart = ahc_inb(ahc, SNSCB_QOFF);
		ahc_outb(ahc, SNSCB_QOFF, qinstart);
	} else
		qinstart = ahc_inb(ahc, QINPOS);
	qinpos = qinstart;
	found = 0;
	prev_scb = NULL;

	if (action == SEARCH_COMPLETE) {
		/*
		 * Don't attempt to run any queued untagged transactions
		 * until we are done with the abort process.
		 */
		ahc_freeze_untagged_queues(ahc);
	}

	/*
	 * Start with an empty queue.  Entries that are not chosen
	 * for removal will be re-added to the queue as we go.
	 */
	ahc->qinfifonext = qinpos;
	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);

	while (qinpos != qintail) {
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
		if (scb == NULL) {
			printf("qinpos = %d, SCB index = %d\n",
				qinpos, ahc->qinfifo[qinpos]);
			panic("Loop 1\n");
		}

		if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) {
			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;
				cam_status cstat;

				ostat = ahc_get_transaction_status(scb);
				if (ostat == CAM_REQ_INPROG)
					ahc_set_transaction_status(scb, status);
				cstat = ahc_get_transaction_status(scb);
				if (cstat != CAM_REQ_CMP)
					ahc_freeze_scb(scb);
				if ((scb->flags & SCB_ACTIVE) == 0)
					printf("Inactive SCB in qinfifo\n");
				ahc_done(ahc, scb);

				/* FALLTHROUGH */
			}
			case SEARCH_REMOVE:
				/* Matched entry is simply not re-queued. */
				break;
			case SEARCH_COUNT:
				/* Counting only: put the entry back. */
				ahc_qinfifo_requeue(ahc, prev_scb, scb);
				prev_scb = scb;
				break;
			}
		} else {
			/* Non-matching entries always go back on the queue. */
			ahc_qinfifo_requeue(ahc, prev_scb, scb);
			prev_scb = scb;
		}
		qinpos++;
	}

	/* Publish the new producer index to the controller. */
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
	}

	if (action != SEARCH_COUNT
	 && (found != 0)
	 && (qinstart != ahc->qinfifonext)) {
		/*
		 * The sequencer may be in the process of dmaing
		 * down the SCB at the beginning of the queue.
		 * This could be problematic if either the first,
		 * or the second SCB is removed from the queue
		 * (the first SCB includes a pointer to the "next"
		 * SCB to dma).  If we have removed any entries, swap
		 * the first element in the queue with the next HSCB
		 * so the sequencer will notice that NEXT_QUEUED_SCB
		 * has changed during its dma attempt and will retry
		 * the DMA.
		 */
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]);

		if (scb == NULL) {
			printf("found = %d, qinstart = %d, qinfifionext = %d\n",
				found, qinstart, ahc->qinfifonext);
			panic("First/Second Qinfifo fixup\n");
		}
		/*
		 * ahc_swap_with_next_hscb forces our next pointer to
		 * point to the reserved SCB for future commands.  Save
		 * and restore our original next pointer to maintain
		 * queue integrity.
		 */
		next = scb->hscb->next;
		ahc->scb_data->scbindex[scb->hscb->tag] = NULL;
		ahc_swap_with_next_hscb(ahc, scb);
		scb->hscb->next = next;
		ahc->qinfifo[qinstart] = scb->hscb->tag;

		/* Tell the card about the new head of the qinfifo. */
		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);

		/* Fixup the tail "next" pointer. */
		qintail = ahc->qinfifonext - 1;
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]);
		scb->hscb->next = ahc->next_queued_scb->hscb->tag;
	}

	/*
	 * Search waiting for selection list.
	 */
	curscbptr = ahc_inb(ahc, SCBPTR);
	next = ahc_inb(ahc, WAITING_SCBH);  /* Start at head of list. */
	prev = SCB_LIST_NULL;

	while (next != SCB_LIST_NULL) {
		uint8_t scb_index;

		/* Bank in the hardware SCB to read its tag/next fields. */
		ahc_outb(ahc, SCBPTR, next);
		scb_index = ahc_inb(ahc, SCB_TAG);
		if (scb_index >= ahc->scb_data->numscbs) {
			printf("Waiting List inconsistency. "
			       "SCB index == %d, yet numscbs == %d.",
			       scb_index, ahc->scb_data->numscbs);
			ahc_dump_card_state(ahc);
			panic("for safety");
		}
		scb = ahc_lookup_scb(ahc, scb_index);
		if (scb == NULL) {
			printf("scb_index = %d, next = %d\n",
				scb_index, next);
			panic("Waiting List traversal\n");
		}
		if (ahc_match_scb(ahc, scb, target, channel,
				  lun, SCB_LIST_NULL, role)) {
			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;
				cam_status cstat;

				ostat = ahc_get_transaction_status(scb);
				if (ostat == CAM_REQ_INPROG)
					ahc_set_transaction_status(scb,
								   status);
				cstat = ahc_get_transaction_status(scb);
				if (cstat != CAM_REQ_CMP)
					ahc_freeze_scb(scb);
				if ((scb->flags & SCB_ACTIVE) == 0)
					printf("Inactive SCB in Waiting List\n");
				ahc_done(ahc, scb);
				/* FALLTHROUGH */
			}
			case SEARCH_REMOVE:
				/* Unlink; returns the follower SCB. */
				next = ahc_rem_wscb(ahc, next, prev);
				break;
			case SEARCH_COUNT:
				prev = next;
				next = ahc_inb(ahc, SCB_NEXT);
				break;
			}
		} else {

			prev = next;
			next = ahc_inb(ahc, SCB_NEXT);
		}
	}
	/* Restore the SCB bank the sequencer was using. */
	ahc_outb(ahc, SCBPTR, curscbptr);

	found += ahc_search_untagged_queues(ahc, /*ahc_io_ctx_t*/NULL, target,
					    channel, lun, status, action);

	if (action == SEARCH_COMPLETE)
		ahc_release_untagged_queues(ahc);
	return (found);
}

/*
 * Search the driver's untagged transaction queues for SCBs matching
 * target/channel/lun (and, optionally, a specific I/O context "ctx"),
 * applying the same SEARCH_COUNT/REMOVE/COMPLETE semantics as
 * ahc_search_qinfifo.  Returns the number of matches.
 */
int
ahc_search_untagged_queues(struct ahc_softc *ahc, ahc_io_ctx_t ctx,
			   int target, char channel, int lun, uint32_t status,
			   ahc_search_action action)
{
	struct	scb *scb;
	int	maxtarget;
	int	found;
	int	i;

	if (action == SEARCH_COMPLETE) {
		/*
		 * Don't attempt to run any queued untagged transactions
		 * until we are done with the abort process.
		 */
		ahc_freeze_untagged_queues(ahc);
	}

	found = 0;
	i = 0;
	if ((ahc->flags & AHC_SCB_BTT) == 0) {

		/*
		 * One untagged queue per target; channel B targets
		 * occupy slots 8-15.
		 */
		maxtarget = 16;
		if (target != CAM_TARGET_WILDCARD) {

			i = target;
			if (channel == 'B')
				i += 8;
			maxtarget = i + 1;
		}
	} else {
		/*
		 * NOTE(review): with an SCB-based busy-target table the
		 * scan range is empty (i == maxtarget == 0), so nothing
		 * is searched here -- presumably untagged transactions
		 * are tracked elsewhere in BTT mode; confirm.
		 */
		maxtarget = 0;
	}

	for (; i < maxtarget; i++) {
		struct scb_tailq *untagged_q;
		struct scb *next_scb;

		untagged_q = &(ahc->untagged_queues[i]);
		next_scb = TAILQ_FIRST(untagged_q);
		/* Grab the follower first: the current SCB may be unlinked. */
		while (next_scb != NULL) {

			scb = next_scb;
			next_scb = TAILQ_NEXT(scb, links.tqe);

			/*
			 * The head of the list may be the currently
			 * active untagged command for a device.
			 * We're only searching for commands that
			 * have not been started.  A transaction
			 * marked active but still in the qinfifo
			 * is removed by the qinfifo scanning code
			 * above.
			 */
			if ((scb->flags & SCB_ACTIVE) != 0)
				continue;

			if (ahc_match_scb(ahc, scb, target, channel, lun,
					  SCB_LIST_NULL, ROLE_INITIATOR) == 0
			 || (ctx != NULL && ctx != scb->io_ctx))
				continue;

			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;
				cam_status cstat;

				ostat = ahc_get_transaction_status(scb);
				if (ostat == CAM_REQ_INPROG)
					ahc_set_transaction_status(scb, status);
				cstat = ahc_get_transaction_status(scb);
				if (cstat != CAM_REQ_CMP)
					ahc_freeze_scb(scb);
				if ((scb->flags & SCB_ACTIVE) == 0)
					printf("Inactive SCB in untaggedQ\n");
				ahc_done(ahc, scb);
				break;
			}
			case SEARCH_REMOVE:
				scb->flags &= ~SCB_UNTAGGEDQ;
				TAILQ_REMOVE(untagged_q, scb, links.tqe);
				break;
			case SEARCH_COUNT:
				break;
			}
		}
	}

	if (action == SEARCH_COMPLETE)
		ahc_release_untagged_queues(ahc);
	return (found);
}

/*
 * Walk the sequencer's on-chip disconnected-SCB list looking for SCBs
 * matching target/channel/lun/tag.  Matches are counted; if "remove"
 * is set they are also unlinked from the list (control byte zeroed).
 * "stop_on_first" terminates the walk at the first match.  SCBPTR is
 * saved/restored only when "save_state" is set -- callers such as
 * ahc_abort_scbs restore it themselves.  Returns the match count.
 */
int
ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
		     int lun, u_int tag, int stop_on_first, int remove,
		     int save_state)
{
	struct	scb *scbp;
	u_int	next;
	u_int	prev;
	u_int	count;
	u_int	active_scb;

	count = 0;
	next = ahc_inb(ahc, DISCONNECTED_SCBH);
	prev = SCB_LIST_NULL;

	if (save_state) {
		/* restore this when we're done */
		active_scb = ahc_inb(ahc, SCBPTR);
	} else
		/* Silence compiler */
		active_scb = SCB_LIST_NULL;

	while (next != SCB_LIST_NULL) {
		u_int scb_index;

		ahc_outb(ahc, SCBPTR, next);
		scb_index = ahc_inb(ahc, SCB_TAG);
		if (scb_index >= ahc->scb_data->numscbs) {
			printf("Disconnected List inconsistency. "
			       "SCB index == %d, yet numscbs == %d.",
			       scb_index, ahc->scb_data->numscbs);
			ahc_dump_card_state(ahc);
			panic("for safety");
		}

		/* A self-referencing link would loop forever. */
		if (next == prev) {
			panic("Disconnected List Loop. "
			      "cur SCBPTR == %x, prev SCBPTR == %x.",
			      next, prev);
		}
		scbp = ahc_lookup_scb(ahc, scb_index);
		if (ahc_match_scb(ahc, scbp, target, channel, lun,
				  tag, ROLE_INITIATOR)) {
			count++;
			if (remove) {
				next =
				    ahc_rem_scb_from_disc_list(ahc, prev, next);
			} else {
				prev = next;
				next = ahc_inb(ahc, SCB_NEXT);
			}
			if (stop_on_first)
				break;
		} else {
			prev = next;
			next = ahc_inb(ahc, SCB_NEXT);
		}
	}
	if (save_state)
		ahc_outb(ahc, SCBPTR, active_scb);
	return (count);
}

/*
 * Remove an SCB from the on chip list of disconnected transactions.
 * This is empty/unused if we are not performing SCB paging.
 * Returns the SCB (hardware slot) that followed the removed entry so
 * callers can continue the traversal.
 */
static u_int
ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
{
	u_int next;

	ahc_outb(ahc, SCBPTR, scbptr);
	next = ahc_inb(ahc, SCB_NEXT);

	/* Zero the control byte so the sequencer ignores this slot. */
	ahc_outb(ahc, SCB_CONTROL, 0);

	ahc_add_curscb_to_free_list(ahc);

	/* Unlink: patch the predecessor's next, or the list head. */
	if (prev != SCB_LIST_NULL) {
		ahc_outb(ahc, SCBPTR, prev);
		ahc_outb(ahc, SCB_NEXT, next);
	} else
		ahc_outb(ahc, DISCONNECTED_SCBH, next);

	return (next);
}

/*
 * Add the SCB as selected by SCBPTR onto the on chip list of
 * free hardware SCBs.  This list is empty/unused if we are not
 * performing SCB paging.
 */
static void
ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
{
	/*
	 * Invalidate the tag so that our abort
	 * routines don't think it's active.
	 */
	ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);

	if ((ahc->flags & AHC_PAGESCBS) != 0) {
		/* Push the current slot onto the FREE_SCBH list head. */
		ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
		ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
	}
}

/*
 * Manipulate the waiting for selection list and return the
 * scb that follows the one that we remove.
 */
static u_int
ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
{
	u_int curscb, next;

	/*
	 * Select the SCB we want to abort and
	 * pull the next pointer out of it.
	 */
	curscb = ahc_inb(ahc, SCBPTR);
	ahc_outb(ahc, SCBPTR, scbpos);
	next = ahc_inb(ahc, SCB_NEXT);

	/* Clear the necessary fields */
	ahc_outb(ahc, SCB_CONTROL, 0);

	ahc_add_curscb_to_free_list(ahc);

	/* update the waiting list */
	if (prev == SCB_LIST_NULL) {
		/* First in the list */
		ahc_outb(ahc, WAITING_SCBH, next);

		/*
		 * Ensure we aren't attempting to perform
		 * selection for this entry.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else {
		/*
		 * Select the scb that pointed to us
		 * and update its next pointer.
		 */
		ahc_outb(ahc, SCBPTR, prev);
		ahc_outb(ahc, SCB_NEXT, next);
	}

	/*
	 * Point us back at the original scb position.
	 */
	ahc_outb(ahc, SCBPTR, curscb);
	return next;
}

/******************************** Error Handling ******************************/
/*
 * Abort all SCBs that match the given description (target/channel/lun/tag),
 * setting their status to the passed in status if the status has not already
 * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
 * is paused before it is called.
 */
int
ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
	       int lun, u_int tag, role_t role, uint32_t status)
{
	struct	scb *scbp;
	struct	scb *scbp_next;
	u_int	active_scb;
	int	i, j;
	int	maxtarget;
	int	minlun;
	int	maxlun;

	int	found;

	/*
	 * Don't attempt to run any queued untagged transactions
	 * until we are done with the abort process.
	 */
	ahc_freeze_untagged_queues(ahc);

	/* restore this when we're done */
	active_scb = ahc_inb(ahc, SCBPTR);

	/* First flush matching entries from the input queues. */
	found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	/*
	 * Clean out the busy target table for any untagged commands.
	 */
	i = 0;
	maxtarget = 16;
	if (target != CAM_TARGET_WILDCARD) {
		i = target;
		if (channel == 'B')
			i += 8;	/* Channel B targets live in slots 8-15. */
		maxtarget = i + 1;
	}

	if (lun == CAM_LUN_WILDCARD) {

		/*
		 * Unless we are using an SCB based
		 * busy targets table, there is only
		 * one table entry for all luns of
		 * a target.
		 */
		minlun = 0;
		maxlun = 1;
		if ((ahc->flags & AHC_SCB_BTT) != 0)
			maxlun = AHC_NUM_LUNS;
	} else {
		minlun = lun;
		maxlun = lun + 1;
	}

	if (role != ROLE_TARGET) {
		for (;i < maxtarget; i++) {
			for (j = minlun;j < maxlun; j++) {
				u_int scbid;
				u_int tcl;

				tcl = BUILD_TCL(i << 4, j);
				scbid = ahc_index_busy_tcl(ahc, tcl);
				scbp = ahc_lookup_scb(ahc, scbid);
				if (scbp == NULL
				 || ahc_match_scb(ahc, scbp, target, channel,
						  lun, tag, role) == 0)
					continue;
				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j));
			}
		}

		/*
		 * Go through the disconnected list and remove any entries we
		 * have queued for completion, 0'ing their control byte too.
		 * We save the active SCB and restore it ourselves, so there
		 * is no reason for this search to restore it too.
		 */
		ahc_search_disc_list(ahc, target, channel, lun, tag,
				     /*stop_on_first*/FALSE, /*remove*/TRUE,
				     /*save_state*/FALSE);
	}

	/*
	 * Go through the hardware SCB array looking for commands that
	 * were active but not on any list.  In some cases, these remnants
	 * might not still have mappings in the scbindex array (e.g. unexpected
	 * bus free with the same scb queued for an abort).  Don't hold this
	 * against them.
	 */
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		u_int scbid;

		ahc_outb(ahc, SCBPTR, i);
		scbid = ahc_inb(ahc, SCB_TAG);
		scbp = ahc_lookup_scb(ahc, scbid);
		if ((scbp == NULL && scbid != SCB_LIST_NULL)
		 || (scbp != NULL
		  && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)))
			ahc_add_curscb_to_free_list(ahc);
	}

	/*
	 * Go through the pending CCB list and look for
	 * commands for this target that are still active.
	 * These are other tagged commands that were
	 * disconnected when the reset occurred.
	 */
	scbp_next = LIST_FIRST(&ahc->pending_scbs);
	while (scbp_next != NULL) {
		scbp = scbp_next;
		/* ahc_done() unlinks scbp, so fetch the follower first. */
		scbp_next = LIST_NEXT(scbp, pending_links);
		if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
			cam_status ostat;

			ostat = ahc_get_transaction_status(scbp);
			if (ostat == CAM_REQ_INPROG)
				ahc_set_transaction_status(scbp, status);
			if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP)
				ahc_freeze_scb(scbp);
			if ((scbp->flags & SCB_ACTIVE) == 0)
				printf("Inactive SCB on pending list\n");
			ahc_done(ahc, scbp);
			found++;
		}
	}
	ahc_outb(ahc, SCBPTR, active_scb);
	ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
	ahc_release_untagged_queues(ahc);
	return found;
}

/*
 * Generate a reset on the currently selected bus: pulse SCSIRSTO in
 * SCSISEQ for AHC_BUSRESET_DELAY, with ENSCSIRST masked in SIMODE1 so
 * we do not interrupt on our own reset, then clear interrupt status
 * and re-enable reset interrupts.
 */
static void
ahc_reset_current_bus(struct ahc_softc *ahc)
{
	uint8_t scsiseq;

	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
	scsiseq = ahc_inb(ahc, SCSISEQ);
	ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
	/* Ensure the write reaches the chip before timing the pulse. */
	ahc_flush_device_writes(ahc);
	ahc_delay(AHC_BUSRESET_DELAY);
	/* Turn off the bus reset */
	ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);

	ahc_clear_intstat(ahc);

	/* Re-enable reset interrupts */
	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
}

/*
 * Reset the given SCSI channel ("A" or "B" on twin-channel chips),
 * optionally generating the bus reset ourselves (initiate_reset).
 * Completes or requeues every transaction affected by the reset,
 * reverts all transfer negotiations for the channel to async/narrow,
 * and notifies the transport layer.  Returns the number of SCBs
 * aborted.  The sequencer is restarted (reset came from our channel)
 * or merely unpaused (stealth reset of the twin channel) on exit.
 */
int
ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
{
	struct	ahc_devinfo devinfo;
	u_int	initiator, target, max_scsiid;
	u_int	sblkctl;
	u_int	scsiseq;
	u_int	simode1;
	int	found;
	int	restart_needed;
	char	cur_channel;

	ahc->pending_device = NULL;

	ahc_compile_devinfo(&devinfo,
			    CAM_TARGET_WILDCARD,
			    CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD,
			    channel, ROLE_UNKNOWN);
	ahc_pause(ahc);

	/* Make sure the sequencer is in a safe location. */
	ahc_clear_critical_section(ahc);

	/*
	 * Run our command complete fifos to ensure that we perform
	 * completion processing on any commands that 'completed'
	 * before the reset occurred.
	 */
	ahc_run_qoutfifo(ahc);
#if AHC_TARGET_MODE
	/*
	 * NOTE(review): "#if" here (and twice below) vs "#ifdef
	 * AHC_TARGET_MODE" later in this file -- if the macro is ever
	 * defined empty, "#if" fails to compile; confirm which form is
	 * intended and make them consistent.
	 */
	/*
	 * XXX - In Twin mode, the tqinfifo may have commands
	 *	 for an unaffected channel in it.  However, if
	 *	 we have run out of ATIO resources to drain that
	 *	 queue, we may not get them all out here.  Further,
	 *	 the blocked transactions for the reset channel
	 *	 should just be killed off, irrespecitve of whether
	 *	 we are blocked on ATIO resources.  Write a routine
	 *	 to compact the tqinfifo appropriately.
	 */
	if ((ahc->flags & AHC_TARGETROLE) != 0) {
		ahc_run_tqinfifo(ahc, /*paused*/TRUE);
	}
#endif

	/*
	 * Reset the bus if we are initiating this reset
	 */
	sblkctl = ahc_inb(ahc, SBLKCTL);
	cur_channel = 'A';
	if ((ahc->features & AHC_TWIN) != 0
	 && ((sblkctl & SELBUSB) != 0))
		cur_channel = 'B';
	scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
	if (cur_channel != channel) {
		/* Case 1: Command for another bus is active
		 * Stealthily reset the other bus without
		 * upsetting the current bus.
		 */
		ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
#if AHC_TARGET_MODE
		/*
		 * Bus resets clear ENSELI, so we cannot
		 * defer re-enabling bus reset interrupts
		 * if we are in target mode.
		 */
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			simode1 |= ENSCSIRST;
#endif
		ahc_outb(ahc, SIMODE1, simode1);
		if (initiate_reset)
			ahc_reset_current_bus(ahc);
		ahc_clear_intstat(ahc);
		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
		/* Bank back to the bus we interrupted. */
		ahc_outb(ahc, SBLKCTL, sblkctl);
		restart_needed = FALSE;
	} else {
		/* Case 2: A command from this bus is active or we're idle */
		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
#if AHC_TARGET_MODE
		/*
		 * Bus resets clear ENSELI, so we cannot
		 * defer re-enabling bus reset interrupts
		 * if we are in target mode.
		 */
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			simode1 |= ENSCSIRST;
#endif
		ahc_outb(ahc, SIMODE1, simode1);
		if (initiate_reset)
			ahc_reset_current_bus(ahc);
		ahc_clear_intstat(ahc);
		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
		restart_needed = TRUE;
	}

	/*
	 * Clean up all the state information for the
	 * pending transactions on this bus.
	 */
	found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);

	max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;

#ifdef AHC_TARGET_MODE
	/*
	 * Send an immediate notify ccb to all target more peripheral
	 * drivers affected by this action.
	 */
	for (target = 0; target <= max_scsiid; target++) {
		struct ahc_tmode_tstate* tstate;
		u_int lun;

		tstate = ahc->enabled_targets[target];
		if (tstate == NULL)
			continue;
		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
			struct ahc_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
					       EVENT_TYPE_BUS_RESET, /*arg*/0);
			ahc_send_lstate_events(ahc, lstate);
		}
	}
#endif
	/* Notify the XPT that a bus reset occurred */
	ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD,
		       CAM_LUN_WILDCARD, AC_BUS_RESET, NULL);

	/*
	 * Revert to async/narrow transfers until we renegotiate.
	 */
	for (target = 0; target <= max_scsiid; target++) {

		if (ahc->enabled_targets[target] == NULL)
			continue;
		for (initiator = 0; initiator <= max_scsiid; initiator++) {
			struct ahc_devinfo devinfo;

			ahc_compile_devinfo(&devinfo, target, initiator,
					    CAM_LUN_WILDCARD,
					    channel, ROLE_UNKNOWN);
			ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
				      AHC_TRANS_CUR, /*paused*/TRUE);
			ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
					 /*period*/0, /*offset*/0,
					 /*ppr_options*/0, AHC_TRANS_CUR,
					 /*paused*/TRUE);
		}
	}

	if (restart_needed)
		ahc_restart(ahc);
	else
		ahc_unpause(ahc);
	return found;
}


/***************************** Residual Processing ****************************/
/*
 * Calculate the residual for a just completed SCB.
 */
void
ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct status_pkt *spkt;
	uint32_t sgptr;
	uint32_t resid_sgptr;
	uint32_t resid;

	/*
	 * 5 cases.
	 * 1) No residual.
	 *    SG_RESID_VALID clear in sgptr.
	 * 2) Transferless command
	 * 3) Never performed any transfers.
	 *    sgptr has SG_FULL_RESID set.
	 * 4) No residual but target did not
	 *    save data pointers after the
	 *    last transfer, so sgptr was
	 *    never updated.
	 * 5) We have a partial residual.
	 *    Use residual_sgptr to determine
	 *    where we are.
	 */

	hscb = scb->hscb;
	sgptr = ahc_le32toh(hscb->sgptr);
	if ((sgptr & SG_RESID_VALID) == 0)
		/* Case 1 */
		return;
	sgptr &= ~SG_RESID_VALID;

	if ((sgptr & SG_LIST_NULL) != 0)
		/* Case 2 */
		return;

	spkt = &hscb->shared_data.status;
	resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr);
	if ((sgptr & SG_FULL_RESID) != 0) {
		/* Case 3 */
		resid = ahc_get_transfer_length(scb);
	} else if ((resid_sgptr & SG_LIST_NULL) != 0) {
		/* Case 4 */
		return;
	} else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
		panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
	} else {
		struct ahc_dma_seg *sg;

		/*
		 * Remainder of the SG where the transfer
		 * stopped.
		 */
		resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
		sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);

		/* The residual sg_ptr always points to the next sg */
		sg--;

		/*
		 * Add up the contents of all residual
		 * SG segments that are after the SG where
		 * the transfer stopped.
		 */
		while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) {
			sg++;
			resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
		}
	}
	/* Sense transfers record their residual separately. */
	if ((scb->flags & SCB_SENSE) == 0)
		ahc_set_residual(scb, resid);
	else
		ahc_set_sense_residual(scb, resid);

#ifdef AHC_DEBUG
	if ((ahc_debug & AHC_SHOW_MISC) != 0) {
		ahc_print_path(ahc, scb);
		printf("Handled Residual of %d bytes\n", resid);
	}
#endif
}

/******************************* Target Mode **********************************/
#ifdef AHC_TARGET_MODE
/*
 * Add a target mode event to this lun's queue.  The device queue is
 * frozen for each queued event and released again when the event is
 * delivered (ahc_send_lstate_events) or discarded.
 */
static void
ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate,
		       u_int initiator_id, u_int event_type, u_int event_arg)
{
	struct ahc_tmode_event *event;
	int pending;

	xpt_freeze_devq(lstate->path, /*count*/1);
	/* Number of events currently buffered (ring indices may wrap). */
	if (lstate->event_w_idx >= lstate->event_r_idx)
		pending = lstate->event_w_idx - lstate->event_r_idx;
	else
		pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
			- (lstate->event_r_idx - lstate->event_w_idx);

	if (event_type == EVENT_TYPE_BUS_RESET
	 || event_type == MSG_BUS_DEV_RESET) {
		/*
		 * Any earlier events are irrelevant, so reset our buffer.
		 * This has the effect of allowing us to deal with reset
		 * floods (an external device holding down the reset line)
		 * without losing the event that is really interesting.
		 */
		lstate->event_r_idx = 0;
		lstate->event_w_idx = 0;
		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
	}

	/* Ring full: drop the oldest event to make room for this one. */
	if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
		xpt_print_path(lstate->path);
		printf("immediate event %x:%x lost\n",
		       lstate->event_buffer[lstate->event_r_idx].event_type,
		       lstate->event_buffer[lstate->event_r_idx].event_arg);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
	}

	event = &lstate->event_buffer[lstate->event_w_idx];
	event->initiator_id = initiator_id;
	event->event_type = event_type;
	event->event_arg = event_arg;
	lstate->event_w_idx++;
	if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
		lstate->event_w_idx = 0;
}

/*
 * Send any target mode events queued up waiting
 * for immediate notify resources.
 */
void
ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate)
{
	struct ccb_hdr *ccbh;
	struct ccb_immed_notify *inot;

	/* Drain events while both an event and a notify CCB exist. */
	while (lstate->event_r_idx != lstate->event_w_idx
	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
		struct ahc_tmode_event *event;

		event = &lstate->event_buffer[lstate->event_r_idx];
		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
		inot = (struct ccb_immed_notify *)ccbh;
		switch (event->event_type) {
		case EVENT_TYPE_BUS_RESET:
			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
			break;
		default:
			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
			inot->message_args[0] = event->event_type;
			inot->message_args[1] = event->event_arg;
			break;
		}
		inot->initiator_id = event->initiator_id;
		inot->sense_len = 0;
		xpt_done((union ccb *)inot);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
	}
}
#endif

/******************** Sequencer Program Patching/Download *********************/

#ifdef AHC_DUMP_SEQ
/*
 * Debug helper: read the downloaded sequencer program back out of
 * SEQRAM and print each 32-bit instruction in hex.  The program size
 * depends on the chip generation (448/512/768 instructions).
 */
void
ahc_dumpseq(struct ahc_softc* ahc)
{
	int i;
	int max_prog;

	if ((ahc->chip & AHC_BUS_MASK) < AHC_PCI)
		max_prog = 448;
	else if ((ahc->features & AHC_ULTRA2) != 0)
		max_prog = 768;
	else
		max_prog = 512;

	/* LOADRAM exposes SEQRAM for host access starting at address 0. */
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);
	for (i = 0; i < max_prog; i++) {
		uint8_t ins_bytes[4];

		ahc_insb(ahc, SEQRAM, ins_bytes, 4);
		printf("0x%08x\n", ins_bytes[0] << 24
				 | ins_bytes[1] << 16
				 | ins_bytes[2] << 8
				 | ins_bytes[3]);
	}
}
#endif

/*
 * Download the sequencer firmware (seqprog) into SEQRAM, applying the
 * compile-time patch table to skip instructions not relevant to this
 * chip's features, patching downloadable constants, and recording the
 * post-patch addresses of the firmware's critical sections in
 * ahc->critical_sections for use by ahc_clear_critical_section().
 */
static void
ahc_loadseq(struct ahc_softc *ahc)
{
	struct	cs cs_table[num_critical_sections];
	u_int	begin_set[num_critical_sections];
	u_int	end_set[num_critical_sections];
	struct	patch *cur_patch;
	u_int	cs_count;
	u_int	cur_cs;
	u_int	i;
	int	downloaded;
	u_int	skip_addr;
	u_int	sg_prefetch_cnt;
	uint8_t	download_consts[7];

	/*
	 * Start out with 0 critical sections
	 * that apply to this firmware load.
	 */
	cs_count = 0;
	cur_cs = 0;
	memset(begin_set, 0, sizeof(begin_set));
	memset(end_set, 0, sizeof(end_set));

	/* Setup downloadable constant table */
	download_consts[QOUTFIFO_OFFSET] = 0;
	if (ahc->targetcmds != NULL)
		download_consts[QOUTFIFO_OFFSET] += 32;
	download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
	download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
	download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
	/* Prefetch at least two S/G elements per fetch. */
	sg_prefetch_cnt = ahc->pci_cachesize;
	if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
		sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);

	cur_patch = patches;
	downloaded = 0;
	skip_addr = 0;
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);

	for (i = 0; i < sizeof(seqprog)/4; i++) {
		if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
			/*
			 * Don't download this instruction as it
			 * is in a patch that was removed.
			 */
			continue;
		}
		/*
		 * Move through the CS table until we find a CS
		 * that might apply to this instruction.
		 */
		for (; cur_cs < num_critical_sections; cur_cs++) {
			if (critical_sections[cur_cs].end <= i) {
				if (begin_set[cs_count] == TRUE
				 && end_set[cs_count] == FALSE) {
					cs_table[cs_count].end = downloaded;
					end_set[cs_count] = TRUE;
					cs_count++;
				}
				continue;
			}
			if (critical_sections[cur_cs].begin <= i
			 && begin_set[cs_count] == FALSE) {
				/* Record in *downloaded* address space. */
				cs_table[cs_count].begin = downloaded;
				begin_set[cs_count] = TRUE;
			}
			break;
		}
		ahc_download_instr(ahc, i, download_consts);
		downloaded++;
	}

	ahc->num_critical_sections = cs_count;
	if (cs_count != 0) {

		cs_count *= sizeof(struct cs);
		ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
		if (ahc->critical_sections == NULL)
			panic("ahc_loadseq: Could not malloc");
		memcpy(ahc->critical_sections, cs_table, cs_count);
	}
	/* Drop LOADRAM to hand SEQRAM back to the sequencer. */
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
	ahc_restart(ahc);

	if (bootverbose)
		printf(" %d instructions downloaded\n", downloaded);
}

/*
 * Consult the patch table for instruction "start_instr".  Advances
 * *start_patch past all patches rooted at this instruction; for each
 * rejected patch (patch_func returns 0) the skip window *skip_addr is
 * extended.  Returns 0 if the instruction falls inside a skipped
 * (removed) region, 1 if it should be downloaded.
 */
static int
ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
		u_int start_instr, u_int *skip_addr)
{
	struct	patch *cur_patch;
	struct	patch *last_patch;
	u_int	num_patches;

	num_patches = sizeof(patches)/sizeof(struct patch);
	last_patch = &patches[num_patches];
	cur_patch = *start_patch;

	while (cur_patch < last_patch && start_instr == cur_patch->begin) {

		if (cur_patch->patch_func(ahc) == 0) {

			/* Start rejecting code */
			*skip_addr = start_instr + cur_patch->skip_instr;
			cur_patch += cur_patch->skip_patch;
		} else {
			/* Accepted this patch.  Advance to the next
			 * one and wait for our intruction pointer to
			 * hit this point.
			 */
			cur_patch++;
		}
	}

	*start_patch = cur_patch;
	if (start_instr < *skip_addr)
		/* Still skipping */
		return (0);

	return (1);
}

/*
 * Relocate and write one firmware instruction to SEQRAM.  Branch
 * targets (format 3) are rewritten to account for instructions removed
 * by patching; immediates flagged with the parity bit are replaced
 * from the downloadable-constant table; parity/format handling differs
 * by chip generation (see the per-case comments below).
 */
static void
ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
{
	union	ins_formats instr;
	struct	ins_format1 *fmt1_ins;
	struct	ins_format3 *fmt3_ins;
	u_int	opcode;

	/*
	 * The firmware is always compiled into a little endian format.
	 * NOTE(review): the cast through uint32_t* type-puns the byte
	 * array -- presumably seqprog is suitably aligned; a memcpy
	 * would avoid the strict-aliasing question.
	 */
	instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);

	fmt1_ins = &instr.format1;
	fmt3_ins = NULL;

	/* Pull the opcode */
	opcode = instr.format1.opcode;
	switch (opcode) {
	case AIC_OP_JMP:
	case AIC_OP_JC:
	case AIC_OP_JNC:
	case AIC_OP_CALL:
	case AIC_OP_JNE:
	case AIC_OP_JNZ:
	case AIC_OP_JE:
	case AIC_OP_JZ:
	{
		struct patch *cur_patch;
		int address_offset;
		u_int address;
		u_int skip_addr;
		u_int i;

		fmt3_ins = &instr.format3;
		address_offset = 0;
		address = fmt3_ins->address;
		cur_patch = patches;
		skip_addr = 0;

		/*
		 * Count how many instructions before the branch target
		 * were removed by patching; the target shifts back by
		 * that amount.
		 */
		for (i = 0; i < address;) {

			ahc_check_patch(ahc, &cur_patch, i, &skip_addr);

			if (skip_addr > i) {
				int end_addr;

				end_addr = MIN(address, skip_addr);
				address_offset += end_addr - i;
				i = skip_addr;
			} else {
				i++;
			}
		}
		address -= address_offset;
		fmt3_ins->address = address;
		/* FALLTHROUGH */
	}
	case AIC_OP_OR:
	case AIC_OP_AND:
	case AIC_OP_XOR:
	case AIC_OP_ADD:
	case AIC_OP_ADC:
	case AIC_OP_BMOV:
		/* Parity bit doubles as the "patch this constant" flag. */
		if (fmt1_ins->parity != 0) {
			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
		}
		fmt1_ins->parity = 0;
		if ((ahc->features & AHC_CMD_CHAN) == 0
		 && opcode == AIC_OP_BMOV) {
			/*
			 * Block move was added at the same time
			 * as the command channel.
Verify that 6422 * this is only a move of a single element 6423 * and convert the BMOV to a MOV 6424 * (AND with an immediate of FF). 6425 */ 6426 if (fmt1_ins->immediate != 1) 6427 panic("%s: BMOV not supported\n", 6428 ahc_name(ahc)); 6429 fmt1_ins->opcode = AIC_OP_AND; 6430 fmt1_ins->immediate = 0xff; 6431 } 6432 /* FALLTHROUGH */ 6433 case AIC_OP_ROL: 6434 if ((ahc->features & AHC_ULTRA2) != 0) { 6435 int i, count; 6436 6437 /* Calculate odd parity for the instruction */ 6438 for (i = 0, count = 0; i < 31; i++) { 6439 uint32_t mask; 6440 6441 mask = 0x01 << i; 6442 if ((instr.integer & mask) != 0) 6443 count++; 6444 } 6445 if ((count & 0x01) == 0) 6446 instr.format1.parity = 1; 6447 } else { 6448 /* Compress the instruction for older sequencers */ 6449 if (fmt3_ins != NULL) { 6450 instr.integer = 6451 fmt3_ins->immediate 6452 | (fmt3_ins->source << 8) 6453 | (fmt3_ins->address << 16) 6454 | (fmt3_ins->opcode << 25); 6455 } else { 6456 instr.integer = 6457 fmt1_ins->immediate 6458 | (fmt1_ins->source << 8) 6459 | (fmt1_ins->destination << 16) 6460 | (fmt1_ins->ret << 24) 6461 | (fmt1_ins->opcode << 25); 6462 } 6463 } 6464 /* The sequencer is a little endian cpu */ 6465 instr.integer = ahc_htole32(instr.integer); 6466 ahc_outsb(ahc, SEQRAM, instr.bytes, 4); 6467 break; 6468 default: 6469 panic("Unknown opcode encountered in seq program"); 6470 break; 6471 } 6472 } 6473 6474 int 6475 ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries, 6476 const char *name, u_int address, u_int value, 6477 u_int *cur_column, u_int wrap_point) 6478 { 6479 int printed; 6480 u_int printed_mask; 6481 6482 if (*cur_column >= wrap_point) { 6483 printf("\n"); 6484 *cur_column = 0; 6485 } 6486 printed = printf("%s[0x%x]", name, value); 6487 if (table == NULL) { 6488 printed += printf(" "); 6489 *cur_column += printed; 6490 return (printed); 6491 } 6492 printed_mask = 0; 6493 while (printed_mask != 0xFF) { 6494 int entry; 6495 6496 for (entry = 0; entry < num_entries; 
		     entry++) {
			/*
			 * Skip entries that don't match the value or whose
			 * bits have already been printed.
			 */
			if (((value & table[entry].mask)
			  != table[entry].value)
			 || ((printed_mask & table[entry].mask)
			  == table[entry].mask))
				continue;

			printed += printf("%s%s",
					  printed_mask == 0 ? ":(" : "|",
					  table[entry].name);
			printed_mask |= table[entry].mask;

			break;
		}
		/* No entry matched the remaining bits; stop decoding. */
		if (entry >= num_entries)
			break;
	}
	if (printed_mask != 0)
		printed += printf(") ");
	else
		printed += printf(" ");
	*cur_column += printed;
	return (printed);
}

/*
 * Dump controller registers and SCB queue state to the console for
 * debugging.  SCBPTR is saved on entry and restored before returning so
 * the sequencer's SCB bank selection is preserved.
 *
 * NOTE(review): this walks on-chip SCB lists by writing SCBPTR, so it
 * presumably requires the chip to be paused by the caller — confirm.
 * The "i++ < 256" caps on the list walks bound the output in case an
 * on-chip list is corrupted into a loop.
 */
void
ahc_dump_card_state(struct ahc_softc *ahc)
{
	struct	scb *scb;
	struct	scb_tailq *untagged_q;
	int	target;
	int	maxtarget;
	int	i;
	uint8_t last_phase;
	uint8_t qinpos;
	uint8_t qintail;
	uint8_t qoutpos;
	uint8_t scb_index;
	uint8_t saved_scbptr;

	saved_scbptr = ahc_inb(ahc, SCBPTR);

	last_phase = ahc_inb(ahc, LASTPHASE);
	printf("%s: Dumping Card State %s, at SEQADDR 0x%x\n",
	       ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg,
	       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
	printf("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n",
	       ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX),
	       ahc_inb(ahc, ARG_2));
	printf("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT),
	       ahc_inb(ahc, SCBPTR));
	printf("SCSISEQ = 0x%x, SBLKCTL = 0x%x\n",
	       ahc_inb(ahc, SCSISEQ), ahc_inb(ahc, SBLKCTL));
	printf(" DFCNTRL = 0x%x, DFSTATUS = 0x%x\n",
	       ahc_inb(ahc, DFCNTRL), ahc_inb(ahc, DFSTATUS));
	printf("LASTPHASE = 0x%x, SCSISIGI = 0x%x, SXFRCTL0 = 0x%x\n",
	       last_phase, ahc_inb(ahc, SCSISIGI), ahc_inb(ahc, SXFRCTL0));
	printf("SSTAT0 = 0x%x, SSTAT1 = 0x%x\n",
	       ahc_inb(ahc, SSTAT0), ahc_inb(ahc, SSTAT1));
	if ((ahc->features & AHC_DT) != 0)
		printf("SCSIPHASE = 0x%x\n", ahc_inb(ahc, SCSIPHASE));
	/*
	 * NOTE(review): each 16-bit stack word is assembled from two
	 * consecutive reads of STACK — presumably the register
	 * auto-advances through the stack on each read; confirm.
	 */
	printf("STACK == 0x%x, 0x%x, 0x%x, 0x%x\n",
	       ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
	       ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
	       ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
	       ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8));
	printf("SCB count = %d\n", ahc->scb_data->numscbs);
	printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag);
	printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB));
	/* QINFIFO */
	printf("QINFIFO entries: ");
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		/* Reading SNSCB_QOFF has side effects; write it back. */
		qinpos = ahc_inb(ahc, SNSCB_QOFF);
		ahc_outb(ahc, SNSCB_QOFF, qinpos);
	} else
		qinpos = ahc_inb(ahc, QINPOS);
	qintail = ahc->qinfifonext;
	while (qinpos != qintail) {
		printf("%d ", ahc->qinfifo[qinpos]);
		qinpos++;
	}
	printf("\n");

	printf("Waiting Queue entries: ");
	scb_index = ahc_inb(ahc, WAITING_SCBH);
	i = 0;
	while (scb_index != SCB_LIST_NULL && i++ < 256) {
		ahc_outb(ahc, SCBPTR, scb_index);
		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
		scb_index = ahc_inb(ahc, SCB_NEXT);
	}
	printf("\n");

	printf("Disconnected Queue entries: ");
	scb_index = ahc_inb(ahc, DISCONNECTED_SCBH);
	i = 0;
	while (scb_index != SCB_LIST_NULL && i++ < 256) {
		ahc_outb(ahc, SCBPTR, scb_index);
		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
		scb_index = ahc_inb(ahc, SCB_NEXT);
	}
	printf("\n");

	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
	printf("QOUTFIFO entries: ");
	qoutpos = ahc->qoutfifonext;
	i = 0;
	while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) {
		printf("%d ", ahc->qoutfifo[qoutpos]);
		qoutpos++;
	}
	printf("\n");

	printf("Sequencer Free SCB List: ");
	scb_index = ahc_inb(ahc, FREE_SCBH);
	i = 0;
	while (scb_index != SCB_LIST_NULL && i++ < 256) {
		ahc_outb(ahc, SCBPTR, scb_index);
		printf("%d ", scb_index);
		scb_index = ahc_inb(ahc, SCB_NEXT);
	}
	printf("\n");

	printf("Sequencer SCB Info: ");
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		ahc_outb(ahc, SCBPTR, i);
		printf("%d(c 0x%x, s 0x%x, l %d, t 0x%x) ",
		       i, ahc_inb(ahc, SCB_CONTROL),
		       ahc_inb(ahc, SCB_SCSIID),
		       ahc_inb(ahc, SCB_LUN),
		       ahc_inb(ahc, SCB_TAG));
	}
	printf("\n");

	printf("Pending list: ");
	i = 0;
	LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
		if (i++ > 256)
			break;
		if (scb != LIST_FIRST(&ahc->pending_scbs))
			printf(", ");
		printf("%d(c 0x%x, s 0x%x, l %d)", scb->hscb->tag,
		       scb->hscb->control, scb->hscb->scsiid, scb->hscb->lun);
		if ((ahc->flags & AHC_PAGESCBS) == 0) {
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
			printf("(0x%x, 0x%x)", ahc_inb(ahc, SCB_CONTROL),
			       ahc_inb(ahc, SCB_TAG));
		}
	}
	printf("\n");

	printf("Kernel Free SCB list: ");
	i = 0;
	SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) {
		if (i++ > 256)
			break;
		printf("%d ", scb->hscb->tag);
	}
	printf("\n");

	/* Wide or twin-channel controllers address 16 targets. */
	maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7;
	for (target = 0; target <= maxtarget; target++) {
		untagged_q = &ahc->untagged_queues[target];
		if (TAILQ_FIRST(untagged_q) == NULL)
			continue;
		printf("Untagged Q(%d): ", target);
		i = 0;
		TAILQ_FOREACH(scb, untagged_q, links.tqe) {
			if (i++ > 256)
				break;
			printf("%d ", scb->hscb->tag);
		}
		printf("\n");
	}

	ahc_platform_dump_card_state(ahc);
	/* Restore the sequencer's SCB bank selection. */
	ahc_outb(ahc, SCBPTR, saved_scbptr);
}

/************************* Target Mode ****************************************/
#ifdef AHC_TARGET_MODE
/*
 * Look up the target-mode tstate/lstate pair addressed by 'ccb'.
 * The wildcard target+lun combination maps to the "black hole" lstate
 * that absorbs requests for unattached luns.  Returns a CAM status;
 * when notfound_failure is non-zero, a missing lstate is reported as
 * CAM_PATH_INVALID rather than success.
 */
cam_status
ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb,
		    struct ahc_tmode_tstate **tstate,
		    struct ahc_tmode_lstate **lstate,
		    int notfound_failure)
{

	if ((ahc->features & AHC_TARGETMODE) == 0)
		return (CAM_REQ_INVALID);

	/*
	 * Handle the 'black hole' device that sucks up
	 * requests to unattached luns on enabled targets.
	 */
	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
	 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
		*tstate = NULL;
		*lstate = ahc->black_hole;
	} else {
		u_int max_id;

		/* Wide controllers support target IDs 0-15. */
		max_id = (ahc->features & AHC_WIDE) ?
		    15 : 7;
		if (ccb->ccb_h.target_id > max_id)
			return (CAM_TID_INVALID);

		if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS)
			return (CAM_LUN_INVALID);

		*tstate = ahc->enabled_targets[ccb->ccb_h.target_id];
		*lstate = NULL;
		if (*tstate != NULL)
			*lstate =
			    (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
	}

	if (notfound_failure != 0 && *lstate == NULL)
		return (CAM_PATH_INVALID);

	return (CAM_REQ_CMP);
}

/*
 * Service an enable/disable-lun CCB for target mode.
 *
 * On enable: validates the requested target ID against the hardware's
 * capabilities (MULTI_TID, MULTIROLE), switches the controller into the
 * target role if necessary (reloading the sequencer), allocates the
 * tstate/lstate bookkeeping, programs TARGID/SCSIID, and turns on
 * select-in (ENSELI) once at least one lun is enabled.
 *
 * On disable: refuses while CTIOs/ATIOs/INOTs are outstanding, frees the
 * lstate (and tstate when its last lun goes away), clears the target's
 * TARGID bit, disables select-in when no luns remain, and on
 * single-role hardware switches back to the initiator role.
 *
 * Status is returned in ccb->ccb_h.status.
 */
void
ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
{
	struct	   ahc_tmode_tstate *tstate;
	struct	   ahc_tmode_lstate *lstate;
	struct	   ccb_en_lun *cel;
	cam_status status;
	u_int	   target;
	u_int	   lun;
	u_int	   target_mask;
	u_int	   our_id;
	u_long	   s;
	char	   channel;

	status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate,
				     /*notfound_failure*/FALSE);

	if (status != CAM_REQ_CMP) {
		ccb->ccb_h.status = status;
		return;
	}

	if (cam_sim_bus(sim) == 0)
		our_id = ahc->our_id;
	else
		our_id = ahc->our_id_b;

	if (ccb->ccb_h.target_id != our_id) {
		/*
		 * our_id represents our initiator ID, or
		 * the ID of the first target to have an
		 * enabled lun in target mode.  There are
		 * two cases that may preclude enabling a
		 * target id other than our_id.
		 *
		 *   o our_id is for an active initiator role.
		 *     Since the hardware does not support
		 *     reselections to the initiator role at
		 *     anything other than our_id, and our_id
		 *     is used by the hardware to indicate the
		 *     ID to use for both select-out and
		 *     reselect-out operations, the only target
		 *     ID we can support in this mode is our_id.
		 *
		 *   o The MULTARGID feature is not available and
		 *     a previous target mode ID has been enabled.
		 */
		if ((ahc->features & AHC_MULTIROLE) != 0) {

			if ((ahc->features & AHC_MULTI_TID) != 0
			 && (ahc->flags & AHC_INITIATORROLE) != 0) {
				/*
				 * Only allow additional targets if
				 * the initiator role is disabled.
				 * The hardware cannot handle a re-select-in
				 * on the initiator id during a re-select-out
				 * on a different target id.
				 */
				status = CAM_TID_INVALID;
			} else if ((ahc->flags & AHC_INITIATORROLE) != 0
				|| ahc->enabled_luns > 0) {
				/*
				 * Only allow our target id to change
				 * if the initiator role is not configured
				 * and there are no enabled luns which
				 * are attached to the currently registered
				 * scsi id.
				 */
				status = CAM_TID_INVALID;
			}
		} else if ((ahc->features & AHC_MULTI_TID) == 0
			&& ahc->enabled_luns > 0) {

			status = CAM_TID_INVALID;
		}
	}

	if (status != CAM_REQ_CMP) {
		ccb->ccb_h.status = status;
		return;
	}

	/*
	 * We now have an id that is valid.
	 * If we aren't in target mode, switch modes.
	 */
	if ((ahc->flags & AHC_TARGETROLE) == 0
	 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
		u_long	s;

		printf("Configuring Target Mode\n");
		ahc_lock(ahc, &s);
		/* Role switch requires a quiescent controller. */
		if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
			ccb->ccb_h.status = CAM_BUSY;
			ahc_unlock(ahc, &s);
			return;
		}
		ahc->flags |= AHC_TARGETROLE;
		if ((ahc->features & AHC_MULTIROLE) == 0)
			ahc->flags &= ~AHC_INITIATORROLE;
		ahc_pause(ahc);
		ahc_loadseq(ahc);
		ahc_unlock(ahc, &s);
	}
	cel = &ccb->cel;
	target = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;
	channel = SIM_CHANNEL(ahc, sim);
	/* B-channel targets occupy the high byte of the 16-bit mask. */
	target_mask = 0x01 << target;
	if (channel == 'B')
		target_mask <<= 8;

	if (cel->enable != 0) {
		u_int scsiseq;

		/* Are we already enabled?? */
		if (lstate != NULL) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Lun already enabled\n");
			ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
			return;
		}

		if (cel->grp6_len != 0
		 || cel->grp7_len != 0) {
			/*
			 * Don't (yet?) support vendor
			 * specific commands.
			 */
			ccb->ccb_h.status = CAM_REQ_INVALID;
			printf("Non-zero Group Codes\n");
			return;
		}

		/*
		 * Seems to be okay.
		 * Setup our data structures.
		 */
		if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
			tstate = ahc_alloc_tstate(ahc, target, channel);
			if (tstate == NULL) {
				xpt_print_path(ccb->ccb_h.path);
				printf("Couldn't allocate tstate\n");
				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
				return;
			}
		}
		lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
		if (lstate == NULL) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Couldn't allocate lstate\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		}
		memset(lstate, 0, sizeof(*lstate));
		status = xpt_create_path(&lstate->path, /*periph*/NULL,
					 xpt_path_path_id(ccb->ccb_h.path),
					 xpt_path_target_id(ccb->ccb_h.path),
					 xpt_path_lun_id(ccb->ccb_h.path));
		if (status != CAM_REQ_CMP) {
			free(lstate, M_DEVBUF);
			xpt_print_path(ccb->ccb_h.path);
			printf("Couldn't allocate path\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		}
		SLIST_INIT(&lstate->accept_tios);
		SLIST_INIT(&lstate->immed_notifies);
		ahc_lock(ahc, &s);
		ahc_pause(ahc);
		if (target != CAM_TARGET_WILDCARD) {
			tstate->enabled_luns[lun] = lstate;
			ahc->enabled_luns++;

			if ((ahc->features & AHC_MULTI_TID) != 0) {
				u_int targid_mask;

				/* Add this target's bit to TARGID. */
				targid_mask = ahc_inb(ahc, TARGID)
					    | (ahc_inb(ahc, TARGID + 1) << 8);

				targid_mask |= target_mask;
				ahc_outb(ahc, TARGID, targid_mask);
				ahc_outb(ahc, TARGID+1, (targid_mask >> 8));

				ahc_update_scsiid(ahc, targid_mask);
			} else {
				u_int our_id;
				char  channel;

				channel = SIM_CHANNEL(ahc, sim);
				our_id = SIM_SCSI_ID(ahc, sim);

				/*
				 * This can only happen if selections
				 * are not enabled
				 */
				if (target != our_id) {
					u_int sblkctl;
					char  cur_channel;
					int   swap;

					/*
					 * Temporarily bank-switch to the
					 * requested channel if needed so
					 * SCSIID lands on the right bus.
					 */
					sblkctl = ahc_inb(ahc, SBLKCTL);
					cur_channel = (sblkctl & SELBUSB)
						    ? 'B' : 'A';
					if ((ahc->features & AHC_TWIN) == 0)
						cur_channel = 'A';
					swap = cur_channel != channel;
					if (channel == 'A')
						ahc->our_id = target;
					else
						ahc->our_id_b = target;

					if (swap)
						ahc_outb(ahc, SBLKCTL,
							 sblkctl ^ SELBUSB);

					ahc_outb(ahc, SCSIID, target);

					if (swap)
						ahc_outb(ahc, SBLKCTL, sblkctl);
				}
			}
		} else
			ahc->black_hole = lstate;
		/* Allow select-in operations */
		if (ahc->black_hole != NULL && ahc->enabled_luns > 0) {
			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
			scsiseq |= ENSELI;
			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
			scsiseq = ahc_inb(ahc, SCSISEQ);
			scsiseq |= ENSELI;
			ahc_outb(ahc, SCSISEQ, scsiseq);
		}
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_print_path(ccb->ccb_h.path);
		printf("Lun now enabled for target mode\n");
	} else {
		struct scb *scb;
		int i, empty;

		if (lstate == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}

		ahc_lock(ahc, &s);

		ccb->ccb_h.status = CAM_REQ_CMP;
		/* Refuse to disable while CTIOs reference this path. */
		LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
			struct ccb_hdr *ccbh;

			ccbh = &scb->io_ctx->ccb_h;
			if (ccbh->func_code == XPT_CONT_TARGET_IO
			 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
				printf("CTIO pending\n");
				ccb->ccb_h.status = CAM_REQ_INVALID;
				ahc_unlock(ahc, &s);
				return;
			}
		}

		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
			printf("ATIOs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
			printf("INOTs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (ccb->ccb_h.status != CAM_REQ_CMP) {
			ahc_unlock(ahc, &s);
			return;
		}

		xpt_print_path(ccb->ccb_h.path);
		printf("Target mode disabled\n");
		xpt_free_path(lstate->path);
		free(lstate, M_DEVBUF);

		ahc_pause(ahc);
		/* Can we clean up the target too? */
		if (target != CAM_TARGET_WILDCARD) {
			tstate->enabled_luns[lun] = NULL;
			ahc->enabled_luns--;
			/* Free the tstate once its last lun is disabled. */
			for (empty = 1, i = 0; i < 8; i++)
				if (tstate->enabled_luns[i] != NULL) {
					empty = 0;
					break;
				}

			if (empty) {
				ahc_free_tstate(ahc, target, channel,
						/*force*/FALSE);
				if (ahc->features & AHC_MULTI_TID) {
					u_int targid_mask;

					targid_mask = ahc_inb(ahc, TARGID)
						    | (ahc_inb(ahc, TARGID + 1)
						       << 8);

					targid_mask &= ~target_mask;
					ahc_outb(ahc, TARGID, targid_mask);
					ahc_outb(ahc, TARGID+1,
						 (targid_mask >> 8));
					ahc_update_scsiid(ahc, targid_mask);
				}
			}
		} else {

			ahc->black_hole = NULL;

			/*
			 * We can't allow selections without
			 * our black hole device.
			 */
			empty = TRUE;
		}
		if (ahc->enabled_luns == 0) {
			/* Disallow select-in */
			u_int scsiseq;

			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
			scsiseq &= ~ENSELI;
			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
			scsiseq = ahc_inb(ahc, SCSISEQ);
			scsiseq &= ~ENSELI;
			ahc_outb(ahc, SCSISEQ, scsiseq);

			if ((ahc->features & AHC_MULTIROLE) == 0) {
				printf("Configuring Initiator Mode\n");
				ahc->flags &= ~AHC_TARGETROLE;
				ahc->flags |= AHC_INITIATORROLE;
				ahc_pause(ahc);
				ahc_loadseq(ahc);
			}
		}
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
	}
}

/*
 * Keep the OID field of SCSIID consistent with the TARGID selection-enable
 * mask: if OID refers to an ID that is no longer enabled in targid_mask,
 * repoint it at the lowest enabled ID (or back at our initiator ID when
 * the mask is empty).  Only meaningful on MULTI_TID-capable controllers.
 */
static void
ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
{
	u_int scsiid_mask;
	u_int scsiid;

	if ((ahc->features & AHC_MULTI_TID) == 0)
		panic("ahc_update_scsiid called on non-multitid unit\n");

	/*
	 * Since we will rely on the TARGID mask
	 * for selection enables, ensure that OID
	 * in SCSIID is not set to some other ID
	 * that we don't want to allow selections on.
	 */
	if ((ahc->features & AHC_ULTRA2) != 0)
		scsiid = ahc_inb(ahc, SCSIID_ULTRA2);
	else
		scsiid = ahc_inb(ahc, SCSIID);
	scsiid_mask = 0x1 << (scsiid & OID);
	if ((targid_mask & scsiid_mask) == 0) {
		u_int our_id;

		/* ffs counts from 1 */
		our_id = ffs(targid_mask);
		if (our_id == 0)
			our_id = ahc->our_id;
		else
			our_id--;
		scsiid &= TID;
		scsiid |= our_id;
	}
	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc_outb(ahc, SCSIID_ULTRA2, scsiid);
	else
		ahc_outb(ahc, SCSIID, scsiid);
}

/*
 * Drain the target-mode incoming command queue, passing each valid
 * command to ahc_handle_target_cmd().  Our consumer index is published
 * to the sequencer lazily (every HOST_TQINPOS commands) via HS_MAILBOX
 * or, on older chips, KERNEL_TQINPOS under pause.  'paused' tells us
 * whether the chip is already paused by the caller.
 */
void
ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
{
	struct target_cmd *cmd;

	/*
	 * If the card supports auto-access pause,
	 * we can access the card directly regardless
	 * of whether it is paused or not.
	 */
	if ((ahc->features & AHC_AUTOPAUSE) != 0)
		paused = TRUE;

	ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD);
	while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) {

		/*
		 * Only advance through the queue if we
		 * have the resources to process the command.
		 */
		if (ahc_handle_target_cmd(ahc, cmd) != 0)
			break;

		/* Hand the slot back to the controller. */
		cmd->cmd_valid = 0;
		ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
				ahc->shared_data_dmamap,
				ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
				sizeof(struct target_cmd),
				BUS_DMASYNC_PREREAD);
		ahc->tqinfifonext++;

		/*
		 * Lazily update our position in the target mode incoming
		 * command queue as seen by the sequencer.
		 */
		if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
			if ((ahc->features & AHC_HS_MAILBOX) != 0) {
				u_int hs_mailbox;

				hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
				hs_mailbox &= ~HOST_TQINPOS;
				hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS;
				ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
			} else {
				/* KERNEL_TQINPOS requires a paused chip. */
				if (!paused)
					ahc_pause(ahc);
				ahc_outb(ahc, KERNEL_TQINPOS,
					 ahc->tqinfifonext & HOST_TQINPOS);
				if (!paused)
					ahc_unpause(ahc);
			}
		}
	}
}

/*
 * Translate one raw target-mode command from the tqinfifo into an
 * accept-tio CCB for the peripheral driver that has the lun enabled
 * (or the black-hole lstate for disabled luns).
 *
 * Returns 1 when no ATIO is available — the command is left in the
 * fifo and AHC_TQINFIFO_BLOCKED is set so processing resumes once the
 * peripheral driver queues more ATIOs.  Returns 0 when the command has
 * been delivered via xpt_done().
 */
static int
ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
{
	struct	  ahc_tmode_tstate *tstate;
	struct	  ahc_tmode_lstate *lstate;
	struct	  ccb_accept_tio *atio;
	uint8_t *byte;
	int	  initiator;
	int	  target;
	int	  lun;

	initiator = SCSIID_TARGET(ahc, cmd->scsiid);
	target = SCSIID_OUR_ID(cmd->scsiid);
	lun    = (cmd->identify & MSG_IDENTIFY_LUNMASK);

	byte = cmd->bytes;
	tstate = ahc->enabled_targets[target];
	lstate = NULL;
	if (tstate != NULL)
		lstate = tstate->enabled_luns[lun];

	/*
	 * Commands for disabled luns go to the black hole driver.
	 */
	if (lstate == NULL)
		lstate = ahc->black_hole;

	atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
	if (atio == NULL) {
		ahc->flags |= AHC_TQINFIFO_BLOCKED;
		/*
		 * Wait for more ATIOs from the peripheral driver for this lun.
		 */
		if (bootverbose)
			printf("%s: ATIOs exhausted\n", ahc_name(ahc));
		return (1);
	} else
		ahc->flags &= ~AHC_TQINFIFO_BLOCKED;
#if 0
	printf("Incoming command from %d for %d:%d%s\n",
	       initiator, target, lun,
	       lstate == ahc->black_hole ? "(Black Holed)" : "");
#endif
	SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);

	if (lstate == ahc->black_hole) {
		/* Fill in the wildcards */
		atio->ccb_h.target_id = target;
		atio->ccb_h.target_lun = lun;
	}

	/*
	 * Package it up and send it off to
	 * whomever has this lun enabled.
	 */
	atio->sense_len = 0;
	atio->init_id = initiator;
	/* A 0xFF first byte marks an untagged command. */
	if (byte[0] != 0xFF) {
		/* Tag was included */
		atio->tag_action = *byte++;
		atio->tag_id = *byte++;
		atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
	} else {
		atio->ccb_h.flags = 0;
	}
	byte++;

	/* Okay.  Now determine the cdb size based on the command code */
	switch (*byte >> CMD_GROUP_CODE_SHIFT) {
	case 0:
		atio->cdb_len = 6;
		break;
	case 1:
	case 2:
		atio->cdb_len = 10;
		break;
	case 4:
		atio->cdb_len = 16;
		break;
	case 5:
		atio->cdb_len = 12;
		break;
	case 3:
	default:
		/* Only copy the opcode. */
		atio->cdb_len = 1;
		printf("Reserved or VU command code type encountered\n");
		break;
	}

	memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);

	atio->ccb_h.status |= CAM_CDB_RECVD;

	if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
		/*
		 * We weren't allowed to disconnect.
		 * We're hanging on the bus until a
		 * continue target I/O comes in response
		 * to this accept tio.
		 */
#if 0
		printf("Received Immediate Command %d:%d:%d - %p\n",
		       initiator, target, lun, ahc->pending_device);
#endif
		ahc->pending_device = lstate;
		ahc_freeze_ccb((union ccb *)atio);
		atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
	}
	xpt_done((union ccb*)atio);
	return (0);
}

#endif