1 /* 2 * Core routines and tables shareable across OS platforms. 3 * 4 * Copyright (c) 1994-2002 Justin T. Gibbs. 5 * Copyright (c) 2000-2002 Adaptec Inc. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions, and the following disclaimer, 13 * without modification. 14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 15 * substantially similar to the "NO WARRANTY" disclaimer below 16 * ("Disclaimer") and any redistribution must be conditioned upon 17 * including a substantially similar Disclaimer requirement for further 18 * binary redistribution. 19 * 3. Neither the names of the above-listed copyright holders nor the names 20 * of any contributors may be used to endorse or promote products derived 21 * from this software without specific prior written permission. 22 * 23 * Alternatively, this software may be distributed under the terms of the 24 * GNU General Public License ("GPL") version 2 as published by the Free 25 * Software Foundation. 26 * 27 * NO WARRANTY 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 32 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 36 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 37 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 38 * POSSIBILITY OF SUCH DAMAGES. 39 * 40 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#134 $ 41 */ 42 43 #include <sys/cdefs.h> 44 __FBSDID("$FreeBSD$"); 45 46 #ifdef __linux__ 47 #include "aic7xxx_osm.h" 48 #include "aic7xxx_inline.h" 49 #include "aicasm/aicasm_insformat.h" 50 #else 51 #include <dev/aic7xxx/aic7xxx_osm.h> 52 #include <dev/aic7xxx/aic7xxx_inline.h> 53 #include <dev/aic7xxx/aicasm/aicasm_insformat.h> 54 #endif 55 56 /****************************** Softc Data ************************************/ 57 struct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq); 58 59 /***************************** Lookup Tables **********************************/ 60 char *ahc_chip_names[] = 61 { 62 "NONE", 63 "aic7770", 64 "aic7850", 65 "aic7855", 66 "aic7859", 67 "aic7860", 68 "aic7870", 69 "aic7880", 70 "aic7895", 71 "aic7895C", 72 "aic7890/91", 73 "aic7896/97", 74 "aic7892", 75 "aic7899" 76 }; 77 static const u_int num_chip_names = NUM_ELEMENTS(ahc_chip_names); 78 79 /* 80 * Hardware error codes. 81 */ 82 struct ahc_hard_error_entry { 83 uint8_t errno; 84 char *errmesg; 85 }; 86 87 static struct ahc_hard_error_entry ahc_hard_errors[] = { 88 { ILLHADDR, "Illegal Host Access" }, 89 { ILLSADDR, "Illegal Sequencer Address referrenced" }, 90 { ILLOPCODE, "Illegal Opcode in sequencer program" }, 91 { SQPARERR, "Sequencer Parity Error" }, 92 { DPARERR, "Data-path Parity Error" }, 93 { MPARERR, "Scratch or SCB Memory Parity Error" }, 94 { PCIERRSTAT, "PCI Error detected" }, 95 { CIOPARERR, "CIOBUS Parity Error" }, 96 }; 97 static const u_int num_errors = NUM_ELEMENTS(ahc_hard_errors); 98 99 static struct ahc_phase_table_entry ahc_phase_table[] = 100 { 101 { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, 102 { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, 103 { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" }, 104 { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" }, 105 { P_COMMAND, MSG_NOOP, "in Command phase" }, 106 { P_MESGOUT, MSG_NOOP, "in Message-out phase" }, 107 { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" }, 108 { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, 109 { P_BUSFREE, MSG_NOOP, "while idle" }, 110 { 0, MSG_NOOP, "in unknown phase" } 111 }; 112 113 /* 114 * In most cases we only wish to itterate over real phases, so 115 * exclude the last element from the count. 116 */ 117 static const u_int num_phases = NUM_ELEMENTS(ahc_phase_table) - 1; 118 119 /* 120 * Valid SCSIRATE values. (p. 3-17) 121 * Provides a mapping of tranfer periods in ns to the proper value to 122 * stick in the scsixfer reg. 123 */ 124 static struct ahc_syncrate ahc_syncrates[] = 125 { 126 /* ultra2 fast/ultra period rate */ 127 { 0x42, 0x000, 9, "80.0" }, 128 { 0x03, 0x000, 10, "40.0" }, 129 { 0x04, 0x000, 11, "33.0" }, 130 { 0x05, 0x100, 12, "20.0" }, 131 { 0x06, 0x110, 15, "16.0" }, 132 { 0x07, 0x120, 18, "13.4" }, 133 { 0x08, 0x000, 25, "10.0" }, 134 { 0x19, 0x010, 31, "8.0" }, 135 { 0x1a, 0x020, 37, "6.67" }, 136 { 0x1b, 0x030, 43, "5.7" }, 137 { 0x1c, 0x040, 50, "5.0" }, 138 { 0x00, 0x050, 56, "4.4" }, 139 { 0x00, 0x060, 62, "4.0" }, 140 { 0x00, 0x070, 68, "3.6" }, 141 { 0x00, 0x000, 0, NULL } 142 }; 143 144 /* Our Sequencer Program */ 145 #include "aic7xxx_seq.h" 146 147 /**************************** Function Declarations ***************************/ 148 static void ahc_force_renegotiation(struct ahc_softc *ahc, 149 struct ahc_devinfo *devinfo); 150 static struct ahc_tmode_tstate* 151 ahc_alloc_tstate(struct ahc_softc *ahc, 152 u_int scsi_id, char channel); 153 #ifdef AHC_TARGET_MODE 154 static void ahc_free_tstate(struct ahc_softc *ahc, 155 u_int scsi_id, char channel, int force); 156 #endif 157 static struct ahc_syncrate* 158 ahc_devlimited_syncrate(struct ahc_softc *ahc, 159 struct ahc_initiator_tinfo *, 160 u_int *period, 161 u_int *ppr_options, 162 role_t role); 163 static void ahc_update_pending_scbs(struct ahc_softc *ahc); 164 static void ahc_fetch_devinfo(struct ahc_softc *ahc, 165 struct ahc_devinfo *devinfo); 166 static void ahc_scb_devinfo(struct ahc_softc *ahc, 167 struct ahc_devinfo *devinfo, 168 struct scb *scb); 169 static void ahc_assert_atn(struct ahc_softc *ahc); 170 static void ahc_setup_initiator_msgout(struct ahc_softc *ahc, 171 struct ahc_devinfo *devinfo, 172 struct scb *scb); 173 static void ahc_build_transfer_msg(struct ahc_softc *ahc, 174 struct ahc_devinfo *devinfo); 175 static void ahc_construct_sdtr(struct ahc_softc *ahc, 176 struct ahc_devinfo *devinfo, 177 u_int period, u_int offset); 178 static void ahc_construct_wdtr(struct ahc_softc *ahc, 179 struct ahc_devinfo *devinfo, 180 u_int bus_width); 181 static void ahc_construct_ppr(struct ahc_softc *ahc, 182 struct ahc_devinfo *devinfo, 183 u_int period, u_int offset, 184 u_int bus_width, u_int ppr_options); 185 static void ahc_clear_msg_state(struct ahc_softc *ahc); 186 static void ahc_handle_proto_violation(struct ahc_softc *ahc); 187 static void ahc_handle_message_phase(struct ahc_softc *ahc); 188 typedef enum { 189 AHCMSG_1B, 190 AHCMSG_2B, 191 AHCMSG_EXT 192 } ahc_msgtype; 193 static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, 194 u_int msgval, int full); 195 static int ahc_parse_msg(struct ahc_softc *ahc, 196 struct ahc_devinfo *devinfo); 197 static int ahc_handle_msg_reject(struct ahc_softc *ahc, 198 struct ahc_devinfo *devinfo); 199 static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc, 200 struct ahc_devinfo *devinfo); 201 static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc); 202 static void ahc_handle_devreset(struct ahc_softc *ahc, 203 struct ahc_devinfo *devinfo, 204 cam_status status, char *message, 205 int verbose_level); 206 #ifdef AHC_TARGET_MODE 207 static void ahc_setup_target_msgin(struct ahc_softc *ahc, 208 struct ahc_devinfo *devinfo, 209 struct scb *scb); 210 #endif 211 212 static bus_dmamap_callback_t ahc_dmamap_cb; 213 static void ahc_build_free_scb_list(struct ahc_softc *ahc); 214 static int ahc_init_scbdata(struct ahc_softc *ahc); 215 static void ahc_fini_scbdata(struct ahc_softc *ahc); 216 static void ahc_qinfifo_requeue(struct ahc_softc *ahc, 217 struct scb *prev_scb, 218 struct scb *scb); 219 static int ahc_qinfifo_count(struct ahc_softc *ahc); 220 static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, 221 u_int prev, u_int scbptr); 222 static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc); 223 static u_int ahc_rem_wscb(struct ahc_softc *ahc, 224 u_int scbpos, u_int prev); 225 static void ahc_reset_current_bus(struct ahc_softc *ahc); 226 #ifdef AHC_DUMP_SEQ 227 static void ahc_dumpseq(struct ahc_softc *ahc); 228 #endif 229 static int ahc_loadseq(struct ahc_softc *ahc); 230 static int ahc_check_patch(struct ahc_softc *ahc, 231 struct patch **start_patch, 232 u_int start_instr, u_int *skip_addr); 233 static void ahc_download_instr(struct ahc_softc *ahc, 234 u_int instrptr, uint8_t *dconsts); 235 #ifdef AHC_TARGET_MODE 236 static void ahc_queue_lstate_event(struct ahc_softc *ahc, 237 struct ahc_tmode_lstate *lstate, 238 u_int initiator_id, 239 u_int event_type, 240 u_int event_arg); 241 static void ahc_update_scsiid(struct ahc_softc *ahc, 242 u_int targid_mask); 243 static int ahc_handle_target_cmd(struct ahc_softc *ahc, 244 struct target_cmd *cmd); 245 #endif 246 /************************* Sequencer Execution Control ************************/ 247 /* 248 * Restart the sequencer program from address zero 249 */ 250 void 251 ahc_restart(struct ahc_softc *ahc) 252 { 253 254 ahc_pause(ahc); 255 256 /* No more pending messages. */ 257 ahc_clear_msg_state(ahc); 258 259 ahc_outb(ahc, SCSISIGO, 0); /* De-assert BSY */ 260 ahc_outb(ahc, MSG_OUT, MSG_NOOP); /* No message to send */ 261 ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); 262 ahc_outb(ahc, LASTPHASE, P_BUSFREE); 263 ahc_outb(ahc, SAVED_SCSIID, 0xFF); 264 ahc_outb(ahc, SAVED_LUN, 0xFF); 265 266 /* 267 * Ensure that the sequencer's idea of TQINPOS 268 * matches our own. The sequencer increments TQINPOS 269 * only after it sees a DMA complete and a reset could 270 * occur before the increment leaving the kernel to believe 271 * the command arrived but the sequencer to not. 272 */ 273 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); 274 275 /* Always allow reselection */ 276 ahc_outb(ahc, SCSISEQ, 277 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP)); 278 if ((ahc->features & AHC_CMD_CHAN) != 0) { 279 /* Ensure that no DMA operations are in progress */ 280 ahc_outb(ahc, CCSCBCNT, 0); 281 ahc_outb(ahc, CCSGCTL, 0); 282 ahc_outb(ahc, CCSCBCTL, 0); 283 } 284 /* 285 * If we were in the process of DMA'ing SCB data into 286 * an SCB, replace that SCB on the free list. This prevents 287 * an SCB leak. 288 */ 289 if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) { 290 ahc_add_curscb_to_free_list(ahc); 291 ahc_outb(ahc, SEQ_FLAGS2, 292 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA); 293 } 294 ahc_outb(ahc, MWI_RESIDUAL, 0); 295 ahc_outb(ahc, SEQCTL, ahc->seqctl); 296 ahc_outb(ahc, SEQADDR0, 0); 297 ahc_outb(ahc, SEQADDR1, 0); 298 ahc_unpause(ahc); 299 } 300 301 /************************* Input/Output Queues ********************************/ 302 void 303 ahc_run_qoutfifo(struct ahc_softc *ahc) 304 { 305 struct scb *scb; 306 u_int scb_index; 307 308 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); 309 while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) { 310 311 scb_index = ahc->qoutfifo[ahc->qoutfifonext]; 312 if ((ahc->qoutfifonext & 0x03) == 0x03) { 313 u_int modnext; 314 315 /* 316 * Clear 32bits of QOUTFIFO at a time 317 * so that we don't clobber an incoming 318 * byte DMA to the array on architectures 319 * that only support 32bit load and store 320 * operations. 321 */ 322 modnext = ahc->qoutfifonext & ~0x3; 323 *((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL; 324 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, 325 ahc->shared_data_dmamap, 326 /*offset*/modnext, /*len*/4, 327 BUS_DMASYNC_PREREAD); 328 } 329 ahc->qoutfifonext++; 330 331 scb = ahc_lookup_scb(ahc, scb_index); 332 if (scb == NULL) { 333 printf("%s: WARNING no command for scb %d " 334 "(cmdcmplt)\nQOUTPOS = %d\n", 335 ahc_name(ahc), scb_index, 336 (ahc->qoutfifonext - 1) & 0xFF); 337 continue; 338 } 339 340 /* 341 * Save off the residual 342 * if there is one. 343 */ 344 ahc_update_residual(ahc, scb); 345 ahc_done(ahc, scb); 346 } 347 } 348 349 void 350 ahc_run_untagged_queues(struct ahc_softc *ahc) 351 { 352 int i; 353 354 for (i = 0; i < 16; i++) 355 ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]); 356 } 357 358 void 359 ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue) 360 { 361 struct scb *scb; 362 363 if (ahc->untagged_queue_lock != 0) 364 return; 365 366 if ((scb = TAILQ_FIRST(queue)) != NULL 367 && (scb->flags & SCB_ACTIVE) == 0) { 368 scb->flags |= SCB_ACTIVE; 369 ahc_queue_scb(ahc, scb); 370 } 371 } 372 373 /************************* Interrupt Handling *********************************/ 374 void 375 ahc_handle_brkadrint(struct ahc_softc *ahc) 376 { 377 /* 378 * We upset the sequencer :-( 379 * Lookup the error message 380 */ 381 int i; 382 int error; 383 384 error = ahc_inb(ahc, ERROR); 385 for (i = 0; error != 1 && i < num_errors; i++) 386 error >>= 1; 387 printf("%s: brkadrint, %s at seqaddr = 0x%x\n", 388 ahc_name(ahc), ahc_hard_errors[i].errmesg, 389 ahc_inb(ahc, SEQADDR0) | 390 (ahc_inb(ahc, SEQADDR1) << 8)); 391 392 ahc_dump_card_state(ahc); 393 394 /* Tell everyone that this HBA is no longer available */ 395 ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS, 396 CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, 397 CAM_NO_HBA); 398 399 /* Disable all interrupt sources by resetting the controller */ 400 ahc_shutdown(ahc); 401 } 402 403 void 404 ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat) 405 { 406 struct scb *scb; 407 struct ahc_devinfo devinfo; 408 409 ahc_fetch_devinfo(ahc, &devinfo); 410 411 /* 412 * Clear the upper byte that holds SEQINT status 413 * codes and clear the SEQINT bit. We will unpause 414 * the sequencer, if appropriate, after servicing 415 * the request. 416 */ 417 ahc_outb(ahc, CLRINT, CLRSEQINT); 418 switch (intstat & SEQINT_MASK) { 419 case BAD_STATUS: 420 { 421 u_int scb_index; 422 struct hardware_scb *hscb; 423 424 /* 425 * Set the default return value to 0 (don't 426 * send sense). The sense code will change 427 * this if needed. 428 */ 429 ahc_outb(ahc, RETURN_1, 0); 430 431 /* 432 * The sequencer will notify us when a command 433 * has an error that would be of interest to 434 * the kernel. This allows us to leave the sequencer 435 * running in the common case of command completes 436 * without error. The sequencer will already have 437 * dma'd the SCB back up to us, so we can reference 438 * the in kernel copy directly. 439 */ 440 scb_index = ahc_inb(ahc, SCB_TAG); 441 scb = ahc_lookup_scb(ahc, scb_index); 442 if (scb == NULL) { 443 ahc_print_devinfo(ahc, &devinfo); 444 printf("ahc_intr - referenced scb " 445 "not valid during seqint 0x%x scb(%d)\n", 446 intstat, scb_index); 447 ahc_dump_card_state(ahc); 448 panic("for safety"); 449 goto unpause; 450 } 451 452 hscb = scb->hscb; 453 454 /* Don't want to clobber the original sense code */ 455 if ((scb->flags & SCB_SENSE) != 0) { 456 /* 457 * Clear the SCB_SENSE Flag and have 458 * the sequencer do a normal command 459 * complete. 460 */ 461 scb->flags &= ~SCB_SENSE; 462 ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); 463 break; 464 } 465 ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); 466 /* Freeze the queue until the client sees the error. */ 467 ahc_freeze_devq(ahc, scb); 468 ahc_freeze_scb(scb); 469 ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status); 470 switch (hscb->shared_data.status.scsi_status) { 471 case SCSI_STATUS_OK: 472 printf("%s: Interrupted for staus of 0???\n", 473 ahc_name(ahc)); 474 break; 475 case SCSI_STATUS_CMD_TERMINATED: 476 case SCSI_STATUS_CHECK_COND: 477 { 478 struct ahc_dma_seg *sg; 479 struct scsi_sense *sc; 480 struct ahc_initiator_tinfo *targ_info; 481 struct ahc_tmode_tstate *tstate; 482 struct ahc_transinfo *tinfo; 483 #ifdef AHC_DEBUG 484 if (ahc_debug & AHC_SHOW_SENSE) { 485 ahc_print_path(ahc, scb); 486 printf("SCB %d: requests Check Status\n", 487 scb->hscb->tag); 488 } 489 #endif 490 491 if (ahc_perform_autosense(scb) == 0) 492 break; 493 494 targ_info = ahc_fetch_transinfo(ahc, 495 devinfo.channel, 496 devinfo.our_scsiid, 497 devinfo.target, 498 &tstate); 499 tinfo = &targ_info->curr; 500 sg = scb->sg_list; 501 sc = (struct scsi_sense *)(&hscb->shared_data.cdb); 502 /* 503 * Save off the residual if there is one. 504 */ 505 ahc_update_residual(ahc, scb); 506 #ifdef AHC_DEBUG 507 if (ahc_debug & AHC_SHOW_SENSE) { 508 ahc_print_path(ahc, scb); 509 printf("Sending Sense\n"); 510 } 511 #endif 512 sg->addr = ahc_get_sense_bufaddr(ahc, scb); 513 sg->len = ahc_get_sense_bufsize(ahc, scb); 514 sg->len |= AHC_DMA_LAST_SEG; 515 516 /* Fixup byte order */ 517 sg->addr = ahc_htole32(sg->addr); 518 sg->len = ahc_htole32(sg->len); 519 520 sc->opcode = REQUEST_SENSE; 521 sc->byte2 = 0; 522 if (tinfo->protocol_version <= SCSI_REV_2 523 && SCB_GET_LUN(scb) < 8) 524 sc->byte2 = SCB_GET_LUN(scb) << 5; 525 sc->unused[0] = 0; 526 sc->unused[1] = 0; 527 sc->length = sg->len; 528 sc->control = 0; 529 530 /* 531 * We can't allow the target to disconnect. 532 * This will be an untagged transaction and 533 * having the target disconnect will make this 534 * transaction indestinguishable from outstanding 535 * tagged transactions. 536 */ 537 hscb->control = 0; 538 539 /* 540 * This request sense could be because the 541 * the device lost power or in some other 542 * way has lost our transfer negotiations. 543 * Renegotiate if appropriate. Unit attention 544 * errors will be reported before any data 545 * phases occur. 546 */ 547 if (ahc_get_residual(scb) 548 == ahc_get_transfer_length(scb)) { 549 ahc_update_neg_request(ahc, &devinfo, 550 tstate, targ_info, 551 AHC_NEG_IF_NON_ASYNC); 552 } 553 if (tstate->auto_negotiate & devinfo.target_mask) { 554 hscb->control |= MK_MESSAGE; 555 scb->flags &= ~SCB_NEGOTIATE; 556 scb->flags |= SCB_AUTO_NEGOTIATE; 557 } 558 hscb->cdb_len = sizeof(*sc); 559 hscb->dataptr = sg->addr; 560 hscb->datacnt = sg->len; 561 hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID; 562 hscb->sgptr = ahc_htole32(hscb->sgptr); 563 scb->sg_count = 1; 564 scb->flags |= SCB_SENSE; 565 ahc_qinfifo_requeue_tail(ahc, scb); 566 ahc_outb(ahc, RETURN_1, SEND_SENSE); 567 /* 568 * Ensure we have enough time to actually 569 * retrieve the sense. 570 */ 571 ahc_scb_timer_reset(scb, 5 * 1000000); 572 break; 573 } 574 default: 575 break; 576 } 577 break; 578 } 579 case NO_MATCH: 580 { 581 /* Ensure we don't leave the selection hardware on */ 582 ahc_outb(ahc, SCSISEQ, 583 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 584 585 printf("%s:%c:%d: no active SCB for reconnecting " 586 "target - issuing BUS DEVICE RESET\n", 587 ahc_name(ahc), devinfo.channel, devinfo.target); 588 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 589 "ARG_1 == 0x%x ACCUM = 0x%x\n", 590 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), 591 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); 592 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 593 "SINDEX == 0x%x\n", 594 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), 595 ahc_index_busy_tcl(ahc, 596 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), 597 ahc_inb(ahc, SAVED_LUN))), 598 ahc_inb(ahc, SINDEX)); 599 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 600 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", 601 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), 602 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), 603 ahc_inb(ahc, SCB_CONTROL)); 604 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", 605 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); 606 printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0)); 607 printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL)); 608 ahc_dump_card_state(ahc); 609 ahc->msgout_buf[0] = MSG_BUS_DEV_RESET; 610 ahc->msgout_len = 1; 611 ahc->msgout_index = 0; 612 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 613 ahc_outb(ahc, MSG_OUT, HOST_MSG); 614 ahc_assert_atn(ahc); 615 break; 616 } 617 case SEND_REJECT: 618 { 619 u_int rejbyte = ahc_inb(ahc, ACCUM); 620 printf("%s:%c:%d: Warning - unknown message received from " 621 "target (0x%x). Rejecting\n", 622 ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte); 623 break; 624 } 625 case PROTO_VIOLATION: 626 { 627 ahc_handle_proto_violation(ahc); 628 break; 629 } 630 case IGN_WIDE_RES: 631 ahc_handle_ign_wide_residue(ahc, &devinfo); 632 break; 633 case PDATA_REINIT: 634 ahc_reinitialize_dataptrs(ahc); 635 break; 636 case BAD_PHASE: 637 { 638 u_int lastphase; 639 640 lastphase = ahc_inb(ahc, LASTPHASE); 641 printf("%s:%c:%d: unknown scsi bus phase %x, " 642 "lastphase = 0x%x. Attempting to continue\n", 643 ahc_name(ahc), devinfo.channel, devinfo.target, 644 lastphase, ahc_inb(ahc, SCSISIGI)); 645 break; 646 } 647 case MISSED_BUSFREE: 648 { 649 u_int lastphase; 650 651 lastphase = ahc_inb(ahc, LASTPHASE); 652 printf("%s:%c:%d: Missed busfree. " 653 "Lastphase = 0x%x, Curphase = 0x%x\n", 654 ahc_name(ahc), devinfo.channel, devinfo.target, 655 lastphase, ahc_inb(ahc, SCSISIGI)); 656 ahc_restart(ahc); 657 return; 658 } 659 case HOST_MSG_LOOP: 660 { 661 /* 662 * The sequencer has encountered a message phase 663 * that requires host assistance for completion. 664 * While handling the message phase(s), we will be 665 * notified by the sequencer after each byte is 666 * transfered so we can track bus phase changes. 667 * 668 * If this is the first time we've seen a HOST_MSG_LOOP 669 * interrupt, initialize the state of the host message 670 * loop. 671 */ 672 if (ahc->msg_type == MSG_TYPE_NONE) { 673 struct scb *scb; 674 u_int scb_index; 675 u_int bus_phase; 676 677 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 678 if (bus_phase != P_MESGIN 679 && bus_phase != P_MESGOUT) { 680 printf("ahc_intr: HOST_MSG_LOOP bad " 681 "phase 0x%x\n", 682 bus_phase); 683 /* 684 * Probably transitioned to bus free before 685 * we got here. Just punt the message. 686 */ 687 ahc_clear_intstat(ahc); 688 ahc_restart(ahc); 689 return; 690 } 691 692 scb_index = ahc_inb(ahc, SCB_TAG); 693 scb = ahc_lookup_scb(ahc, scb_index); 694 if (devinfo.role == ROLE_INITIATOR) { 695 if (scb == NULL) 696 panic("HOST_MSG_LOOP with " 697 "invalid SCB %x\n", scb_index); 698 699 if (bus_phase == P_MESGOUT) 700 ahc_setup_initiator_msgout(ahc, 701 &devinfo, 702 scb); 703 else { 704 ahc->msg_type = 705 MSG_TYPE_INITIATOR_MSGIN; 706 ahc->msgin_index = 0; 707 } 708 } 709 #ifdef AHC_TARGET_MODE 710 else { 711 if (bus_phase == P_MESGOUT) { 712 ahc->msg_type = 713 MSG_TYPE_TARGET_MSGOUT; 714 ahc->msgin_index = 0; 715 } 716 else 717 ahc_setup_target_msgin(ahc, 718 &devinfo, 719 scb); 720 } 721 #endif 722 } 723 724 ahc_handle_message_phase(ahc); 725 break; 726 } 727 case PERR_DETECTED: 728 { 729 /* 730 * If we've cleared the parity error interrupt 731 * but the sequencer still believes that SCSIPERR 732 * is true, it must be that the parity error is 733 * for the currently presented byte on the bus, 734 * and we are not in a phase (data-in) where we will 735 * eventually ack this byte. Ack the byte and 736 * throw it away in the hope that the target will 737 * take us to message out to deliver the appropriate 738 * error message. 739 */ 740 if ((intstat & SCSIINT) == 0 741 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) { 742 743 if ((ahc->features & AHC_DT) == 0) { 744 u_int curphase; 745 746 /* 747 * The hardware will only let you ack bytes 748 * if the expected phase in SCSISIGO matches 749 * the current phase. Make sure this is 750 * currently the case. 751 */ 752 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 753 ahc_outb(ahc, LASTPHASE, curphase); 754 ahc_outb(ahc, SCSISIGO, curphase); 755 } 756 if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) == 0) { 757 int wait; 758 759 /* 760 * In a data phase. Faster to bitbucket 761 * the data than to individually ack each 762 * byte. This is also the only strategy 763 * that will work with AUTOACK enabled. 764 */ 765 ahc_outb(ahc, SXFRCTL1, 766 ahc_inb(ahc, SXFRCTL1) | BITBUCKET); 767 wait = 5000; 768 while (--wait != 0) { 769 if ((ahc_inb(ahc, SCSISIGI) 770 & (CDI|MSGI)) != 0) 771 break; 772 ahc_delay(100); 773 } 774 ahc_outb(ahc, SXFRCTL1, 775 ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); 776 if (wait == 0) { 777 struct scb *scb; 778 u_int scb_index; 779 780 ahc_print_devinfo(ahc, &devinfo); 781 printf("Unable to clear parity error. " 782 "Resetting bus.\n"); 783 scb_index = ahc_inb(ahc, SCB_TAG); 784 scb = ahc_lookup_scb(ahc, scb_index); 785 if (scb != NULL) 786 ahc_set_transaction_status(scb, 787 CAM_UNCOR_PARITY); 788 ahc_reset_channel(ahc, devinfo.channel, 789 /*init reset*/TRUE); 790 } 791 } else { 792 ahc_inb(ahc, SCSIDATL); 793 } 794 } 795 break; 796 } 797 case DATA_OVERRUN: 798 { 799 /* 800 * When the sequencer detects an overrun, it 801 * places the controller in "BITBUCKET" mode 802 * and allows the target to complete its transfer. 803 * Unfortunately, none of the counters get updated 804 * when the controller is in this mode, so we have 805 * no way of knowing how large the overrun was. 806 */ 807 u_int scbindex = ahc_inb(ahc, SCB_TAG); 808 u_int lastphase = ahc_inb(ahc, LASTPHASE); 809 u_int i; 810 811 scb = ahc_lookup_scb(ahc, scbindex); 812 for (i = 0; i < num_phases; i++) { 813 if (lastphase == ahc_phase_table[i].phase) 814 break; 815 } 816 ahc_print_path(ahc, scb); 817 printf("data overrun detected %s." 818 " Tag == 0x%x.\n", 819 ahc_phase_table[i].phasemsg, 820 scb->hscb->tag); 821 ahc_print_path(ahc, scb); 822 printf("%s seen Data Phase. Length = %ld. NumSGs = %d.\n", 823 ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't", 824 ahc_get_transfer_length(scb), scb->sg_count); 825 if (scb->sg_count > 0) { 826 for (i = 0; i < scb->sg_count; i++) { 827 828 printf("sg[%d] - Addr 0x%x%x : Length %d\n", 829 i, 830 (ahc_le32toh(scb->sg_list[i].len) >> 24 831 & SG_HIGH_ADDR_BITS), 832 ahc_le32toh(scb->sg_list[i].addr), 833 ahc_le32toh(scb->sg_list[i].len) 834 & AHC_SG_LEN_MASK); 835 } 836 } 837 /* 838 * Set this and it will take effect when the 839 * target does a command complete. 840 */ 841 ahc_freeze_devq(ahc, scb); 842 if ((scb->flags & SCB_SENSE) == 0) { 843 ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR); 844 } else { 845 scb->flags &= ~SCB_SENSE; 846 ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); 847 } 848 ahc_freeze_scb(scb); 849 850 if ((ahc->features & AHC_ULTRA2) != 0) { 851 /* 852 * Clear the channel in case we return 853 * to data phase later. 854 */ 855 ahc_outb(ahc, SXFRCTL0, 856 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); 857 ahc_outb(ahc, SXFRCTL0, 858 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); 859 } 860 if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { 861 u_int dscommand1; 862 863 /* Ensure HHADDR is 0 for future DMA operations. */ 864 dscommand1 = ahc_inb(ahc, DSCOMMAND1); 865 ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); 866 ahc_outb(ahc, HADDR, 0); 867 ahc_outb(ahc, DSCOMMAND1, dscommand1); 868 } 869 break; 870 } 871 case MKMSG_FAILED: 872 { 873 u_int scbindex; 874 875 printf("%s:%c:%d:%d: Attempt to issue message failed\n", 876 ahc_name(ahc), devinfo.channel, devinfo.target, 877 devinfo.lun); 878 scbindex = ahc_inb(ahc, SCB_TAG); 879 scb = ahc_lookup_scb(ahc, scbindex); 880 if (scb != NULL 881 && (scb->flags & SCB_RECOVERY_SCB) != 0) 882 /* 883 * Ensure that we didn't put a second instance of this 884 * SCB into the QINFIFO. 885 */ 886 ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), 887 SCB_GET_CHANNEL(ahc, scb), 888 SCB_GET_LUN(scb), scb->hscb->tag, 889 ROLE_INITIATOR, /*status*/0, 890 SEARCH_REMOVE); 891 break; 892 } 893 case NO_FREE_SCB: 894 { 895 printf("%s: No free or disconnected SCBs\n", ahc_name(ahc)); 896 ahc_dump_card_state(ahc); 897 panic("for safety"); 898 break; 899 } 900 case SCB_MISMATCH: 901 { 902 u_int scbptr; 903 904 scbptr = ahc_inb(ahc, SCBPTR); 905 printf("Bogus TAG after DMA. SCBPTR %d, tag %d, our tag %d\n", 906 scbptr, ahc_inb(ahc, ARG_1), 907 ahc->scb_data->hscbs[scbptr].tag); 908 ahc_dump_card_state(ahc); 909 panic("for saftey"); 910 break; 911 } 912 case OUT_OF_RANGE: 913 { 914 printf("%s: BTT calculation out of range\n", ahc_name(ahc)); 915 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 916 "ARG_1 == 0x%x ACCUM = 0x%x\n", 917 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), 918 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); 919 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 920 "SINDEX == 0x%x\n, A == 0x%x\n", 921 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), 922 ahc_index_busy_tcl(ahc, 923 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), 924 ahc_inb(ahc, SAVED_LUN))), 925 ahc_inb(ahc, SINDEX), 926 ahc_inb(ahc, ACCUM)); 927 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 928 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", 929 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), 930 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), 931 ahc_inb(ahc, SCB_CONTROL)); 932 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", 933 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); 934 ahc_dump_card_state(ahc); 935 panic("for safety"); 936 break; 937 } 938 default: 939 printf("ahc_intr: seqint, " 940 "intstat == 0x%x, scsisigi = 0x%x\n", 941 intstat, ahc_inb(ahc, SCSISIGI)); 942 break; 943 } 944 unpause: 945 /* 946 * The sequencer is paused immediately on 947 * a SEQINT, so we should restart it when 948 * we're done. 949 */ 950 ahc_unpause(ahc); 951 } 952 953 void 954 ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) 955 { 956 u_int scb_index; 957 u_int status0; 958 u_int status; 959 struct scb *scb; 960 char cur_channel; 961 char intr_channel; 962 963 if ((ahc->features & AHC_TWIN) != 0 964 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0)) 965 cur_channel = 'B'; 966 else 967 cur_channel = 'A'; 968 intr_channel = cur_channel; 969 970 if ((ahc->features & AHC_ULTRA2) != 0) 971 status0 = ahc_inb(ahc, SSTAT0) & IOERR; 972 else 973 status0 = 0; 974 status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 975 if (status == 0 && status0 == 0) { 976 if ((ahc->features & AHC_TWIN) != 0) { 977 /* Try the other channel */ 978 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 979 status = ahc_inb(ahc, SSTAT1) 980 & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 981 intr_channel = (cur_channel == 'A') ? 'B' : 'A'; 982 } 983 if (status == 0) { 984 printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc)); 985 ahc_outb(ahc, CLRINT, CLRSCSIINT); 986 ahc_unpause(ahc); 987 return; 988 } 989 } 990 991 /* Make sure the sequencer is in a safe location. */ 992 ahc_clear_critical_section(ahc); 993 994 scb_index = ahc_inb(ahc, SCB_TAG); 995 scb = ahc_lookup_scb(ahc, scb_index); 996 if (scb != NULL 997 && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) 998 scb = NULL; 999 1000 if ((ahc->features & AHC_ULTRA2) != 0 1001 && (status0 & IOERR) != 0) { 1002 int now_lvd; 1003 1004 now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40; 1005 printf("%s: Transceiver State Has Changed to %s mode\n", 1006 ahc_name(ahc), now_lvd ? "LVD" : "SE"); 1007 ahc_outb(ahc, CLRSINT0, CLRIOERR); 1008 /* 1009 * When transitioning to SE mode, the reset line 1010 * glitches, triggering an arbitration bug in some 1011 * Ultra2 controllers. This bug is cleared when we 1012 * assert the reset line. Since a reset glitch has 1013 * already occurred with this transition and a 1014 * transceiver state change is handled just like 1015 * a bus reset anyway, asserting the reset line 1016 * ourselves is safe. 1017 */ 1018 ahc_reset_channel(ahc, intr_channel, 1019 /*Initiate Reset*/now_lvd == 0); 1020 } else if ((status & SCSIRSTI) != 0) { 1021 printf("%s: Someone reset channel %c\n", 1022 ahc_name(ahc), intr_channel); 1023 if (intr_channel != cur_channel) 1024 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 1025 ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE); 1026 } else if ((status & SCSIPERR) != 0) { 1027 /* 1028 * Determine the bus phase and queue an appropriate message. 1029 * SCSIPERR is latched true as soon as a parity error 1030 * occurs. If the sequencer acked the transfer that 1031 * caused the parity error and the currently presented 1032 * transfer on the bus has correct parity, SCSIPERR will 1033 * be cleared by CLRSCSIPERR. Use this to determine if 1034 * we should look at the last phase the sequencer recorded, 1035 * or the current phase presented on the bus. 1036 */ 1037 struct ahc_devinfo devinfo; 1038 u_int mesg_out; 1039 u_int curphase; 1040 u_int errorphase; 1041 u_int lastphase; 1042 u_int scsirate; 1043 u_int i; 1044 u_int sstat2; 1045 int silent; 1046 1047 lastphase = ahc_inb(ahc, LASTPHASE); 1048 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 1049 sstat2 = ahc_inb(ahc, SSTAT2); 1050 ahc_outb(ahc, CLRSINT1, CLRSCSIPERR); 1051 /* 1052 * For all phases save DATA, the sequencer won't 1053 * automatically ack a byte that has a parity error 1054 * in it. So the only way that the current phase 1055 * could be 'data-in' is if the parity error is for 1056 * an already acked byte in the data phase. During 1057 * synchronous data-in transfers, we may actually 1058 * ack bytes before latching the current phase in 1059 * LASTPHASE, leading to the discrepancy between 1060 * curphase and lastphase. 1061 */ 1062 if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0 1063 || curphase == P_DATAIN || curphase == P_DATAIN_DT) 1064 errorphase = curphase; 1065 else 1066 errorphase = lastphase; 1067 1068 for (i = 0; i < num_phases; i++) { 1069 if (errorphase == ahc_phase_table[i].phase) 1070 break; 1071 } 1072 mesg_out = ahc_phase_table[i].mesg_out; 1073 silent = FALSE; 1074 if (scb != NULL) { 1075 if (SCB_IS_SILENT(scb)) 1076 silent = TRUE; 1077 else 1078 ahc_print_path(ahc, scb); 1079 scb->flags |= SCB_TRANSMISSION_ERROR; 1080 } else 1081 printf("%s:%c:%d: ", ahc_name(ahc), intr_channel, 1082 SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID))); 1083 scsirate = ahc_inb(ahc, SCSIRATE); 1084 if (silent == FALSE) { 1085 printf("parity error detected %s. " 1086 "SEQADDR(0x%x) SCSIRATE(0x%x)\n", 1087 ahc_phase_table[i].phasemsg, 1088 ahc_inw(ahc, SEQADDR0), 1089 scsirate); 1090 if ((ahc->features & AHC_DT) != 0) { 1091 if ((sstat2 & CRCVALERR) != 0) 1092 printf("\tCRC Value Mismatch\n"); 1093 if ((sstat2 & CRCENDERR) != 0) 1094 printf("\tNo terminal CRC packet " 1095 "recevied\n"); 1096 if ((sstat2 & CRCREQERR) != 0) 1097 printf("\tIllegal CRC packet " 1098 "request\n"); 1099 if ((sstat2 & DUAL_EDGE_ERR) != 0) 1100 printf("\tUnexpected %sDT Data Phase\n", 1101 (scsirate & SINGLE_EDGE) 1102 ? "" : "non-"); 1103 } 1104 } 1105 1106 if ((ahc->features & AHC_DT) != 0 1107 && (sstat2 & DUAL_EDGE_ERR) != 0) { 1108 /* 1109 * This error applies regardless of 1110 * data direction, so ignore the value 1111 * in the phase table. 1112 */ 1113 mesg_out = MSG_INITIATOR_DET_ERR; 1114 } 1115 1116 /* 1117 * We've set the hardware to assert ATN if we 1118 * get a parity error on "in" phases, so all we 1119 * need to do is stuff the message buffer with 1120 * the appropriate message. "In" phases have set 1121 * mesg_out to something other than MSG_NOP. 1122 */ 1123 if (mesg_out != MSG_NOOP) { 1124 if (ahc->msg_type != MSG_TYPE_NONE) 1125 ahc->send_msg_perror = TRUE; 1126 else 1127 ahc_outb(ahc, MSG_OUT, mesg_out); 1128 } 1129 /* 1130 * Force a renegotiation with this target just in 1131 * case we are out of sync for some external reason 1132 * unknown (or unreported) by the target. 1133 */ 1134 ahc_fetch_devinfo(ahc, &devinfo); 1135 ahc_force_renegotiation(ahc, &devinfo); 1136 1137 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1138 ahc_unpause(ahc); 1139 } else if ((status & SELTO) != 0) { 1140 u_int scbptr; 1141 1142 /* Stop the selection */ 1143 ahc_outb(ahc, SCSISEQ, 0); 1144 1145 /* No more pending messages */ 1146 ahc_clear_msg_state(ahc); 1147 1148 /* Clear interrupt state */ 1149 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 1150 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR); 1151 1152 /* 1153 * Although the driver does not care about the 1154 * 'Selection in Progress' status bit, the busy 1155 * LED does. SELINGO is only cleared by a sucessfull 1156 * selection, so we must manually clear it to insure 1157 * the LED turns off just incase no future successful 1158 * selections occur (e.g. no devices on the bus). 1159 */ 1160 ahc_outb(ahc, CLRSINT0, CLRSELINGO); 1161 1162 scbptr = ahc_inb(ahc, WAITING_SCBH); 1163 ahc_outb(ahc, SCBPTR, scbptr); 1164 scb_index = ahc_inb(ahc, SCB_TAG); 1165 1166 scb = ahc_lookup_scb(ahc, scb_index); 1167 if (scb == NULL) { 1168 printf("%s: ahc_intr - referenced scb not " 1169 "valid during SELTO scb(%d, %d)\n", 1170 ahc_name(ahc), scbptr, scb_index); 1171 ahc_dump_card_state(ahc); 1172 } else { 1173 struct ahc_devinfo devinfo; 1174 #ifdef AHC_DEBUG 1175 if ((ahc_debug & AHC_SHOW_SELTO) != 0) { 1176 ahc_print_path(ahc, scb); 1177 printf("Saw Selection Timeout for SCB 0x%x\n", 1178 scb_index); 1179 } 1180 #endif 1181 /* 1182 * Force a renegotiation with this target just in 1183 * case the cable was pulled and will later be 1184 * re-attached. The target may forget its negotiation 1185 * settings with us should it attempt to reselect 1186 * during the interruption. The target will not issue 1187 * a unit attention in this case, so we must always 1188 * renegotiate. 1189 */ 1190 ahc_scb_devinfo(ahc, &devinfo, scb); 1191 ahc_force_renegotiation(ahc, &devinfo); 1192 ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT); 1193 ahc_freeze_devq(ahc, scb); 1194 } 1195 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1196 ahc_restart(ahc); 1197 } else if ((status & BUSFREE) != 0 1198 && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) { 1199 struct ahc_devinfo devinfo; 1200 u_int lastphase; 1201 u_int saved_scsiid; 1202 u_int saved_lun; 1203 u_int target; 1204 u_int initiator_role_id; 1205 char channel; 1206 int printerror; 1207 1208 /* 1209 * Clear our selection hardware as soon as possible. 1210 * We may have an entry in the waiting Q for this target, 1211 * that is affected by this busfree and we don't want to 1212 * go about selecting the target while we handle the event. 1213 */ 1214 ahc_outb(ahc, SCSISEQ, 1215 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 1216 1217 /* 1218 * Disable busfree interrupts and clear the busfree 1219 * interrupt status. We do this here so that several 1220 * bus transactions occur prior to clearing the SCSIINT 1221 * latch. It can take a bit for the clearing to take effect. 1222 */ 1223 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 1224 ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR); 1225 1226 /* 1227 * Look at what phase we were last in. 1228 * If its message out, chances are pretty good 1229 * that the busfree was in response to one of 1230 * our abort requests. 1231 */ 1232 lastphase = ahc_inb(ahc, LASTPHASE); 1233 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); 1234 saved_lun = ahc_inb(ahc, SAVED_LUN); 1235 target = SCSIID_TARGET(ahc, saved_scsiid); 1236 initiator_role_id = SCSIID_OUR_ID(saved_scsiid); 1237 channel = SCSIID_CHANNEL(ahc, saved_scsiid); 1238 ahc_compile_devinfo(&devinfo, initiator_role_id, 1239 target, saved_lun, channel, ROLE_INITIATOR); 1240 printerror = 1; 1241 1242 if (lastphase == P_MESGOUT) { 1243 u_int tag; 1244 1245 tag = SCB_LIST_NULL; 1246 if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE) 1247 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) { 1248 if (ahc->msgout_buf[ahc->msgout_index - 1] 1249 == MSG_ABORT_TAG) 1250 tag = scb->hscb->tag; 1251 ahc_print_path(ahc, scb); 1252 printf("SCB %d - Abort%s Completed.\n", 1253 scb->hscb->tag, tag == SCB_LIST_NULL ? 1254 "" : " Tag"); 1255 ahc_abort_scbs(ahc, target, channel, 1256 saved_lun, tag, 1257 ROLE_INITIATOR, 1258 CAM_REQ_ABORTED); 1259 printerror = 0; 1260 } else if (ahc_sent_msg(ahc, AHCMSG_1B, 1261 MSG_BUS_DEV_RESET, TRUE)) { 1262 #ifdef __FreeBSD__ 1263 /* 1264 * Don't mark the user's request for this BDR 1265 * as completing with CAM_BDR_SENT. CAM3 1266 * specifies CAM_REQ_CMP. 1267 */ 1268 if (scb != NULL 1269 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV 1270 && ahc_match_scb(ahc, scb, target, channel, 1271 CAM_LUN_WILDCARD, 1272 SCB_LIST_NULL, 1273 ROLE_INITIATOR)) { 1274 ahc_set_transaction_status(scb, CAM_REQ_CMP); 1275 } 1276 #endif 1277 ahc_compile_devinfo(&devinfo, 1278 initiator_role_id, 1279 target, 1280 CAM_LUN_WILDCARD, 1281 channel, 1282 ROLE_INITIATOR); 1283 ahc_handle_devreset(ahc, &devinfo, 1284 CAM_BDR_SENT, 1285 "Bus Device Reset", 1286 /*verbose_level*/0); 1287 printerror = 0; 1288 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1289 MSG_EXT_PPR, FALSE)) { 1290 struct ahc_initiator_tinfo *tinfo; 1291 struct ahc_tmode_tstate *tstate; 1292 1293 /* 1294 * PPR Rejected. Try non-ppr negotiation 1295 * and retry command. 1296 */ 1297 tinfo = ahc_fetch_transinfo(ahc, 1298 devinfo.channel, 1299 devinfo.our_scsiid, 1300 devinfo.target, 1301 &tstate); 1302 tinfo->curr.transport_version = 2; 1303 tinfo->goal.transport_version = 2; 1304 tinfo->goal.ppr_options = 0; 1305 ahc_qinfifo_requeue_tail(ahc, scb); 1306 printerror = 0; 1307 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1308 MSG_EXT_WDTR, FALSE)) { 1309 /* 1310 * Negotiation Rejected. Go-narrow and 1311 * retry command. 1312 */ 1313 ahc_set_width(ahc, &devinfo, 1314 MSG_EXT_WDTR_BUS_8_BIT, 1315 AHC_TRANS_CUR|AHC_TRANS_GOAL, 1316 /*paused*/TRUE); 1317 ahc_qinfifo_requeue_tail(ahc, scb); 1318 printerror = 0; 1319 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1320 MSG_EXT_SDTR, FALSE)) { 1321 /* 1322 * Negotiation Rejected. Go-async and 1323 * retry command. 1324 */ 1325 ahc_set_syncrate(ahc, &devinfo, 1326 /*syncrate*/NULL, 1327 /*period*/0, /*offset*/0, 1328 /*ppr_options*/0, 1329 AHC_TRANS_CUR|AHC_TRANS_GOAL, 1330 /*paused*/TRUE); 1331 ahc_qinfifo_requeue_tail(ahc, scb); 1332 printerror = 0; 1333 } 1334 } 1335 if (printerror != 0) { 1336 u_int i; 1337 1338 if (scb != NULL) { 1339 u_int tag; 1340 1341 if ((scb->hscb->control & TAG_ENB) != 0) 1342 tag = scb->hscb->tag; 1343 else 1344 tag = SCB_LIST_NULL; 1345 ahc_print_path(ahc, scb); 1346 ahc_abort_scbs(ahc, target, channel, 1347 SCB_GET_LUN(scb), tag, 1348 ROLE_INITIATOR, 1349 CAM_UNEXP_BUSFREE); 1350 } else { 1351 /* 1352 * We had not fully identified this connection, 1353 * so we cannot abort anything. 1354 */ 1355 printf("%s: ", ahc_name(ahc)); 1356 } 1357 for (i = 0; i < num_phases; i++) { 1358 if (lastphase == ahc_phase_table[i].phase) 1359 break; 1360 } 1361 if (lastphase != P_BUSFREE) { 1362 /* 1363 * Renegotiate with this device at the 1364 * next oportunity just in case this busfree 1365 * is due to a negotiation mismatch with the 1366 * device. 1367 */ 1368 ahc_force_renegotiation(ahc, &devinfo); 1369 } 1370 printf("Unexpected busfree %s\n" 1371 "SEQADDR == 0x%x\n", 1372 ahc_phase_table[i].phasemsg, 1373 ahc_inb(ahc, SEQADDR0) 1374 | (ahc_inb(ahc, SEQADDR1) << 8)); 1375 } 1376 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1377 ahc_restart(ahc); 1378 } else { 1379 printf("%s: Missing case in ahc_handle_scsiint. status = %x\n", 1380 ahc_name(ahc), status); 1381 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1382 } 1383 } 1384 1385 /* 1386 * Force renegotiation to occur the next time we initiate 1387 * a command to the current device. 1388 */ 1389 static void 1390 ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 1391 { 1392 struct ahc_initiator_tinfo *targ_info; 1393 struct ahc_tmode_tstate *tstate; 1394 1395 targ_info = ahc_fetch_transinfo(ahc, 1396 devinfo->channel, 1397 devinfo->our_scsiid, 1398 devinfo->target, 1399 &tstate); 1400 ahc_update_neg_request(ahc, devinfo, tstate, 1401 targ_info, AHC_NEG_IF_NON_ASYNC); 1402 } 1403 1404 #define AHC_MAX_STEPS 2000 1405 void 1406 ahc_clear_critical_section(struct ahc_softc *ahc) 1407 { 1408 int stepping; 1409 int steps; 1410 u_int simode0; 1411 u_int simode1; 1412 1413 if (ahc->num_critical_sections == 0) 1414 return; 1415 1416 stepping = FALSE; 1417 steps = 0; 1418 simode0 = 0; 1419 simode1 = 0; 1420 for (;;) { 1421 struct cs *cs; 1422 u_int seqaddr; 1423 u_int i; 1424 1425 seqaddr = ahc_inb(ahc, SEQADDR0) 1426 | (ahc_inb(ahc, SEQADDR1) << 8); 1427 1428 /* 1429 * Seqaddr represents the next instruction to execute, 1430 * so we are really executing the instruction just 1431 * before it. 1432 */ 1433 if (seqaddr != 0) 1434 seqaddr -= 1; 1435 cs = ahc->critical_sections; 1436 for (i = 0; i < ahc->num_critical_sections; i++, cs++) { 1437 1438 if (cs->begin < seqaddr && cs->end >= seqaddr) 1439 break; 1440 } 1441 1442 if (i == ahc->num_critical_sections) 1443 break; 1444 1445 if (steps > AHC_MAX_STEPS) { 1446 printf("%s: Infinite loop in critical section\n", 1447 ahc_name(ahc)); 1448 ahc_dump_card_state(ahc); 1449 panic("critical section loop"); 1450 } 1451 1452 steps++; 1453 if (stepping == FALSE) { 1454 1455 /* 1456 * Disable all interrupt sources so that the 1457 * sequencer will not be stuck by a pausing 1458 * interrupt condition while we attempt to 1459 * leave a critical section. 1460 */ 1461 simode0 = ahc_inb(ahc, SIMODE0); 1462 ahc_outb(ahc, SIMODE0, 0); 1463 simode1 = ahc_inb(ahc, SIMODE1); 1464 if ((ahc->features & AHC_DT) != 0) 1465 /* 1466 * On DT class controllers, we 1467 * use the enhanced busfree logic. 1468 * Unfortunately we cannot re-enable 1469 * busfree detection within the 1470 * current connection, so we must 1471 * leave it on while single stepping. 1472 */ 1473 ahc_outb(ahc, SIMODE1, simode1 & ENBUSFREE); 1474 else 1475 ahc_outb(ahc, SIMODE1, 0); 1476 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1477 ahc_outb(ahc, SEQCTL, ahc->seqctl | STEP); 1478 stepping = TRUE; 1479 } 1480 if ((ahc->features & AHC_DT) != 0) { 1481 ahc_outb(ahc, CLRSINT1, CLRBUSFREE); 1482 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1483 } 1484 ahc_outb(ahc, HCNTRL, ahc->unpause); 1485 while (!ahc_is_paused(ahc)) 1486 ahc_delay(200); 1487 } 1488 if (stepping) { 1489 ahc_outb(ahc, SIMODE0, simode0); 1490 ahc_outb(ahc, SIMODE1, simode1); 1491 ahc_outb(ahc, SEQCTL, ahc->seqctl); 1492 } 1493 } 1494 1495 /* 1496 * Clear any pending interrupt status. 1497 */ 1498 void 1499 ahc_clear_intstat(struct ahc_softc *ahc) 1500 { 1501 /* Clear any interrupt conditions this may have caused */ 1502 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI 1503 |CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG| 1504 CLRREQINIT); 1505 ahc_flush_device_writes(ahc); 1506 ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO); 1507 ahc_flush_device_writes(ahc); 1508 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1509 ahc_flush_device_writes(ahc); 1510 } 1511 1512 /**************************** Debugging Routines ******************************/ 1513 #ifdef AHC_DEBUG 1514 uint32_t ahc_debug = AHC_DEBUG_OPTS; 1515 #endif 1516 1517 void 1518 ahc_print_scb(struct scb *scb) 1519 { 1520 int i; 1521 1522 struct hardware_scb *hscb = scb->hscb; 1523 1524 printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", 1525 (void *)scb, 1526 hscb->control, 1527 hscb->scsiid, 1528 hscb->lun, 1529 hscb->cdb_len); 1530 printf("Shared Data: "); 1531 for (i = 0; i < sizeof(hscb->shared_data.cdb); i++) 1532 printf("%#02x", hscb->shared_data.cdb[i]); 1533 printf(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n", 1534 ahc_le32toh(hscb->dataptr), 1535 ahc_le32toh(hscb->datacnt), 1536 ahc_le32toh(hscb->sgptr), 1537 hscb->tag); 1538 if (scb->sg_count > 0) { 1539 for (i = 0; i < scb->sg_count; i++) { 1540 printf("sg[%d] - Addr 0x%x%x : Length %d\n", 1541 i, 1542 (ahc_le32toh(scb->sg_list[i].len) >> 24 1543 & SG_HIGH_ADDR_BITS), 1544 ahc_le32toh(scb->sg_list[i].addr), 1545 ahc_le32toh(scb->sg_list[i].len)); 1546 } 1547 } 1548 } 1549 1550 /************************* Transfer Negotiation *******************************/ 1551 /* 1552 * Allocate per target mode instance (ID we respond to as a target) 1553 * transfer negotiation data structures. 1554 */ 1555 static struct ahc_tmode_tstate * 1556 ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel) 1557 { 1558 struct ahc_tmode_tstate *master_tstate; 1559 struct ahc_tmode_tstate *tstate; 1560 int i; 1561 1562 master_tstate = ahc->enabled_targets[ahc->our_id]; 1563 if (channel == 'B') { 1564 scsi_id += 8; 1565 master_tstate = ahc->enabled_targets[ahc->our_id_b + 8]; 1566 } 1567 if (ahc->enabled_targets[scsi_id] != NULL 1568 && ahc->enabled_targets[scsi_id] != master_tstate) 1569 panic("%s: ahc_alloc_tstate - Target already allocated", 1570 ahc_name(ahc)); 1571 tstate = (struct ahc_tmode_tstate*)malloc(sizeof(*tstate), 1572 M_DEVBUF, M_NOWAIT); 1573 if (tstate == NULL) 1574 return (NULL); 1575 1576 /* 1577 * If we have allocated a master tstate, copy user settings from 1578 * the master tstate (taken from SRAM or the EEPROM) for this 1579 * channel, but reset our current and goal settings to async/narrow 1580 * until an initiator talks to us. 1581 */ 1582 if (master_tstate != NULL) { 1583 memcpy(tstate, master_tstate, sizeof(*tstate)); 1584 memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); 1585 tstate->ultraenb = 0; 1586 for (i = 0; i < AHC_NUM_TARGETS; i++) { 1587 memset(&tstate->transinfo[i].curr, 0, 1588 sizeof(tstate->transinfo[i].curr)); 1589 memset(&tstate->transinfo[i].goal, 0, 1590 sizeof(tstate->transinfo[i].goal)); 1591 } 1592 } else 1593 memset(tstate, 0, sizeof(*tstate)); 1594 ahc->enabled_targets[scsi_id] = tstate; 1595 return (tstate); 1596 } 1597 1598 #ifdef AHC_TARGET_MODE 1599 /* 1600 * Free per target mode instance (ID we respond to as a target) 1601 * transfer negotiation data structures. 1602 */ 1603 static void 1604 ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force) 1605 { 1606 struct ahc_tmode_tstate *tstate; 1607 1608 /* 1609 * Don't clean up our "master" tstate. 1610 * It has our default user settings. 1611 */ 1612 if (((channel == 'B' && scsi_id == ahc->our_id_b) 1613 || (channel == 'A' && scsi_id == ahc->our_id)) 1614 && force == FALSE) 1615 return; 1616 1617 if (channel == 'B') 1618 scsi_id += 8; 1619 tstate = ahc->enabled_targets[scsi_id]; 1620 if (tstate != NULL) 1621 free(tstate, M_DEVBUF); 1622 ahc->enabled_targets[scsi_id] = NULL; 1623 } 1624 #endif 1625 1626 /* 1627 * Called when we have an active connection to a target on the bus, 1628 * this function finds the nearest syncrate to the input period limited 1629 * by the capabilities of the bus connectivity of and sync settings for 1630 * the target. 1631 */ 1632 struct ahc_syncrate * 1633 ahc_devlimited_syncrate(struct ahc_softc *ahc, 1634 struct ahc_initiator_tinfo *tinfo, 1635 u_int *period, u_int *ppr_options, role_t role) 1636 { 1637 struct ahc_transinfo *transinfo; 1638 u_int maxsync; 1639 1640 if ((ahc->features & AHC_ULTRA2) != 0) { 1641 if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0 1642 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) { 1643 maxsync = AHC_SYNCRATE_DT; 1644 } else { 1645 maxsync = AHC_SYNCRATE_ULTRA; 1646 /* Can't do DT on an SE bus */ 1647 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1648 } 1649 } else if ((ahc->features & AHC_ULTRA) != 0) { 1650 maxsync = AHC_SYNCRATE_ULTRA; 1651 } else { 1652 maxsync = AHC_SYNCRATE_FAST; 1653 } 1654 /* 1655 * Never allow a value higher than our current goal 1656 * period otherwise we may allow a target initiated 1657 * negotiation to go above the limit as set by the 1658 * user. In the case of an initiator initiated 1659 * sync negotiation, we limit based on the user 1660 * setting. This allows the system to still accept 1661 * incoming negotiations even if target initiated 1662 * negotiation is not performed. 1663 */ 1664 if (role == ROLE_TARGET) 1665 transinfo = &tinfo->user; 1666 else 1667 transinfo = &tinfo->goal; 1668 *ppr_options &= transinfo->ppr_options; 1669 if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) { 1670 maxsync = MAX(maxsync, AHC_SYNCRATE_ULTRA2); 1671 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1672 } 1673 if (transinfo->period == 0) { 1674 *period = 0; 1675 *ppr_options = 0; 1676 return (NULL); 1677 } 1678 *period = MAX(*period, transinfo->period); 1679 return (ahc_find_syncrate(ahc, period, ppr_options, maxsync)); 1680 } 1681 1682 /* 1683 * Look up the valid period to SCSIRATE conversion in our table. 1684 * Return the period and offset that should be sent to the target 1685 * if this was the beginning of an SDTR. 1686 */ 1687 struct ahc_syncrate * 1688 ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, 1689 u_int *ppr_options, u_int maxsync) 1690 { 1691 struct ahc_syncrate *syncrate; 1692 1693 if ((ahc->features & AHC_DT) == 0) 1694 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1695 1696 /* Skip all DT only entries if DT is not available */ 1697 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0 1698 && maxsync < AHC_SYNCRATE_ULTRA2) 1699 maxsync = AHC_SYNCRATE_ULTRA2; 1700 1701 for (syncrate = &ahc_syncrates[maxsync]; 1702 syncrate->rate != NULL; 1703 syncrate++) { 1704 1705 /* 1706 * The Ultra2 table doesn't go as low 1707 * as for the Fast/Ultra cards. 1708 */ 1709 if ((ahc->features & AHC_ULTRA2) != 0 1710 && (syncrate->sxfr_u2 == 0)) 1711 break; 1712 1713 if (*period <= syncrate->period) { 1714 /* 1715 * When responding to a target that requests 1716 * sync, the requested rate may fall between 1717 * two rates that we can output, but still be 1718 * a rate that we can receive. Because of this, 1719 * we want to respond to the target with 1720 * the same rate that it sent to us even 1721 * if the period we use to send data to it 1722 * is lower. Only lower the response period 1723 * if we must. 1724 */ 1725 if (syncrate == &ahc_syncrates[maxsync]) 1726 *period = syncrate->period; 1727 1728 /* 1729 * At some speeds, we only support 1730 * ST transfers. 1731 */ 1732 if ((syncrate->sxfr_u2 & ST_SXFR) != 0) 1733 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1734 break; 1735 } 1736 } 1737 1738 if ((*period == 0) 1739 || (syncrate->rate == NULL) 1740 || ((ahc->features & AHC_ULTRA2) != 0 1741 && (syncrate->sxfr_u2 == 0))) { 1742 /* Use asynchronous transfers. */ 1743 *period = 0; 1744 syncrate = NULL; 1745 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 1746 } 1747 return (syncrate); 1748 } 1749 1750 /* 1751 * Convert from an entry in our syncrate table to the SCSI equivalent 1752 * sync "period" factor. 1753 */ 1754 u_int 1755 ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync) 1756 { 1757 struct ahc_syncrate *syncrate; 1758 1759 if ((ahc->features & AHC_ULTRA2) != 0) 1760 scsirate &= SXFR_ULTRA2; 1761 else 1762 scsirate &= SXFR; 1763 1764 syncrate = &ahc_syncrates[maxsync]; 1765 while (syncrate->rate != NULL) { 1766 1767 if ((ahc->features & AHC_ULTRA2) != 0) { 1768 if (syncrate->sxfr_u2 == 0) 1769 break; 1770 else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2)) 1771 return (syncrate->period); 1772 } else if (scsirate == (syncrate->sxfr & SXFR)) { 1773 return (syncrate->period); 1774 } 1775 syncrate++; 1776 } 1777 return (0); /* async */ 1778 } 1779 1780 /* 1781 * Truncate the given synchronous offset to a value the 1782 * current adapter type and syncrate are capable of. 1783 */ 1784 void 1785 ahc_validate_offset(struct ahc_softc *ahc, 1786 struct ahc_initiator_tinfo *tinfo, 1787 struct ahc_syncrate *syncrate, 1788 u_int *offset, int wide, role_t role) 1789 { 1790 u_int maxoffset; 1791 1792 /* Limit offset to what we can do */ 1793 if (syncrate == NULL) { 1794 maxoffset = 0; 1795 } else if ((ahc->features & AHC_ULTRA2) != 0) { 1796 maxoffset = MAX_OFFSET_ULTRA2; 1797 } else { 1798 if (wide) 1799 maxoffset = MAX_OFFSET_16BIT; 1800 else 1801 maxoffset = MAX_OFFSET_8BIT; 1802 } 1803 *offset = MIN(*offset, maxoffset); 1804 if (tinfo != NULL) { 1805 if (role == ROLE_TARGET) 1806 *offset = MIN(*offset, tinfo->user.offset); 1807 else 1808 *offset = MIN(*offset, tinfo->goal.offset); 1809 } 1810 } 1811 1812 /* 1813 * Truncate the given transfer width parameter to a value the 1814 * current adapter type is capable of. 1815 */ 1816 void 1817 ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, 1818 u_int *bus_width, role_t role) 1819 { 1820 switch (*bus_width) { 1821 default: 1822 if (ahc->features & AHC_WIDE) { 1823 /* Respond Wide */ 1824 *bus_width = MSG_EXT_WDTR_BUS_16_BIT; 1825 break; 1826 } 1827 /* FALLTHROUGH */ 1828 case MSG_EXT_WDTR_BUS_8_BIT: 1829 *bus_width = MSG_EXT_WDTR_BUS_8_BIT; 1830 break; 1831 } 1832 if (tinfo != NULL) { 1833 if (role == ROLE_TARGET) 1834 *bus_width = MIN(tinfo->user.width, *bus_width); 1835 else 1836 *bus_width = MIN(tinfo->goal.width, *bus_width); 1837 } 1838 } 1839 1840 /* 1841 * Update the bitmask of targets for which the controller should 1842 * negotiate with at the next convenient oportunity. This currently 1843 * means the next time we send the initial identify messages for 1844 * a new transaction. 1845 */ 1846 int 1847 ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1848 struct ahc_tmode_tstate *tstate, 1849 struct ahc_initiator_tinfo *tinfo, ahc_neg_type neg_type) 1850 { 1851 u_int auto_negotiate_orig; 1852 1853 auto_negotiate_orig = tstate->auto_negotiate; 1854 if (neg_type == AHC_NEG_ALWAYS) { 1855 /* 1856 * Force our "current" settings to be 1857 * unknown so that unless a bus reset 1858 * occurs the need to renegotiate is 1859 * recorded persistently. 1860 */ 1861 if ((ahc->features & AHC_WIDE) != 0) 1862 tinfo->curr.width = AHC_WIDTH_UNKNOWN; 1863 tinfo->curr.period = AHC_PERIOD_UNKNOWN; 1864 tinfo->curr.offset = AHC_OFFSET_UNKNOWN; 1865 } 1866 if (tinfo->curr.period != tinfo->goal.period 1867 || tinfo->curr.width != tinfo->goal.width 1868 || tinfo->curr.offset != tinfo->goal.offset 1869 || tinfo->curr.ppr_options != tinfo->goal.ppr_options 1870 || (neg_type == AHC_NEG_IF_NON_ASYNC 1871 && (tinfo->goal.offset != 0 1872 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT 1873 || tinfo->goal.ppr_options != 0))) 1874 tstate->auto_negotiate |= devinfo->target_mask; 1875 else 1876 tstate->auto_negotiate &= ~devinfo->target_mask; 1877 1878 return (auto_negotiate_orig != tstate->auto_negotiate); 1879 } 1880 1881 /* 1882 * Update the user/goal/curr tables of synchronous negotiation 1883 * parameters as well as, in the case of a current or active update, 1884 * any data structures on the host controller. In the case of an 1885 * active update, the specified target is currently talking to us on 1886 * the bus, so the transfer parameter update must take effect 1887 * immediately. 1888 */ 1889 void 1890 ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1891 struct ahc_syncrate *syncrate, u_int period, 1892 u_int offset, u_int ppr_options, u_int type, int paused) 1893 { 1894 struct ahc_initiator_tinfo *tinfo; 1895 struct ahc_tmode_tstate *tstate; 1896 u_int old_period; 1897 u_int old_offset; 1898 u_int old_ppr; 1899 int active; 1900 int update_needed; 1901 1902 active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; 1903 update_needed = 0; 1904 1905 if (syncrate == NULL) { 1906 period = 0; 1907 offset = 0; 1908 } 1909 1910 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 1911 devinfo->target, &tstate); 1912 1913 if ((type & AHC_TRANS_USER) != 0) { 1914 tinfo->user.period = period; 1915 tinfo->user.offset = offset; 1916 tinfo->user.ppr_options = ppr_options; 1917 } 1918 1919 if ((type & AHC_TRANS_GOAL) != 0) { 1920 tinfo->goal.period = period; 1921 tinfo->goal.offset = offset; 1922 tinfo->goal.ppr_options = ppr_options; 1923 } 1924 1925 old_period = tinfo->curr.period; 1926 old_offset = tinfo->curr.offset; 1927 old_ppr = tinfo->curr.ppr_options; 1928 1929 if ((type & AHC_TRANS_CUR) != 0 1930 && (old_period != period 1931 || old_offset != offset 1932 || old_ppr != ppr_options)) { 1933 u_int scsirate; 1934 1935 update_needed++; 1936 scsirate = tinfo->scsirate; 1937 if ((ahc->features & AHC_ULTRA2) != 0) { 1938 1939 scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC); 1940 if (syncrate != NULL) { 1941 scsirate |= syncrate->sxfr_u2; 1942 if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) 1943 scsirate |= ENABLE_CRC; 1944 else 1945 scsirate |= SINGLE_EDGE; 1946 } 1947 } else { 1948 1949 scsirate &= ~(SXFR|SOFS); 1950 /* 1951 * Ensure Ultra mode is set properly for 1952 * this target. 1953 */ 1954 tstate->ultraenb &= ~devinfo->target_mask; 1955 if (syncrate != NULL) { 1956 if (syncrate->sxfr & ULTRA_SXFR) { 1957 tstate->ultraenb |= 1958 devinfo->target_mask; 1959 } 1960 scsirate |= syncrate->sxfr & SXFR; 1961 scsirate |= offset & SOFS; 1962 } 1963 if (active) { 1964 u_int sxfrctl0; 1965 1966 sxfrctl0 = ahc_inb(ahc, SXFRCTL0); 1967 sxfrctl0 &= ~FAST20; 1968 if (tstate->ultraenb & devinfo->target_mask) 1969 sxfrctl0 |= FAST20; 1970 ahc_outb(ahc, SXFRCTL0, sxfrctl0); 1971 } 1972 } 1973 if (active) { 1974 ahc_outb(ahc, SCSIRATE, scsirate); 1975 if ((ahc->features & AHC_ULTRA2) != 0) 1976 ahc_outb(ahc, SCSIOFFSET, offset); 1977 } 1978 1979 tinfo->scsirate = scsirate; 1980 tinfo->curr.period = period; 1981 tinfo->curr.offset = offset; 1982 tinfo->curr.ppr_options = ppr_options; 1983 1984 ahc_send_async(ahc, devinfo->channel, devinfo->target, 1985 CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); 1986 if (bootverbose) { 1987 if (offset != 0) { 1988 printf("%s: target %d synchronous at %sMHz%s, " 1989 "offset = 0x%x\n", ahc_name(ahc), 1990 devinfo->target, syncrate->rate, 1991 (ppr_options & MSG_EXT_PPR_DT_REQ) 1992 ? " DT" : "", offset); 1993 } else { 1994 printf("%s: target %d using " 1995 "asynchronous transfers\n", 1996 ahc_name(ahc), devinfo->target); 1997 } 1998 } 1999 } 2000 2001 update_needed += ahc_update_neg_request(ahc, devinfo, tstate, 2002 tinfo, AHC_NEG_TO_GOAL); 2003 2004 if (update_needed) 2005 ahc_update_pending_scbs(ahc); 2006 } 2007 2008 /* 2009 * Update the user/goal/curr tables of wide negotiation 2010 * parameters as well as, in the case of a current or active update, 2011 * any data structures on the host controller. In the case of an 2012 * active update, the specified target is currently talking to us on 2013 * the bus, so the transfer parameter update must take effect 2014 * immediately. 2015 */ 2016 void 2017 ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2018 u_int width, u_int type, int paused) 2019 { 2020 struct ahc_initiator_tinfo *tinfo; 2021 struct ahc_tmode_tstate *tstate; 2022 u_int oldwidth; 2023 int active; 2024 int update_needed; 2025 2026 active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; 2027 update_needed = 0; 2028 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 2029 devinfo->target, &tstate); 2030 2031 if ((type & AHC_TRANS_USER) != 0) 2032 tinfo->user.width = width; 2033 2034 if ((type & AHC_TRANS_GOAL) != 0) 2035 tinfo->goal.width = width; 2036 2037 oldwidth = tinfo->curr.width; 2038 if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) { 2039 u_int scsirate; 2040 2041 update_needed++; 2042 scsirate = tinfo->scsirate; 2043 scsirate &= ~WIDEXFER; 2044 if (width == MSG_EXT_WDTR_BUS_16_BIT) 2045 scsirate |= WIDEXFER; 2046 2047 tinfo->scsirate = scsirate; 2048 2049 if (active) 2050 ahc_outb(ahc, SCSIRATE, scsirate); 2051 2052 tinfo->curr.width = width; 2053 2054 ahc_send_async(ahc, devinfo->channel, devinfo->target, 2055 CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); 2056 if (bootverbose) { 2057 printf("%s: target %d using %dbit transfers\n", 2058 ahc_name(ahc), devinfo->target, 2059 8 * (0x01 << width)); 2060 } 2061 } 2062 2063 update_needed += ahc_update_neg_request(ahc, devinfo, tstate, 2064 tinfo, AHC_NEG_TO_GOAL); 2065 if (update_needed) 2066 ahc_update_pending_scbs(ahc); 2067 } 2068 2069 /* 2070 * Update the current state of tagged queuing for a given target. 2071 */ 2072 void 2073 ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2074 ahc_queue_alg alg) 2075 { 2076 ahc_platform_set_tags(ahc, devinfo, alg); 2077 ahc_send_async(ahc, devinfo->channel, devinfo->target, 2078 devinfo->lun, AC_TRANSFER_NEG, &alg); 2079 } 2080 2081 /* 2082 * When the transfer settings for a connection change, update any 2083 * in-transit SCBs to contain the new data so the hardware will 2084 * be set correctly during future (re)selections. 2085 */ 2086 static void 2087 ahc_update_pending_scbs(struct ahc_softc *ahc) 2088 { 2089 struct scb *pending_scb; 2090 int pending_scb_count; 2091 int i; 2092 int paused; 2093 u_int saved_scbptr; 2094 2095 /* 2096 * Traverse the pending SCB list and ensure that all of the 2097 * SCBs there have the proper settings. 2098 */ 2099 pending_scb_count = 0; 2100 LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) { 2101 struct ahc_devinfo devinfo; 2102 struct hardware_scb *pending_hscb; 2103 struct ahc_initiator_tinfo *tinfo; 2104 struct ahc_tmode_tstate *tstate; 2105 2106 ahc_scb_devinfo(ahc, &devinfo, pending_scb); 2107 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, 2108 devinfo.our_scsiid, 2109 devinfo.target, &tstate); 2110 pending_hscb = pending_scb->hscb; 2111 pending_hscb->control &= ~ULTRAENB; 2112 if ((tstate->ultraenb & devinfo.target_mask) != 0) 2113 pending_hscb->control |= ULTRAENB; 2114 pending_hscb->scsirate = tinfo->scsirate; 2115 pending_hscb->scsioffset = tinfo->curr.offset; 2116 if ((tstate->auto_negotiate & devinfo.target_mask) == 0 2117 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) { 2118 pending_scb->flags &= ~SCB_AUTO_NEGOTIATE; 2119 pending_hscb->control &= ~MK_MESSAGE; 2120 } 2121 ahc_sync_scb(ahc, pending_scb, 2122 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2123 pending_scb_count++; 2124 } 2125 2126 if (pending_scb_count == 0) 2127 return; 2128 2129 if (ahc_is_paused(ahc)) { 2130 paused = 1; 2131 } else { 2132 paused = 0; 2133 ahc_pause(ahc); 2134 } 2135 2136 saved_scbptr = ahc_inb(ahc, SCBPTR); 2137 /* Ensure that the hscbs down on the card match the new information */ 2138 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 2139 struct hardware_scb *pending_hscb; 2140 u_int control; 2141 u_int scb_tag; 2142 2143 ahc_outb(ahc, SCBPTR, i); 2144 scb_tag = ahc_inb(ahc, SCB_TAG); 2145 pending_scb = ahc_lookup_scb(ahc, scb_tag); 2146 if (pending_scb == NULL) 2147 continue; 2148 2149 pending_hscb = pending_scb->hscb; 2150 control = ahc_inb(ahc, SCB_CONTROL); 2151 control &= ~(ULTRAENB|MK_MESSAGE); 2152 control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE); 2153 ahc_outb(ahc, SCB_CONTROL, control); 2154 ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate); 2155 ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset); 2156 } 2157 ahc_outb(ahc, SCBPTR, saved_scbptr); 2158 2159 if (paused == 0) 2160 ahc_unpause(ahc); 2161 } 2162 2163 /**************************** Pathing Information *****************************/ 2164 static void 2165 ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2166 { 2167 u_int saved_scsiid; 2168 role_t role; 2169 int our_id; 2170 2171 if (ahc_inb(ahc, SSTAT0) & TARGET) 2172 role = ROLE_TARGET; 2173 else 2174 role = ROLE_INITIATOR; 2175 2176 if (role == ROLE_TARGET 2177 && (ahc->features & AHC_MULTI_TID) != 0 2178 && (ahc_inb(ahc, SEQ_FLAGS) 2179 & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) { 2180 /* We were selected, so pull our id from TARGIDIN */ 2181 our_id = ahc_inb(ahc, TARGIDIN) & OID; 2182 } else if ((ahc->features & AHC_ULTRA2) != 0) 2183 our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID; 2184 else 2185 our_id = ahc_inb(ahc, SCSIID) & OID; 2186 2187 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); 2188 ahc_compile_devinfo(devinfo, 2189 our_id, 2190 SCSIID_TARGET(ahc, saved_scsiid), 2191 ahc_inb(ahc, SAVED_LUN), 2192 SCSIID_CHANNEL(ahc, saved_scsiid), 2193 role); 2194 } 2195 2196 struct ahc_phase_table_entry* 2197 ahc_lookup_phase_entry(int phase) 2198 { 2199 struct ahc_phase_table_entry *entry; 2200 struct ahc_phase_table_entry *last_entry; 2201 2202 /* 2203 * num_phases doesn't include the default entry which 2204 * will be returned if the phase doesn't match. 2205 */ 2206 last_entry = &ahc_phase_table[num_phases]; 2207 for (entry = ahc_phase_table; entry < last_entry; entry++) { 2208 if (phase == entry->phase) 2209 break; 2210 } 2211 return (entry); 2212 } 2213 2214 void 2215 ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target, 2216 u_int lun, char channel, role_t role) 2217 { 2218 devinfo->our_scsiid = our_id; 2219 devinfo->target = target; 2220 devinfo->lun = lun; 2221 devinfo->target_offset = target; 2222 devinfo->channel = channel; 2223 devinfo->role = role; 2224 if (channel == 'B') 2225 devinfo->target_offset += 8; 2226 devinfo->target_mask = (0x01 << devinfo->target_offset); 2227 } 2228 2229 void 2230 ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2231 { 2232 printf("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel, 2233 devinfo->target, devinfo->lun); 2234 } 2235 2236 static void 2237 ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2238 struct scb *scb) 2239 { 2240 role_t role; 2241 int our_id; 2242 2243 our_id = SCSIID_OUR_ID(scb->hscb->scsiid); 2244 role = ROLE_INITIATOR; 2245 if ((scb->flags & SCB_TARGET_SCB) != 0) 2246 role = ROLE_TARGET; 2247 ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb), 2248 SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role); 2249 } 2250 2251 2252 /************************ Message Phase Processing ****************************/ 2253 static void 2254 ahc_assert_atn(struct ahc_softc *ahc) 2255 { 2256 u_int scsisigo; 2257 2258 scsisigo = ATNO; 2259 if ((ahc->features & AHC_DT) == 0) 2260 scsisigo |= ahc_inb(ahc, SCSISIGI); 2261 ahc_outb(ahc, SCSISIGO, scsisigo); 2262 } 2263 2264 /* 2265 * When an initiator transaction with the MK_MESSAGE flag either reconnects 2266 * or enters the initial message out phase, we are interrupted. Fill our 2267 * outgoing message buffer with the appropriate message and beging handing 2268 * the message phase(s) manually. 2269 */ 2270 static void 2271 ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2272 struct scb *scb) 2273 { 2274 /* 2275 * To facilitate adding multiple messages together, 2276 * each routine should increment the index and len 2277 * variables instead of setting them explicitly. 2278 */ 2279 ahc->msgout_index = 0; 2280 ahc->msgout_len = 0; 2281 2282 if ((scb->flags & SCB_DEVICE_RESET) == 0 2283 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) { 2284 u_int identify_msg; 2285 2286 identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb); 2287 if ((scb->hscb->control & DISCENB) != 0) 2288 identify_msg |= MSG_IDENTIFY_DISCFLAG; 2289 ahc->msgout_buf[ahc->msgout_index++] = identify_msg; 2290 ahc->msgout_len++; 2291 2292 if ((scb->hscb->control & TAG_ENB) != 0) { 2293 ahc->msgout_buf[ahc->msgout_index++] = 2294 scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE); 2295 ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag; 2296 ahc->msgout_len += 2; 2297 } 2298 } 2299 2300 if (scb->flags & SCB_DEVICE_RESET) { 2301 ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET; 2302 ahc->msgout_len++; 2303 ahc_print_path(ahc, scb); 2304 printf("Bus Device Reset Message Sent\n"); 2305 /* 2306 * Clear our selection hardware in advance of 2307 * the busfree. We may have an entry in the waiting 2308 * Q for this target, and we don't want to go about 2309 * selecting while we handle the busfree and blow it 2310 * away. 2311 */ 2312 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); 2313 } else if ((scb->flags & SCB_ABORT) != 0) { 2314 if ((scb->hscb->control & TAG_ENB) != 0) 2315 ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG; 2316 else 2317 ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT; 2318 ahc->msgout_len++; 2319 ahc_print_path(ahc, scb); 2320 printf("Abort%s Message Sent\n", 2321 (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : ""); 2322 /* 2323 * Clear our selection hardware in advance of 2324 * the busfree. We may have an entry in the waiting 2325 * Q for this target, and we don't want to go about 2326 * selecting while we handle the busfree and blow it 2327 * away. 2328 */ 2329 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); 2330 } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) { 2331 ahc_build_transfer_msg(ahc, devinfo); 2332 } else { 2333 printf("ahc_intr: AWAITING_MSG for an SCB that " 2334 "does not have a waiting message\n"); 2335 printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid, 2336 devinfo->target_mask); 2337 panic("SCB = %d, SCB Control = %x, MSG_OUT = %x " 2338 "SCB flags = %x", scb->hscb->tag, scb->hscb->control, 2339 ahc_inb(ahc, MSG_OUT), scb->flags); 2340 } 2341 2342 /* 2343 * Clear the MK_MESSAGE flag from the SCB so we aren't 2344 * asked to send this message again. 2345 */ 2346 ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE); 2347 scb->hscb->control &= ~MK_MESSAGE; 2348 ahc->msgout_index = 0; 2349 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2350 } 2351 2352 /* 2353 * Build an appropriate transfer negotiation message for the 2354 * currently active target. 2355 */ 2356 static void 2357 ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2358 { 2359 /* 2360 * We need to initiate transfer negotiations. 2361 * If our current and goal settings are identical, 2362 * we want to renegotiate due to a check condition. 2363 */ 2364 struct ahc_initiator_tinfo *tinfo; 2365 struct ahc_tmode_tstate *tstate; 2366 struct ahc_syncrate *rate; 2367 int dowide; 2368 int dosync; 2369 int doppr; 2370 u_int period; 2371 u_int ppr_options; 2372 u_int offset; 2373 2374 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 2375 devinfo->target, &tstate); 2376 /* 2377 * Filter our period based on the current connection. 2378 * If we can't perform DT transfers on this segment (not in LVD 2379 * mode for instance), then our decision to issue a PPR message 2380 * may change. 2381 */ 2382 period = tinfo->goal.period; 2383 offset = tinfo->goal.offset; 2384 ppr_options = tinfo->goal.ppr_options; 2385 /* Target initiated PPR is not allowed in the SCSI spec */ 2386 if (devinfo->role == ROLE_TARGET) 2387 ppr_options = 0; 2388 rate = ahc_devlimited_syncrate(ahc, tinfo, &period, 2389 &ppr_options, devinfo->role); 2390 dowide = tinfo->curr.width != tinfo->goal.width; 2391 dosync = tinfo->curr.offset != offset || tinfo->curr.period != period; 2392 /* 2393 * Only use PPR if we have options that need it, even if the device 2394 * claims to support it. There might be an expander in the way 2395 * that doesn't. 2396 */ 2397 doppr = ppr_options != 0; 2398 2399 if (!dowide && !dosync && !doppr) { 2400 dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT; 2401 dosync = tinfo->goal.offset != 0; 2402 } 2403 2404 if (!dowide && !dosync && !doppr) { 2405 /* 2406 * Force async with a WDTR message if we have a wide bus, 2407 * or just issue an SDTR with a 0 offset. 2408 */ 2409 if ((ahc->features & AHC_WIDE) != 0) 2410 dowide = 1; 2411 else 2412 dosync = 1; 2413 2414 if (bootverbose) { 2415 ahc_print_devinfo(ahc, devinfo); 2416 printf("Ensuring async\n"); 2417 } 2418 } 2419 2420 /* Target initiated PPR is not allowed in the SCSI spec */ 2421 if (devinfo->role == ROLE_TARGET) 2422 doppr = 0; 2423 2424 /* 2425 * Both the PPR message and SDTR message require the 2426 * goal syncrate to be limited to what the target device 2427 * is capable of handling (based on whether an LVD->SE 2428 * expander is on the bus), so combine these two cases. 2429 * Regardless, guarantee that if we are using WDTR and SDTR 2430 * messages that WDTR comes first. 2431 */ 2432 if (doppr || (dosync && !dowide)) { 2433 2434 offset = tinfo->goal.offset; 2435 ahc_validate_offset(ahc, tinfo, rate, &offset, 2436 doppr ? tinfo->goal.width 2437 : tinfo->curr.width, 2438 devinfo->role); 2439 if (doppr) { 2440 ahc_construct_ppr(ahc, devinfo, period, offset, 2441 tinfo->goal.width, ppr_options); 2442 } else { 2443 ahc_construct_sdtr(ahc, devinfo, period, offset); 2444 } 2445 } else { 2446 ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width); 2447 } 2448 } 2449 2450 /* 2451 * Build a synchronous negotiation message in our message 2452 * buffer based on the input parameters. 2453 */ 2454 static void 2455 ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2456 u_int period, u_int offset) 2457 { 2458 if (offset == 0) 2459 period = AHC_ASYNC_XFER_PERIOD; 2460 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 2461 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN; 2462 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR; 2463 ahc->msgout_buf[ahc->msgout_index++] = period; 2464 ahc->msgout_buf[ahc->msgout_index++] = offset; 2465 ahc->msgout_len += 5; 2466 if (bootverbose) { 2467 printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n", 2468 ahc_name(ahc), devinfo->channel, devinfo->target, 2469 devinfo->lun, period, offset); 2470 } 2471 } 2472 2473 /* 2474 * Build a wide negotiation message in our message 2475 * buffer based on the input parameters. 2476 */ 2477 static void 2478 ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2479 u_int bus_width) 2480 { 2481 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 2482 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN; 2483 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR; 2484 ahc->msgout_buf[ahc->msgout_index++] = bus_width; 2485 ahc->msgout_len += 4; 2486 if (bootverbose) { 2487 printf("(%s:%c:%d:%d): Sending WDTR %x\n", 2488 ahc_name(ahc), devinfo->channel, devinfo->target, 2489 devinfo->lun, bus_width); 2490 } 2491 } 2492 2493 /* 2494 * Build a parallel protocol request message in our message 2495 * buffer based on the input parameters. 2496 */ 2497 static void 2498 ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2499 u_int period, u_int offset, u_int bus_width, 2500 u_int ppr_options) 2501 { 2502 if (offset == 0) 2503 period = AHC_ASYNC_XFER_PERIOD; 2504 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 2505 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN; 2506 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR; 2507 ahc->msgout_buf[ahc->msgout_index++] = period; 2508 ahc->msgout_buf[ahc->msgout_index++] = 0; 2509 ahc->msgout_buf[ahc->msgout_index++] = offset; 2510 ahc->msgout_buf[ahc->msgout_index++] = bus_width; 2511 ahc->msgout_buf[ahc->msgout_index++] = ppr_options; 2512 ahc->msgout_len += 8; 2513 if (bootverbose) { 2514 printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, " 2515 "offset %x, ppr_options %x\n", ahc_name(ahc), 2516 devinfo->channel, devinfo->target, devinfo->lun, 2517 bus_width, period, offset, ppr_options); 2518 } 2519 } 2520 2521 /* 2522 * Clear any active message state. 2523 */ 2524 static void 2525 ahc_clear_msg_state(struct ahc_softc *ahc) 2526 { 2527 ahc->msgout_len = 0; 2528 ahc->msgin_index = 0; 2529 ahc->msg_type = MSG_TYPE_NONE; 2530 if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) { 2531 /* 2532 * The target didn't care to respond to our 2533 * message request, so clear ATN. 2534 */ 2535 ahc_outb(ahc, CLRSINT1, CLRATNO); 2536 } 2537 ahc_outb(ahc, MSG_OUT, MSG_NOOP); 2538 ahc_outb(ahc, SEQ_FLAGS2, 2539 ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING); 2540 } 2541 2542 static void 2543 ahc_handle_proto_violation(struct ahc_softc *ahc) 2544 { 2545 struct ahc_devinfo devinfo; 2546 struct scb *scb; 2547 u_int scbid; 2548 u_int seq_flags; 2549 u_int curphase; 2550 u_int lastphase; 2551 int found; 2552 2553 ahc_fetch_devinfo(ahc, &devinfo); 2554 scbid = ahc_inb(ahc, SCB_TAG); 2555 scb = ahc_lookup_scb(ahc, scbid); 2556 seq_flags = ahc_inb(ahc, SEQ_FLAGS); 2557 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 2558 lastphase = ahc_inb(ahc, LASTPHASE); 2559 if ((seq_flags & NOT_IDENTIFIED) != 0) { 2560 2561 /* 2562 * The reconnecting target either did not send an 2563 * identify message, or did, but we didn't find an SCB 2564 * to match. 2565 */ 2566 ahc_print_devinfo(ahc, &devinfo); 2567 printf("Target did not send an IDENTIFY message. " 2568 "LASTPHASE = 0x%x.\n", lastphase); 2569 scb = NULL; 2570 } else if (scb == NULL) { 2571 /* 2572 * We don't seem to have an SCB active for this 2573 * transaction. Print an error and reset the bus. 2574 */ 2575 ahc_print_devinfo(ahc, &devinfo); 2576 printf("No SCB found during protocol violation\n"); 2577 goto proto_violation_reset; 2578 } else { 2579 ahc_set_transaction_status(scb, CAM_SEQUENCE_FAIL); 2580 if ((seq_flags & NO_CDB_SENT) != 0) { 2581 ahc_print_path(ahc, scb); 2582 printf("No or incomplete CDB sent to device.\n"); 2583 } else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) { 2584 /* 2585 * The target never bothered to provide status to 2586 * us prior to completing the command. Since we don't 2587 * know the disposition of this command, we must attempt 2588 * to abort it. Assert ATN and prepare to send an abort 2589 * message. 2590 */ 2591 ahc_print_path(ahc, scb); 2592 printf("Completed command without status.\n"); 2593 } else { 2594 ahc_print_path(ahc, scb); 2595 printf("Unknown protocol violation.\n"); 2596 ahc_dump_card_state(ahc); 2597 } 2598 } 2599 if ((lastphase & ~P_DATAIN_DT) == 0 2600 || lastphase == P_COMMAND) { 2601 proto_violation_reset: 2602 /* 2603 * Target either went directly to data/command 2604 * phase or didn't respond to our ATN. 2605 * The only safe thing to do is to blow 2606 * it away with a bus reset. 2607 */ 2608 found = ahc_reset_channel(ahc, 'A', TRUE); 2609 printf("%s: Issued Channel %c Bus Reset. " 2610 "%d SCBs aborted\n", ahc_name(ahc), 'A', found); 2611 } else { 2612 /* 2613 * Leave the selection hardware off in case 2614 * this abort attempt will affect yet to 2615 * be sent commands. 2616 */ 2617 ahc_outb(ahc, SCSISEQ, 2618 ahc_inb(ahc, SCSISEQ) & ~ENSELO); 2619 ahc_assert_atn(ahc); 2620 ahc_outb(ahc, MSG_OUT, HOST_MSG); 2621 if (scb == NULL) { 2622 ahc_print_devinfo(ahc, &devinfo); 2623 ahc->msgout_buf[0] = MSG_ABORT_TASK; 2624 ahc->msgout_len = 1; 2625 ahc->msgout_index = 0; 2626 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2627 } else { 2628 ahc_print_path(ahc, scb); 2629 scb->flags |= SCB_ABORT; 2630 } 2631 printf("Protocol violation %s. Attempting to abort.\n", 2632 ahc_lookup_phase_entry(curphase)->phasemsg); 2633 } 2634 } 2635 2636 /* 2637 * Manual message loop handler. 2638 */ 2639 static void 2640 ahc_handle_message_phase(struct ahc_softc *ahc) 2641 { 2642 struct ahc_devinfo devinfo; 2643 u_int bus_phase; 2644 int end_session; 2645 2646 ahc_fetch_devinfo(ahc, &devinfo); 2647 end_session = FALSE; 2648 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 2649 2650 reswitch: 2651 switch (ahc->msg_type) { 2652 case MSG_TYPE_INITIATOR_MSGOUT: 2653 { 2654 int lastbyte; 2655 int phasemis; 2656 int msgdone; 2657 2658 if (ahc->msgout_len == 0) 2659 panic("HOST_MSG_LOOP interrupt with no active message"); 2660 2661 #ifdef AHC_DEBUG 2662 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 2663 ahc_print_devinfo(ahc, &devinfo); 2664 printf("INITIATOR_MSG_OUT"); 2665 } 2666 #endif 2667 phasemis = bus_phase != P_MESGOUT; 2668 if (phasemis) { 2669 #ifdef AHC_DEBUG 2670 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 2671 printf(" PHASEMIS %s\n", 2672 ahc_lookup_phase_entry(bus_phase) 2673 ->phasemsg); 2674 } 2675 #endif 2676 if (bus_phase == P_MESGIN) { 2677 /* 2678 * Change gears and see if 2679 * this messages is of interest to 2680 * us or should be passed back to 2681 * the sequencer. 2682 */ 2683 ahc_outb(ahc, CLRSINT1, CLRATNO); 2684 ahc->send_msg_perror = FALSE; 2685 ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN; 2686 ahc->msgin_index = 0; 2687 goto reswitch; 2688 } 2689 end_session = TRUE; 2690 break; 2691 } 2692 2693 if (ahc->send_msg_perror) { 2694 ahc_outb(ahc, CLRSINT1, CLRATNO); 2695 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 2696 #ifdef AHC_DEBUG 2697 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) 2698 printf(" byte 0x%x\n", ahc->send_msg_perror); 2699 #endif 2700 ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR); 2701 break; 2702 } 2703 2704 msgdone = ahc->msgout_index == ahc->msgout_len; 2705 if (msgdone) { 2706 /* 2707 * The target has requested a retry. 2708 * Re-assert ATN, reset our message index to 2709 * 0, and try again. 2710 */ 2711 ahc->msgout_index = 0; 2712 ahc_assert_atn(ahc); 2713 } 2714 2715 lastbyte = ahc->msgout_index == (ahc->msgout_len - 1); 2716 if (lastbyte) { 2717 /* Last byte is signified by dropping ATN */ 2718 ahc_outb(ahc, CLRSINT1, CLRATNO); 2719 } 2720 2721 /* 2722 * Clear our interrupt status and present 2723 * the next byte on the bus. 2724 */ 2725 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 2726 #ifdef AHC_DEBUG 2727 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) 2728 printf(" byte 0x%x\n", 2729 ahc->msgout_buf[ahc->msgout_index]); 2730 #endif 2731 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); 2732 break; 2733 } 2734 case MSG_TYPE_INITIATOR_MSGIN: 2735 { 2736 int phasemis; 2737 int message_done; 2738 2739 #ifdef AHC_DEBUG 2740 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 2741 ahc_print_devinfo(ahc, &devinfo); 2742 printf("INITIATOR_MSG_IN"); 2743 } 2744 #endif 2745 phasemis = bus_phase != P_MESGIN; 2746 if (phasemis) { 2747 #ifdef AHC_DEBUG 2748 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 2749 printf(" PHASEMIS %s\n", 2750 ahc_lookup_phase_entry(bus_phase) 2751 ->phasemsg); 2752 } 2753 #endif 2754 ahc->msgin_index = 0; 2755 if (bus_phase == P_MESGOUT 2756 && (ahc->send_msg_perror == TRUE 2757 || (ahc->msgout_len != 0 2758 && ahc->msgout_index == 0))) { 2759 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2760 goto reswitch; 2761 } 2762 end_session = TRUE; 2763 break; 2764 } 2765 2766 /* Pull the byte in without acking it */ 2767 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL); 2768 #ifdef AHC_DEBUG 2769 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) 2770 printf(" byte 0x%x\n", 2771 ahc->msgin_buf[ahc->msgin_index]); 2772 #endif 2773 2774 message_done = ahc_parse_msg(ahc, &devinfo); 2775 2776 if (message_done) { 2777 /* 2778 * Clear our incoming message buffer in case there 2779 * is another message following this one. 2780 */ 2781 ahc->msgin_index = 0; 2782 2783 /* 2784 * If this message illicited a response, 2785 * assert ATN so the target takes us to the 2786 * message out phase. 2787 */ 2788 if (ahc->msgout_len != 0) { 2789 #ifdef AHC_DEBUG 2790 if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { 2791 ahc_print_devinfo(ahc, &devinfo); 2792 printf("Asserting ATN for response\n"); 2793 } 2794 #endif 2795 ahc_assert_atn(ahc); 2796 } 2797 } else 2798 ahc->msgin_index++; 2799 2800 if (message_done == MSGLOOP_TERMINATED) { 2801 end_session = TRUE; 2802 } else { 2803 /* Ack the byte */ 2804 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 2805 ahc_inb(ahc, SCSIDATL); 2806 } 2807 break; 2808 } 2809 case MSG_TYPE_TARGET_MSGIN: 2810 { 2811 int msgdone; 2812 int msgout_request; 2813 2814 if (ahc->msgout_len == 0) 2815 panic("Target MSGIN with no active message"); 2816 2817 /* 2818 * If we interrupted a mesgout session, the initiator 2819 * will not know this until our first REQ. So, we 2820 * only honor mesgout requests after we've sent our 2821 * first byte. 2822 */ 2823 if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0 2824 && ahc->msgout_index > 0) 2825 msgout_request = TRUE; 2826 else 2827 msgout_request = FALSE; 2828 2829 if (msgout_request) { 2830 2831 /* 2832 * Change gears and see if 2833 * this messages is of interest to 2834 * us or should be passed back to 2835 * the sequencer. 2836 */ 2837 ahc->msg_type = MSG_TYPE_TARGET_MSGOUT; 2838 ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO); 2839 ahc->msgin_index = 0; 2840 /* Dummy read to REQ for first byte */ 2841 ahc_inb(ahc, SCSIDATL); 2842 ahc_outb(ahc, SXFRCTL0, 2843 ahc_inb(ahc, SXFRCTL0) | SPIOEN); 2844 break; 2845 } 2846 2847 msgdone = ahc->msgout_index == ahc->msgout_len; 2848 if (msgdone) { 2849 ahc_outb(ahc, SXFRCTL0, 2850 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); 2851 end_session = TRUE; 2852 break; 2853 } 2854 2855 /* 2856 * Present the next byte on the bus. 2857 */ 2858 ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); 2859 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); 2860 break; 2861 } 2862 case MSG_TYPE_TARGET_MSGOUT: 2863 { 2864 int lastbyte; 2865 int msgdone; 2866 2867 /* 2868 * The initiator signals that this is 2869 * the last byte by dropping ATN. 2870 */ 2871 lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0; 2872 2873 /* 2874 * Read the latched byte, but turn off SPIOEN first 2875 * so that we don't inadvertently cause a REQ for the 2876 * next byte. 2877 */ 2878 ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); 2879 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL); 2880 msgdone = ahc_parse_msg(ahc, &devinfo); 2881 if (msgdone == MSGLOOP_TERMINATED) { 2882 /* 2883 * The message is *really* done in that it caused 2884 * us to go to bus free. The sequencer has already 2885 * been reset at this point, so pull the ejection 2886 * handle. 2887 */ 2888 return; 2889 } 2890 2891 ahc->msgin_index++; 2892 2893 /* 2894 * XXX Read spec about initiator dropping ATN too soon 2895 * and use msgdone to detect it. 2896 */ 2897 if (msgdone == MSGLOOP_MSGCOMPLETE) { 2898 ahc->msgin_index = 0; 2899 2900 /* 2901 * If this message illicited a response, transition 2902 * to the Message in phase and send it. 2903 */ 2904 if (ahc->msgout_len != 0) { 2905 ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO); 2906 ahc_outb(ahc, SXFRCTL0, 2907 ahc_inb(ahc, SXFRCTL0) | SPIOEN); 2908 ahc->msg_type = MSG_TYPE_TARGET_MSGIN; 2909 ahc->msgin_index = 0; 2910 break; 2911 } 2912 } 2913 2914 if (lastbyte) 2915 end_session = TRUE; 2916 else { 2917 /* Ask for the next byte. */ 2918 ahc_outb(ahc, SXFRCTL0, 2919 ahc_inb(ahc, SXFRCTL0) | SPIOEN); 2920 } 2921 2922 break; 2923 } 2924 default: 2925 panic("Unknown REQINIT message type"); 2926 } 2927 2928 if (end_session) { 2929 ahc_clear_msg_state(ahc); 2930 ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP); 2931 } else 2932 ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP); 2933 } 2934 2935 /* 2936 * See if we sent a particular extended message to the target. 2937 * If "full" is true, return true only if the target saw the full 2938 * message. If "full" is false, return true if the target saw at 2939 * least the first byte of the message. 2940 */ 2941 static int 2942 ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full) 2943 { 2944 int found; 2945 u_int index; 2946 2947 found = FALSE; 2948 index = 0; 2949 2950 while (index < ahc->msgout_len) { 2951 if (ahc->msgout_buf[index] == MSG_EXTENDED) { 2952 u_int end_index; 2953 2954 end_index = index + 1 + ahc->msgout_buf[index + 1]; 2955 if (ahc->msgout_buf[index+2] == msgval 2956 && type == AHCMSG_EXT) { 2957 2958 if (full) { 2959 if (ahc->msgout_index > end_index) 2960 found = TRUE; 2961 } else if (ahc->msgout_index > index) 2962 found = TRUE; 2963 } 2964 index = end_index; 2965 } else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK 2966 && ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) { 2967 2968 /* Skip tag type and tag id or residue param*/ 2969 index += 2; 2970 } else { 2971 /* Single byte message */ 2972 if (type == AHCMSG_1B 2973 && ahc->msgout_buf[index] == msgval 2974 && ahc->msgout_index > index) 2975 found = TRUE; 2976 index++; 2977 } 2978 2979 if (found) 2980 break; 2981 } 2982 return (found); 2983 } 2984 2985 /* 2986 * Wait for a complete incoming message, parse it, and respond accordingly. 2987 */ 2988 static int 2989 ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2990 { 2991 struct ahc_initiator_tinfo *tinfo; 2992 struct ahc_tmode_tstate *tstate; 2993 int reject; 2994 int done; 2995 int response; 2996 u_int targ_scsirate; 2997 2998 done = MSGLOOP_IN_PROG; 2999 response = FALSE; 3000 reject = FALSE; 3001 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 3002 devinfo->target, &tstate); 3003 targ_scsirate = tinfo->scsirate; 3004 3005 /* 3006 * Parse as much of the message as is available, 3007 * rejecting it if we don't support it. When 3008 * the entire message is available and has been 3009 * handled, return MSGLOOP_MSGCOMPLETE, indicating 3010 * that we have parsed an entire message. 3011 * 3012 * In the case of extended messages, we accept the length 3013 * byte outright and perform more checking once we know the 3014 * extended message type. 3015 */ 3016 switch (ahc->msgin_buf[0]) { 3017 case MSG_DISCONNECT: 3018 case MSG_SAVEDATAPOINTER: 3019 case MSG_CMDCOMPLETE: 3020 case MSG_RESTOREPOINTERS: 3021 case MSG_IGN_WIDE_RESIDUE: 3022 /* 3023 * End our message loop as these are messages 3024 * the sequencer handles on its own. 3025 */ 3026 done = MSGLOOP_TERMINATED; 3027 break; 3028 case MSG_MESSAGE_REJECT: 3029 response = ahc_handle_msg_reject(ahc, devinfo); 3030 /* FALLTHROUGH */ 3031 case MSG_NOOP: 3032 done = MSGLOOP_MSGCOMPLETE; 3033 break; 3034 case MSG_EXTENDED: 3035 { 3036 /* Wait for enough of the message to begin validation */ 3037 if (ahc->msgin_index < 2) 3038 break; 3039 switch (ahc->msgin_buf[2]) { 3040 case MSG_EXT_SDTR: 3041 { 3042 struct ahc_syncrate *syncrate; 3043 u_int period; 3044 u_int ppr_options; 3045 u_int offset; 3046 u_int saved_offset; 3047 3048 if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) { 3049 reject = TRUE; 3050 break; 3051 } 3052 3053 /* 3054 * Wait until we have both args before validating 3055 * and acting on this message. 3056 * 3057 * Add one to MSG_EXT_SDTR_LEN to account for 3058 * the extended message preamble. 3059 */ 3060 if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1)) 3061 break; 3062 3063 period = ahc->msgin_buf[3]; 3064 ppr_options = 0; 3065 saved_offset = offset = ahc->msgin_buf[4]; 3066 syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, 3067 &ppr_options, 3068 devinfo->role); 3069 ahc_validate_offset(ahc, tinfo, syncrate, &offset, 3070 targ_scsirate & WIDEXFER, 3071 devinfo->role); 3072 if (bootverbose) { 3073 printf("(%s:%c:%d:%d): Received " 3074 "SDTR period %x, offset %x\n\t" 3075 "Filtered to period %x, offset %x\n", 3076 ahc_name(ahc), devinfo->channel, 3077 devinfo->target, devinfo->lun, 3078 ahc->msgin_buf[3], saved_offset, 3079 period, offset); 3080 } 3081 ahc_set_syncrate(ahc, devinfo, 3082 syncrate, period, 3083 offset, ppr_options, 3084 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3085 /*paused*/TRUE); 3086 3087 /* 3088 * See if we initiated Sync Negotiation 3089 * and didn't have to fall down to async 3090 * transfers. 3091 */ 3092 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) { 3093 /* We started it */ 3094 if (saved_offset != offset) { 3095 /* Went too low - force async */ 3096 reject = TRUE; 3097 } 3098 } else { 3099 /* 3100 * Send our own SDTR in reply 3101 */ 3102 if (bootverbose 3103 && devinfo->role == ROLE_INITIATOR) { 3104 printf("(%s:%c:%d:%d): Target " 3105 "Initiated SDTR\n", 3106 ahc_name(ahc), devinfo->channel, 3107 devinfo->target, devinfo->lun); 3108 } 3109 ahc->msgout_index = 0; 3110 ahc->msgout_len = 0; 3111 ahc_construct_sdtr(ahc, devinfo, 3112 period, offset); 3113 ahc->msgout_index = 0; 3114 response = TRUE; 3115 } 3116 done = MSGLOOP_MSGCOMPLETE; 3117 break; 3118 } 3119 case MSG_EXT_WDTR: 3120 { 3121 u_int bus_width; 3122 u_int saved_width; 3123 u_int sending_reply; 3124 3125 sending_reply = FALSE; 3126 if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) { 3127 reject = TRUE; 3128 break; 3129 } 3130 3131 /* 3132 * Wait until we have our arg before validating 3133 * and acting on this message. 3134 * 3135 * Add one to MSG_EXT_WDTR_LEN to account for 3136 * the extended message preamble. 3137 */ 3138 if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1)) 3139 break; 3140 3141 bus_width = ahc->msgin_buf[3]; 3142 saved_width = bus_width; 3143 ahc_validate_width(ahc, tinfo, &bus_width, 3144 devinfo->role); 3145 if (bootverbose) { 3146 printf("(%s:%c:%d:%d): Received WDTR " 3147 "%x filtered to %x\n", 3148 ahc_name(ahc), devinfo->channel, 3149 devinfo->target, devinfo->lun, 3150 saved_width, bus_width); 3151 } 3152 3153 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) { 3154 /* 3155 * Don't send a WDTR back to the 3156 * target, since we asked first. 3157 * If the width went higher than our 3158 * request, reject it. 3159 */ 3160 if (saved_width > bus_width) { 3161 reject = TRUE; 3162 printf("(%s:%c:%d:%d): requested %dBit " 3163 "transfers. Rejecting...\n", 3164 ahc_name(ahc), devinfo->channel, 3165 devinfo->target, devinfo->lun, 3166 8 * (0x01 << bus_width)); 3167 bus_width = 0; 3168 } 3169 } else { 3170 /* 3171 * Send our own WDTR in reply 3172 */ 3173 if (bootverbose 3174 && devinfo->role == ROLE_INITIATOR) { 3175 printf("(%s:%c:%d:%d): Target " 3176 "Initiated WDTR\n", 3177 ahc_name(ahc), devinfo->channel, 3178 devinfo->target, devinfo->lun); 3179 } 3180 ahc->msgout_index = 0; 3181 ahc->msgout_len = 0; 3182 ahc_construct_wdtr(ahc, devinfo, bus_width); 3183 ahc->msgout_index = 0; 3184 response = TRUE; 3185 sending_reply = TRUE; 3186 } 3187 /* 3188 * After a wide message, we are async, but 3189 * some devices don't seem to honor this portion 3190 * of the spec. Force a renegotiation of the 3191 * sync component of our transfer agreement even 3192 * if our goal is async. By updating our width 3193 * after forcing the negotiation, we avoid 3194 * renegotiating for width. 3195 */ 3196 ahc_update_neg_request(ahc, devinfo, tstate, 3197 tinfo, AHC_NEG_ALWAYS); 3198 ahc_set_width(ahc, devinfo, bus_width, 3199 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3200 /*paused*/TRUE); 3201 if (sending_reply == FALSE && reject == FALSE) { 3202 3203 /* 3204 * We will always have an SDTR to send. 3205 */ 3206 ahc->msgout_index = 0; 3207 ahc->msgout_len = 0; 3208 ahc_build_transfer_msg(ahc, devinfo); 3209 ahc->msgout_index = 0; 3210 response = TRUE; 3211 } 3212 done = MSGLOOP_MSGCOMPLETE; 3213 break; 3214 } 3215 case MSG_EXT_PPR: 3216 { 3217 struct ahc_syncrate *syncrate; 3218 u_int period; 3219 u_int offset; 3220 u_int bus_width; 3221 u_int ppr_options; 3222 u_int saved_width; 3223 u_int saved_offset; 3224 u_int saved_ppr_options; 3225 3226 if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) { 3227 reject = TRUE; 3228 break; 3229 } 3230 3231 /* 3232 * Wait until we have all args before validating 3233 * and acting on this message. 3234 * 3235 * Add one to MSG_EXT_PPR_LEN to account for 3236 * the extended message preamble. 3237 */ 3238 if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1)) 3239 break; 3240 3241 period = ahc->msgin_buf[3]; 3242 offset = ahc->msgin_buf[5]; 3243 bus_width = ahc->msgin_buf[6]; 3244 saved_width = bus_width; 3245 ppr_options = ahc->msgin_buf[7]; 3246 /* 3247 * According to the spec, a DT only 3248 * period factor with no DT option 3249 * set implies async. 3250 */ 3251 if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 3252 && period == 9) 3253 offset = 0; 3254 saved_ppr_options = ppr_options; 3255 saved_offset = offset; 3256 3257 /* 3258 * Mask out any options we don't support 3259 * on any controller. Transfer options are 3260 * only available if we are negotiating wide. 3261 */ 3262 ppr_options &= MSG_EXT_PPR_DT_REQ; 3263 if (bus_width == 0) 3264 ppr_options = 0; 3265 3266 ahc_validate_width(ahc, tinfo, &bus_width, 3267 devinfo->role); 3268 syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, 3269 &ppr_options, 3270 devinfo->role); 3271 ahc_validate_offset(ahc, tinfo, syncrate, 3272 &offset, bus_width, 3273 devinfo->role); 3274 3275 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) { 3276 /* 3277 * If we are unable to do any of the 3278 * requested options (we went too low), 3279 * then we'll have to reject the message. 3280 */ 3281 if (saved_width > bus_width 3282 || saved_offset != offset 3283 || saved_ppr_options != ppr_options) { 3284 reject = TRUE; 3285 period = 0; 3286 offset = 0; 3287 bus_width = 0; 3288 ppr_options = 0; 3289 syncrate = NULL; 3290 } 3291 } else { 3292 if (devinfo->role != ROLE_TARGET) 3293 printf("(%s:%c:%d:%d): Target " 3294 "Initiated PPR\n", 3295 ahc_name(ahc), devinfo->channel, 3296 devinfo->target, devinfo->lun); 3297 else 3298 printf("(%s:%c:%d:%d): Initiator " 3299 "Initiated PPR\n", 3300 ahc_name(ahc), devinfo->channel, 3301 devinfo->target, devinfo->lun); 3302 ahc->msgout_index = 0; 3303 ahc->msgout_len = 0; 3304 ahc_construct_ppr(ahc, devinfo, period, offset, 3305 bus_width, ppr_options); 3306 ahc->msgout_index = 0; 3307 response = TRUE; 3308 } 3309 if (bootverbose) { 3310 printf("(%s:%c:%d:%d): Received PPR width %x, " 3311 "period %x, offset %x,options %x\n" 3312 "\tFiltered to width %x, period %x, " 3313 "offset %x, options %x\n", 3314 ahc_name(ahc), devinfo->channel, 3315 devinfo->target, devinfo->lun, 3316 saved_width, ahc->msgin_buf[3], 3317 saved_offset, saved_ppr_options, 3318 bus_width, period, offset, ppr_options); 3319 } 3320 ahc_set_width(ahc, devinfo, bus_width, 3321 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3322 /*paused*/TRUE); 3323 ahc_set_syncrate(ahc, devinfo, 3324 syncrate, period, 3325 offset, ppr_options, 3326 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3327 /*paused*/TRUE); 3328 done = MSGLOOP_MSGCOMPLETE; 3329 break; 3330 } 3331 default: 3332 /* Unknown extended message. Reject it. */ 3333 reject = TRUE; 3334 break; 3335 } 3336 break; 3337 } 3338 #ifdef AHC_TARGET_MODE 3339 case MSG_BUS_DEV_RESET: 3340 ahc_handle_devreset(ahc, devinfo, 3341 CAM_BDR_SENT, 3342 "Bus Device Reset Received", 3343 /*verbose_level*/0); 3344 ahc_restart(ahc); 3345 done = MSGLOOP_TERMINATED; 3346 break; 3347 case MSG_ABORT_TAG: 3348 case MSG_ABORT: 3349 case MSG_CLEAR_QUEUE: 3350 { 3351 int tag; 3352 3353 /* Target mode messages */ 3354 if (devinfo->role != ROLE_TARGET) { 3355 reject = TRUE; 3356 break; 3357 } 3358 tag = SCB_LIST_NULL; 3359 if (ahc->msgin_buf[0] == MSG_ABORT_TAG) 3360 tag = ahc_inb(ahc, INITIATOR_TAG); 3361 ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, 3362 devinfo->lun, tag, ROLE_TARGET, 3363 CAM_REQ_ABORTED); 3364 3365 tstate = ahc->enabled_targets[devinfo->our_scsiid]; 3366 if (tstate != NULL) { 3367 struct ahc_tmode_lstate* lstate; 3368 3369 lstate = tstate->enabled_luns[devinfo->lun]; 3370 if (lstate != NULL) { 3371 ahc_queue_lstate_event(ahc, lstate, 3372 devinfo->our_scsiid, 3373 ahc->msgin_buf[0], 3374 /*arg*/tag); 3375 ahc_send_lstate_events(ahc, lstate); 3376 } 3377 } 3378 ahc_restart(ahc); 3379 done = MSGLOOP_TERMINATED; 3380 break; 3381 } 3382 #endif 3383 case MSG_TERM_IO_PROC: 3384 default: 3385 reject = TRUE; 3386 break; 3387 } 3388 3389 if (reject) { 3390 /* 3391 * Setup to reject the message. 3392 */ 3393 ahc->msgout_index = 0; 3394 ahc->msgout_len = 1; 3395 ahc->msgout_buf[0] = MSG_MESSAGE_REJECT; 3396 done = MSGLOOP_MSGCOMPLETE; 3397 response = TRUE; 3398 } 3399 3400 if (done != MSGLOOP_IN_PROG && !response) 3401 /* Clear the outgoing message buffer */ 3402 ahc->msgout_len = 0; 3403 3404 return (done); 3405 } 3406 3407 /* 3408 * Process a message reject message. 3409 */ 3410 static int 3411 ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 3412 { 3413 /* 3414 * What we care about here is if we had an 3415 * outstanding SDTR or WDTR message for this 3416 * target. If we did, this is a signal that 3417 * the target is refusing negotiation. 3418 */ 3419 struct scb *scb; 3420 struct ahc_initiator_tinfo *tinfo; 3421 struct ahc_tmode_tstate *tstate; 3422 u_int scb_index; 3423 u_int last_msg; 3424 int response = 0; 3425 3426 scb_index = ahc_inb(ahc, SCB_TAG); 3427 scb = ahc_lookup_scb(ahc, scb_index); 3428 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, 3429 devinfo->our_scsiid, 3430 devinfo->target, &tstate); 3431 /* Might be necessary */ 3432 last_msg = ahc_inb(ahc, LAST_MSG); 3433 3434 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) { 3435 /* 3436 * Target does not support the PPR message. 3437 * Attempt to negotiate SPI-2 style. 3438 */ 3439 if (bootverbose) { 3440 printf("(%s:%c:%d:%d): PPR Rejected. " 3441 "Trying WDTR/SDTR\n", 3442 ahc_name(ahc), devinfo->channel, 3443 devinfo->target, devinfo->lun); 3444 } 3445 tinfo->goal.ppr_options = 0; 3446 tinfo->curr.transport_version = 2; 3447 tinfo->goal.transport_version = 2; 3448 ahc->msgout_index = 0; 3449 ahc->msgout_len = 0; 3450 ahc_build_transfer_msg(ahc, devinfo); 3451 ahc->msgout_index = 0; 3452 response = 1; 3453 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) { 3454 3455 /* note 8bit xfers */ 3456 printf("(%s:%c:%d:%d): refuses WIDE negotiation. Using " 3457 "8bit transfers\n", ahc_name(ahc), 3458 devinfo->channel, devinfo->target, devinfo->lun); 3459 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 3460 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3461 /*paused*/TRUE); 3462 /* 3463 * No need to clear the sync rate. If the target 3464 * did not accept the command, our syncrate is 3465 * unaffected. If the target started the negotiation, 3466 * but rejected our response, we already cleared the 3467 * sync rate before sending our WDTR. 3468 */ 3469 if (tinfo->goal.offset != tinfo->curr.offset) { 3470 3471 /* Start the sync negotiation */ 3472 ahc->msgout_index = 0; 3473 ahc->msgout_len = 0; 3474 ahc_build_transfer_msg(ahc, devinfo); 3475 ahc->msgout_index = 0; 3476 response = 1; 3477 } 3478 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) { 3479 /* note asynch xfers and clear flag */ 3480 ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0, 3481 /*offset*/0, /*ppr_options*/0, 3482 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3483 /*paused*/TRUE); 3484 printf("(%s:%c:%d:%d): refuses synchronous negotiation. " 3485 "Using asynchronous transfers\n", 3486 ahc_name(ahc), devinfo->channel, 3487 devinfo->target, devinfo->lun); 3488 } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) { 3489 int tag_type; 3490 int mask; 3491 3492 tag_type = (scb->hscb->control & MSG_SIMPLE_TASK); 3493 3494 if (tag_type == MSG_SIMPLE_TASK) { 3495 printf("(%s:%c:%d:%d): refuses tagged commands. " 3496 "Performing non-tagged I/O\n", ahc_name(ahc), 3497 devinfo->channel, devinfo->target, devinfo->lun); 3498 ahc_set_tags(ahc, devinfo, AHC_QUEUE_NONE); 3499 mask = ~0x23; 3500 } else { 3501 printf("(%s:%c:%d:%d): refuses %s tagged commands. " 3502 "Performing simple queue tagged I/O only\n", 3503 ahc_name(ahc), devinfo->channel, devinfo->target, 3504 devinfo->lun, tag_type == MSG_ORDERED_TASK 3505 ? "ordered" : "head of queue"); 3506 ahc_set_tags(ahc, devinfo, AHC_QUEUE_BASIC); 3507 mask = ~0x03; 3508 } 3509 3510 /* 3511 * Resend the identify for this CCB as the target 3512 * may believe that the selection is invalid otherwise. 3513 */ 3514 ahc_outb(ahc, SCB_CONTROL, 3515 ahc_inb(ahc, SCB_CONTROL) & mask); 3516 scb->hscb->control &= mask; 3517 ahc_set_transaction_tag(scb, /*enabled*/FALSE, 3518 /*type*/MSG_SIMPLE_TASK); 3519 ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG); 3520 ahc_assert_atn(ahc); 3521 3522 /* 3523 * This transaction is now at the head of 3524 * the untagged queue for this target. 3525 */ 3526 if ((ahc->flags & AHC_SCB_BTT) == 0) { 3527 struct scb_tailq *untagged_q; 3528 3529 untagged_q = 3530 &(ahc->untagged_queues[devinfo->target_offset]); 3531 TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe); 3532 scb->flags |= SCB_UNTAGGEDQ; 3533 } 3534 ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun), 3535 scb->hscb->tag); 3536 3537 /* 3538 * Requeue all tagged commands for this target 3539 * currently in our posession so they can be 3540 * converted to untagged commands. 3541 */ 3542 ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), 3543 SCB_GET_CHANNEL(ahc, scb), 3544 SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL, 3545 ROLE_INITIATOR, CAM_REQUEUE_REQ, 3546 SEARCH_COMPLETE); 3547 } else { 3548 /* 3549 * Otherwise, we ignore it. 3550 */ 3551 printf("%s:%c:%d: Message reject for %x -- ignored\n", 3552 ahc_name(ahc), devinfo->channel, devinfo->target, 3553 last_msg); 3554 } 3555 return (response); 3556 } 3557 3558 /* 3559 * Process an ingnore wide residue message. 3560 */ 3561 static void 3562 ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 3563 { 3564 u_int scb_index; 3565 struct scb *scb; 3566 3567 scb_index = ahc_inb(ahc, SCB_TAG); 3568 scb = ahc_lookup_scb(ahc, scb_index); 3569 /* 3570 * XXX Actually check data direction in the sequencer? 3571 * Perhaps add datadir to some spare bits in the hscb? 3572 */ 3573 if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0 3574 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) { 3575 /* 3576 * Ignore the message if we haven't 3577 * seen an appropriate data phase yet. 3578 */ 3579 } else { 3580 /* 3581 * If the residual occurred on the last 3582 * transfer and the transfer request was 3583 * expected to end on an odd count, do 3584 * nothing. Otherwise, subtract a byte 3585 * and update the residual count accordingly. 3586 */ 3587 uint32_t sgptr; 3588 3589 sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR); 3590 if ((sgptr & SG_LIST_NULL) != 0 3591 && (ahc_inb(ahc, SCB_LUN) & SCB_XFERLEN_ODD) != 0) { 3592 /* 3593 * If the residual occurred on the last 3594 * transfer and the transfer request was 3595 * expected to end on an odd count, do 3596 * nothing. 3597 */ 3598 } else { 3599 struct ahc_dma_seg *sg; 3600 uint32_t data_cnt; 3601 uint32_t data_addr; 3602 uint32_t sglen; 3603 3604 /* Pull in all of the sgptr */ 3605 sgptr = ahc_inl(ahc, SCB_RESIDUAL_SGPTR); 3606 data_cnt = ahc_inl(ahc, SCB_RESIDUAL_DATACNT); 3607 3608 if ((sgptr & SG_LIST_NULL) != 0) { 3609 /* 3610 * The residual data count is not updated 3611 * for the command run to completion case. 3612 * Explicitly zero the count. 3613 */ 3614 data_cnt &= ~AHC_SG_LEN_MASK; 3615 } 3616 3617 data_addr = ahc_inl(ahc, SHADDR); 3618 3619 data_cnt += 1; 3620 data_addr -= 1; 3621 sgptr &= SG_PTR_MASK; 3622 3623 sg = ahc_sg_bus_to_virt(scb, sgptr); 3624 3625 /* 3626 * The residual sg ptr points to the next S/G 3627 * to load so we must go back one. 3628 */ 3629 sg--; 3630 sglen = ahc_le32toh(sg->len) & AHC_SG_LEN_MASK; 3631 if (sg != scb->sg_list 3632 && sglen < (data_cnt & AHC_SG_LEN_MASK)) { 3633 3634 sg--; 3635 sglen = ahc_le32toh(sg->len); 3636 /* 3637 * Preserve High Address and SG_LIST bits 3638 * while setting the count to 1. 3639 */ 3640 data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK)); 3641 data_addr = ahc_le32toh(sg->addr) 3642 + (sglen & AHC_SG_LEN_MASK) - 1; 3643 3644 /* 3645 * Increment sg so it points to the 3646 * "next" sg. 3647 */ 3648 sg++; 3649 sgptr = ahc_sg_virt_to_bus(scb, sg); 3650 } 3651 ahc_outl(ahc, SCB_RESIDUAL_SGPTR, sgptr); 3652 ahc_outl(ahc, SCB_RESIDUAL_DATACNT, data_cnt); 3653 /* 3654 * Toggle the "oddness" of the transfer length 3655 * to handle this mid-transfer ignore wide 3656 * residue. This ensures that the oddness is 3657 * correct for subsequent data transfers. 3658 */ 3659 ahc_outb(ahc, SCB_LUN, 3660 ahc_inb(ahc, SCB_LUN) ^ SCB_XFERLEN_ODD); 3661 } 3662 } 3663 } 3664 3665 3666 /* 3667 * Reinitialize the data pointers for the active transfer 3668 * based on its current residual. 3669 */ 3670 static void 3671 ahc_reinitialize_dataptrs(struct ahc_softc *ahc) 3672 { 3673 struct scb *scb; 3674 struct ahc_dma_seg *sg; 3675 u_int scb_index; 3676 uint32_t sgptr; 3677 uint32_t resid; 3678 uint32_t dataptr; 3679 3680 scb_index = ahc_inb(ahc, SCB_TAG); 3681 scb = ahc_lookup_scb(ahc, scb_index); 3682 sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24) 3683 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16) 3684 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8) 3685 | ahc_inb(ahc, SCB_RESIDUAL_SGPTR); 3686 3687 sgptr &= SG_PTR_MASK; 3688 sg = ahc_sg_bus_to_virt(scb, sgptr); 3689 3690 /* The residual sg_ptr always points to the next sg */ 3691 sg--; 3692 3693 resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16) 3694 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8) 3695 | ahc_inb(ahc, SCB_RESIDUAL_DATACNT); 3696 3697 dataptr = ahc_le32toh(sg->addr) 3698 + (ahc_le32toh(sg->len) & AHC_SG_LEN_MASK) 3699 - resid; 3700 if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { 3701 u_int dscommand1; 3702 3703 dscommand1 = ahc_inb(ahc, DSCOMMAND1); 3704 ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); 3705 ahc_outb(ahc, HADDR, 3706 (ahc_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS); 3707 ahc_outb(ahc, DSCOMMAND1, dscommand1); 3708 } 3709 ahc_outb(ahc, HADDR + 3, dataptr >> 24); 3710 ahc_outb(ahc, HADDR + 2, dataptr >> 16); 3711 ahc_outb(ahc, HADDR + 1, dataptr >> 8); 3712 ahc_outb(ahc, HADDR, dataptr); 3713 ahc_outb(ahc, HCNT + 2, resid >> 16); 3714 ahc_outb(ahc, HCNT + 1, resid >> 8); 3715 ahc_outb(ahc, HCNT, resid); 3716 if ((ahc->features & AHC_ULTRA2) == 0) { 3717 ahc_outb(ahc, STCNT + 2, resid >> 16); 3718 ahc_outb(ahc, STCNT + 1, resid >> 8); 3719 ahc_outb(ahc, STCNT, resid); 3720 } 3721 } 3722 3723 /* 3724 * Handle the effects of issuing a bus device reset message. 3725 */ 3726 static void 3727 ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 3728 cam_status status, char *message, int verbose_level) 3729 { 3730 #ifdef AHC_TARGET_MODE 3731 struct ahc_tmode_tstate* tstate; 3732 u_int lun; 3733 #endif 3734 int found; 3735 3736 found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, 3737 CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role, 3738 status); 3739 3740 #ifdef AHC_TARGET_MODE 3741 /* 3742 * Send an immediate notify ccb to all target mord peripheral 3743 * drivers affected by this action. 3744 */ 3745 tstate = ahc->enabled_targets[devinfo->our_scsiid]; 3746 if (tstate != NULL) { 3747 for (lun = 0; lun < AHC_NUM_LUNS; lun++) { 3748 struct ahc_tmode_lstate* lstate; 3749 3750 lstate = tstate->enabled_luns[lun]; 3751 if (lstate == NULL) 3752 continue; 3753 3754 ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid, 3755 MSG_BUS_DEV_RESET, /*arg*/0); 3756 ahc_send_lstate_events(ahc, lstate); 3757 } 3758 } 3759 #endif 3760 3761 /* 3762 * Go back to async/narrow transfers and renegotiate. 3763 */ 3764 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 3765 AHC_TRANS_CUR, /*paused*/TRUE); 3766 ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, 3767 /*period*/0, /*offset*/0, /*ppr_options*/0, 3768 AHC_TRANS_CUR, /*paused*/TRUE); 3769 3770 ahc_send_async(ahc, devinfo->channel, devinfo->target, 3771 CAM_LUN_WILDCARD, AC_SENT_BDR, NULL); 3772 3773 if (message != NULL 3774 && (verbose_level <= bootverbose)) 3775 printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc), 3776 message, devinfo->channel, devinfo->target, found); 3777 } 3778 3779 #ifdef AHC_TARGET_MODE 3780 static void 3781 ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 3782 struct scb *scb) 3783 { 3784 3785 /* 3786 * To facilitate adding multiple messages together, 3787 * each routine should increment the index and len 3788 * variables instead of setting them explicitly. 3789 */ 3790 ahc->msgout_index = 0; 3791 ahc->msgout_len = 0; 3792 3793 if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) 3794 ahc_build_transfer_msg(ahc, devinfo); 3795 else 3796 panic("ahc_intr: AWAITING target message with no message"); 3797 3798 ahc->msgout_index = 0; 3799 ahc->msg_type = MSG_TYPE_TARGET_MSGIN; 3800 } 3801 #endif 3802 /**************************** Initialization **********************************/ 3803 /* 3804 * Allocate a controller structure for a new device 3805 * and perform initial initializion. 3806 */ 3807 struct ahc_softc * 3808 ahc_alloc(void *platform_arg, char *name) 3809 { 3810 struct ahc_softc *ahc; 3811 int i; 3812 3813 #ifndef __FreeBSD__ 3814 ahc = malloc(sizeof(*ahc), M_DEVBUF, M_NOWAIT); 3815 if (!ahc) { 3816 printf("aic7xxx: cannot malloc softc!\n"); 3817 free(name, M_DEVBUF); 3818 return NULL; 3819 } 3820 #else 3821 ahc = device_get_softc((device_t)platform_arg); 3822 #endif 3823 memset(ahc, 0, sizeof(*ahc)); 3824 ahc->seep_config = malloc(sizeof(*ahc->seep_config), 3825 M_DEVBUF, M_NOWAIT); 3826 if (ahc->seep_config == NULL) { 3827 #ifndef __FreeBSD__ 3828 free(ahc, M_DEVBUF); 3829 #endif 3830 free(name, M_DEVBUF); 3831 return (NULL); 3832 } 3833 LIST_INIT(&ahc->pending_scbs); 3834 /* We don't know our unit number until the OSM sets it */ 3835 ahc->name = name; 3836 ahc->unit = -1; 3837 ahc->description = NULL; 3838 ahc->channel = 'A'; 3839 ahc->channel_b = 'B'; 3840 ahc->chip = AHC_NONE; 3841 ahc->features = AHC_FENONE; 3842 ahc->bugs = AHC_BUGNONE; 3843 ahc->flags = AHC_FNONE; 3844 /* 3845 * Default to all error reporting enabled with the 3846 * sequencer operating at its fastest speed. 3847 * The bus attach code may modify this. 3848 */ 3849 ahc->seqctl = FASTMODE; 3850 3851 for (i = 0; i < AHC_NUM_TARGETS; i++) 3852 TAILQ_INIT(&ahc->untagged_queues[i]); 3853 if (ahc_platform_alloc(ahc, platform_arg) != 0) { 3854 ahc_free(ahc); 3855 ahc = NULL; 3856 } 3857 return (ahc); 3858 } 3859 3860 int 3861 ahc_softc_init(struct ahc_softc *ahc) 3862 { 3863 3864 /* The IRQMS bit is only valid on VL and EISA chips */ 3865 if ((ahc->chip & AHC_PCI) == 0) 3866 ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS; 3867 else 3868 ahc->unpause = 0; 3869 ahc->pause = ahc->unpause | PAUSE; 3870 /* XXX The shared scb data stuff should be deprecated */ 3871 if (ahc->scb_data == NULL) { 3872 ahc->scb_data = malloc(sizeof(*ahc->scb_data), 3873 M_DEVBUF, M_NOWAIT); 3874 if (ahc->scb_data == NULL) 3875 return (ENOMEM); 3876 memset(ahc->scb_data, 0, sizeof(*ahc->scb_data)); 3877 } 3878 3879 return (0); 3880 } 3881 3882 void 3883 ahc_softc_insert(struct ahc_softc *ahc) 3884 { 3885 struct ahc_softc *list_ahc; 3886 3887 #if AHC_PCI_CONFIG > 0 3888 /* 3889 * Second Function PCI devices need to inherit some 3890 * settings from function 0. 3891 */ 3892 if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI 3893 && (ahc->features & AHC_MULTI_FUNC) != 0) { 3894 TAILQ_FOREACH(list_ahc, &ahc_tailq, links) { 3895 ahc_dev_softc_t list_pci; 3896 ahc_dev_softc_t pci; 3897 3898 list_pci = list_ahc->dev_softc; 3899 pci = ahc->dev_softc; 3900 if (ahc_get_pci_slot(list_pci) == ahc_get_pci_slot(pci) 3901 && ahc_get_pci_bus(list_pci) == ahc_get_pci_bus(pci)) { 3902 struct ahc_softc *master; 3903 struct ahc_softc *slave; 3904 3905 if (ahc_get_pci_function(list_pci) == 0) { 3906 master = list_ahc; 3907 slave = ahc; 3908 } else { 3909 master = ahc; 3910 slave = list_ahc; 3911 } 3912 slave->flags &= ~AHC_BIOS_ENABLED; 3913 slave->flags |= 3914 master->flags & AHC_BIOS_ENABLED; 3915 slave->flags &= ~AHC_PRIMARY_CHANNEL; 3916 slave->flags |= 3917 master->flags & AHC_PRIMARY_CHANNEL; 3918 break; 3919 } 3920 } 3921 } 3922 #endif 3923 3924 /* 3925 * Insertion sort into our list of softcs. 3926 */ 3927 list_ahc = TAILQ_FIRST(&ahc_tailq); 3928 while (list_ahc != NULL 3929 && ahc_softc_comp(ahc, list_ahc) <= 0) 3930 list_ahc = TAILQ_NEXT(list_ahc, links); 3931 if (list_ahc != NULL) 3932 TAILQ_INSERT_BEFORE(list_ahc, ahc, links); 3933 else 3934 TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links); 3935 ahc->init_level++; 3936 } 3937 3938 /* 3939 * Verify that the passed in softc pointer is for a 3940 * controller that is still configured. 3941 */ 3942 struct ahc_softc * 3943 ahc_find_softc(struct ahc_softc *ahc) 3944 { 3945 struct ahc_softc *list_ahc; 3946 3947 TAILQ_FOREACH(list_ahc, &ahc_tailq, links) { 3948 if (list_ahc == ahc) 3949 return (ahc); 3950 } 3951 return (NULL); 3952 } 3953 3954 void 3955 ahc_set_unit(struct ahc_softc *ahc, int unit) 3956 { 3957 ahc->unit = unit; 3958 } 3959 3960 void 3961 ahc_set_name(struct ahc_softc *ahc, char *name) 3962 { 3963 if (ahc->name != NULL) 3964 free(ahc->name, M_DEVBUF); 3965 ahc->name = name; 3966 } 3967 3968 void 3969 ahc_free(struct ahc_softc *ahc) 3970 { 3971 int i; 3972 3973 switch (ahc->init_level) { 3974 default: 3975 case 5: 3976 ahc_shutdown(ahc); 3977 TAILQ_REMOVE(&ahc_tailq, ahc, links); 3978 /* FALLTHROUGH */ 3979 case 4: 3980 ahc_dmamap_unload(ahc, ahc->shared_data_dmat, 3981 ahc->shared_data_dmamap); 3982 /* FALLTHROUGH */ 3983 case 3: 3984 ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo, 3985 ahc->shared_data_dmamap); 3986 ahc_dmamap_destroy(ahc, ahc->shared_data_dmat, 3987 ahc->shared_data_dmamap); 3988 /* FALLTHROUGH */ 3989 case 2: 3990 ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat); 3991 case 1: 3992 #ifndef __linux__ 3993 ahc_dma_tag_destroy(ahc, ahc->buffer_dmat); 3994 #endif 3995 break; 3996 case 0: 3997 break; 3998 } 3999 4000 #ifndef __linux__ 4001 ahc_dma_tag_destroy(ahc, ahc->parent_dmat); 4002 #endif 4003 ahc_platform_free(ahc); 4004 ahc_fini_scbdata(ahc); 4005 for (i = 0; i < AHC_NUM_TARGETS; i++) { 4006 struct ahc_tmode_tstate *tstate; 4007 4008 tstate = ahc->enabled_targets[i]; 4009 if (tstate != NULL) { 4010 #ifdef AHC_TARGET_MODE 4011 int j; 4012 4013 for (j = 0; j < AHC_NUM_LUNS; j++) { 4014 struct ahc_tmode_lstate *lstate; 4015 4016 lstate = tstate->enabled_luns[j]; 4017 if (lstate != NULL) { 4018 xpt_free_path(lstate->path); 4019 free(lstate, M_DEVBUF); 4020 } 4021 } 4022 #endif 4023 free(tstate, M_DEVBUF); 4024 } 4025 } 4026 #ifdef AHC_TARGET_MODE 4027 if (ahc->black_hole != NULL) { 4028 xpt_free_path(ahc->black_hole->path); 4029 free(ahc->black_hole, M_DEVBUF); 4030 } 4031 #endif 4032 if (ahc->name != NULL) 4033 free(ahc->name, M_DEVBUF); 4034 if (ahc->seep_config != NULL) 4035 free(ahc->seep_config, M_DEVBUF); 4036 #ifndef __FreeBSD__ 4037 free(ahc, M_DEVBUF); 4038 #endif 4039 return; 4040 } 4041 4042 void 4043 ahc_shutdown(void *arg) 4044 { 4045 struct ahc_softc *ahc; 4046 int i; 4047 4048 ahc = (struct ahc_softc *)arg; 4049 4050 /* This will reset most registers to 0, but not all */ 4051 ahc_reset(ahc, /*reinit*/FALSE); 4052 ahc_outb(ahc, SCSISEQ, 0); 4053 ahc_outb(ahc, SXFRCTL0, 0); 4054 ahc_outb(ahc, DSPCISTATUS, 0); 4055 4056 for (i = TARG_SCSIRATE; i < SCSICONF; i++) 4057 ahc_outb(ahc, i, 0); 4058 } 4059 4060 /* 4061 * Reset the controller and record some information about it 4062 * that is only available just after a reset. If "reinit" is 4063 * non-zero, this reset occured after initial configuration 4064 * and the caller requests that the chip be fully reinitialized 4065 * to a runable state. Chip interrupts are *not* enabled after 4066 * a reinitialization. The caller must enable interrupts via 4067 * ahc_intr_enable(). 4068 */ 4069 int 4070 ahc_reset(struct ahc_softc *ahc, int reinit) 4071 { 4072 u_int sblkctl; 4073 u_int sxfrctl1_a, sxfrctl1_b; 4074 int error; 4075 int wait; 4076 4077 /* 4078 * Preserve the value of the SXFRCTL1 register for all channels. 4079 * It contains settings that affect termination and we don't want 4080 * to disturb the integrity of the bus. 4081 */ 4082 ahc_pause(ahc); 4083 if ((ahc_inb(ahc, HCNTRL) & CHIPRST) != 0) { 4084 /* 4085 * The chip has not been initialized since 4086 * PCI/EISA/VLB bus reset. Don't trust 4087 * "left over BIOS data". 4088 */ 4089 ahc->flags |= AHC_NO_BIOS_INIT; 4090 } 4091 sxfrctl1_b = 0; 4092 if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) { 4093 u_int sblkctl; 4094 4095 /* 4096 * Save channel B's settings in case this chip 4097 * is setup for TWIN channel operation. 4098 */ 4099 sblkctl = ahc_inb(ahc, SBLKCTL); 4100 ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); 4101 sxfrctl1_b = ahc_inb(ahc, SXFRCTL1); 4102 ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); 4103 } 4104 sxfrctl1_a = ahc_inb(ahc, SXFRCTL1); 4105 4106 ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause); 4107 4108 /* 4109 * Ensure that the reset has finished. We delay 1000us 4110 * prior to reading the register to make sure the chip 4111 * has sufficiently completed its reset to handle register 4112 * accesses. 4113 */ 4114 wait = 1000; 4115 do { 4116 ahc_delay(1000); 4117 } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK)); 4118 4119 if (wait == 0) { 4120 printf("%s: WARNING - Failed chip reset! " 4121 "Trying to initialize anyway.\n", ahc_name(ahc)); 4122 } 4123 ahc_outb(ahc, HCNTRL, ahc->pause); 4124 4125 /* Determine channel configuration */ 4126 sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE); 4127 /* No Twin Channel PCI cards */ 4128 if ((ahc->chip & AHC_PCI) != 0) 4129 sblkctl &= ~SELBUSB; 4130 switch (sblkctl) { 4131 case 0: 4132 /* Single Narrow Channel */ 4133 break; 4134 case 2: 4135 /* Wide Channel */ 4136 ahc->features |= AHC_WIDE; 4137 break; 4138 case 8: 4139 /* Twin Channel */ 4140 ahc->features |= AHC_TWIN; 4141 break; 4142 default: 4143 printf(" Unsupported adapter type. Ignoring\n"); 4144 return(-1); 4145 } 4146 4147 /* 4148 * Reload sxfrctl1. 4149 * 4150 * We must always initialize STPWEN to 1 before we 4151 * restore the saved values. STPWEN is initialized 4152 * to a tri-state condition which can only be cleared 4153 * by turning it on. 4154 */ 4155 if ((ahc->features & AHC_TWIN) != 0) { 4156 u_int sblkctl; 4157 4158 sblkctl = ahc_inb(ahc, SBLKCTL); 4159 ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); 4160 ahc_outb(ahc, SXFRCTL1, sxfrctl1_b); 4161 ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); 4162 } 4163 ahc_outb(ahc, SXFRCTL1, sxfrctl1_a); 4164 4165 error = 0; 4166 if (reinit != 0) 4167 /* 4168 * If a recovery action has forced a chip reset, 4169 * re-initialize the chip to our liking. 4170 */ 4171 error = ahc->bus_chip_init(ahc); 4172 #ifdef AHC_DUMP_SEQ 4173 else 4174 ahc_dumpseq(ahc); 4175 #endif 4176 4177 return (error); 4178 } 4179 4180 /* 4181 * Determine the number of SCBs available on the controller 4182 */ 4183 int 4184 ahc_probe_scbs(struct ahc_softc *ahc) { 4185 int i; 4186 4187 for (i = 0; i < AHC_SCB_MAX; i++) { 4188 4189 ahc_outb(ahc, SCBPTR, i); 4190 ahc_outb(ahc, SCB_BASE, i); 4191 if (ahc_inb(ahc, SCB_BASE) != i) 4192 break; 4193 ahc_outb(ahc, SCBPTR, 0); 4194 if (ahc_inb(ahc, SCB_BASE) != 0) 4195 break; 4196 } 4197 return (i); 4198 } 4199 4200 static void 4201 ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 4202 { 4203 bus_addr_t *baddr; 4204 4205 baddr = (bus_addr_t *)arg; 4206 *baddr = segs->ds_addr; 4207 } 4208 4209 static void 4210 ahc_build_free_scb_list(struct ahc_softc *ahc) 4211 { 4212 int scbsize; 4213 int i; 4214 4215 scbsize = 32; 4216 if ((ahc->flags & AHC_LSCBS_ENABLED) != 0) 4217 scbsize = 64; 4218 4219 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 4220 int j; 4221 4222 ahc_outb(ahc, SCBPTR, i); 4223 4224 /* 4225 * Touch all SCB bytes to avoid parity errors 4226 * should one of our debugging routines read 4227 * an otherwise uninitiatlized byte. 4228 */ 4229 for (j = 0; j < scbsize; j++) 4230 ahc_outb(ahc, SCB_BASE+j, 0xFF); 4231 4232 /* Clear the control byte. */ 4233 ahc_outb(ahc, SCB_CONTROL, 0); 4234 4235 /* Set the next pointer */ 4236 if ((ahc->flags & AHC_PAGESCBS) != 0) 4237 ahc_outb(ahc, SCB_NEXT, i+1); 4238 else 4239 ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); 4240 4241 /* Make the tag number, SCSIID, and lun invalid */ 4242 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); 4243 ahc_outb(ahc, SCB_SCSIID, 0xFF); 4244 ahc_outb(ahc, SCB_LUN, 0xFF); 4245 } 4246 4247 if ((ahc->flags & AHC_PAGESCBS) != 0) { 4248 /* SCB 0 heads the free list. */ 4249 ahc_outb(ahc, FREE_SCBH, 0); 4250 } else { 4251 /* No free list. */ 4252 ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL); 4253 } 4254 4255 /* Make sure that the last SCB terminates the free list */ 4256 ahc_outb(ahc, SCBPTR, i-1); 4257 ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); 4258 } 4259 4260 static int 4261 ahc_init_scbdata(struct ahc_softc *ahc) 4262 { 4263 struct scb_data *scb_data; 4264 4265 scb_data = ahc->scb_data; 4266 SLIST_INIT(&scb_data->free_scbs); 4267 SLIST_INIT(&scb_data->sg_maps); 4268 4269 /* Allocate SCB resources */ 4270 scb_data->scbarray = 4271 (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, 4272 M_DEVBUF, M_NOWAIT); 4273 if (scb_data->scbarray == NULL) 4274 return (ENOMEM); 4275 memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC); 4276 4277 /* Determine the number of hardware SCBs and initialize them */ 4278 4279 scb_data->maxhscbs = ahc_probe_scbs(ahc); 4280 if (ahc->scb_data->maxhscbs == 0) { 4281 printf("%s: No SCB space found\n", ahc_name(ahc)); 4282 return (ENXIO); 4283 } 4284 4285 /* 4286 * Create our DMA tags. These tags define the kinds of device 4287 * accessible memory allocations and memory mappings we will 4288 * need to perform during normal operation. 4289 * 4290 * Unless we need to further restrict the allocation, we rely 4291 * on the restrictions of the parent dmat, hence the common 4292 * use of MAXADDR and MAXSIZE. 4293 */ 4294 4295 /* DMA tag for our hardware scb structures */ 4296 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 4297 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 4298 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 4299 /*highaddr*/BUS_SPACE_MAXADDR, 4300 /*filter*/NULL, /*filterarg*/NULL, 4301 AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb), 4302 /*nsegments*/1, 4303 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 4304 /*flags*/0, &scb_data->hscb_dmat) != 0) { 4305 goto error_exit; 4306 } 4307 4308 scb_data->init_level++; 4309 4310 /* Allocation for our hscbs */ 4311 if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat, 4312 (void **)&scb_data->hscbs, 4313 BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) { 4314 goto error_exit; 4315 } 4316 4317 scb_data->init_level++; 4318 4319 /* And permanently map them */ 4320 ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap, 4321 scb_data->hscbs, 4322 AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb), 4323 ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0); 4324 4325 scb_data->init_level++; 4326 4327 /* DMA tag for our sense buffers */ 4328 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 4329 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 4330 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 4331 /*highaddr*/BUS_SPACE_MAXADDR, 4332 /*filter*/NULL, /*filterarg*/NULL, 4333 AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data), 4334 /*nsegments*/1, 4335 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 4336 /*flags*/0, &scb_data->sense_dmat) != 0) { 4337 goto error_exit; 4338 } 4339 4340 scb_data->init_level++; 4341 4342 /* Allocate them */ 4343 if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat, 4344 (void **)&scb_data->sense, 4345 BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) { 4346 goto error_exit; 4347 } 4348 4349 scb_data->init_level++; 4350 4351 /* And permanently map them */ 4352 ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap, 4353 scb_data->sense, 4354 AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data), 4355 ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0); 4356 4357 scb_data->init_level++; 4358 4359 /* DMA tag for our S/G structures. We allocate in page sized chunks */ 4360 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/8, 4361 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 4362 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 4363 /*highaddr*/BUS_SPACE_MAXADDR, 4364 /*filter*/NULL, /*filterarg*/NULL, 4365 PAGE_SIZE, /*nsegments*/1, 4366 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 4367 /*flags*/0, &scb_data->sg_dmat) != 0) { 4368 goto error_exit; 4369 } 4370 4371 scb_data->init_level++; 4372 4373 /* Perform initial CCB allocation */ 4374 memset(scb_data->hscbs, 0, 4375 AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb)); 4376 ahc_alloc_scbs(ahc); 4377 4378 if (scb_data->numscbs == 0) { 4379 printf("%s: ahc_init_scbdata - " 4380 "Unable to allocate initial scbs\n", 4381 ahc_name(ahc)); 4382 goto error_exit; 4383 } 4384 4385 /* 4386 * Reserve the next queued SCB. 4387 */ 4388 ahc->next_queued_scb = ahc_get_scb(ahc); 4389 4390 /* 4391 * Note that we were successfull 4392 */ 4393 return (0); 4394 4395 error_exit: 4396 4397 return (ENOMEM); 4398 } 4399 4400 static void 4401 ahc_fini_scbdata(struct ahc_softc *ahc) 4402 { 4403 struct scb_data *scb_data; 4404 4405 scb_data = ahc->scb_data; 4406 if (scb_data == NULL) 4407 return; 4408 4409 switch (scb_data->init_level) { 4410 default: 4411 case 7: 4412 { 4413 struct sg_map_node *sg_map; 4414 4415 while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) { 4416 SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); 4417 ahc_dmamap_unload(ahc, scb_data->sg_dmat, 4418 sg_map->sg_dmamap); 4419 ahc_dmamem_free(ahc, scb_data->sg_dmat, 4420 sg_map->sg_vaddr, 4421 sg_map->sg_dmamap); 4422 free(sg_map, M_DEVBUF); 4423 } 4424 ahc_dma_tag_destroy(ahc, scb_data->sg_dmat); 4425 } 4426 case 6: 4427 ahc_dmamap_unload(ahc, scb_data->sense_dmat, 4428 scb_data->sense_dmamap); 4429 case 5: 4430 ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense, 4431 scb_data->sense_dmamap); 4432 ahc_dmamap_destroy(ahc, scb_data->sense_dmat, 4433 scb_data->sense_dmamap); 4434 case 4: 4435 ahc_dma_tag_destroy(ahc, scb_data->sense_dmat); 4436 case 3: 4437 ahc_dmamap_unload(ahc, scb_data->hscb_dmat, 4438 scb_data->hscb_dmamap); 4439 case 2: 4440 ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs, 4441 scb_data->hscb_dmamap); 4442 ahc_dmamap_destroy(ahc, scb_data->hscb_dmat, 4443 scb_data->hscb_dmamap); 4444 case 1: 4445 ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat); 4446 break; 4447 case 0: 4448 break; 4449 } 4450 if (scb_data->scbarray != NULL) 4451 free(scb_data->scbarray, M_DEVBUF); 4452 } 4453 4454 void 4455 ahc_alloc_scbs(struct ahc_softc *ahc) 4456 { 4457 struct scb_data *scb_data; 4458 struct scb *next_scb; 4459 struct sg_map_node *sg_map; 4460 bus_addr_t physaddr; 4461 struct ahc_dma_seg *segs; 4462 int newcount; 4463 int i; 4464 4465 scb_data = ahc->scb_data; 4466 if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC) 4467 /* Can't allocate any more */ 4468 return; 4469 4470 next_scb = &scb_data->scbarray[scb_data->numscbs]; 4471 4472 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); 4473 4474 if (sg_map == NULL) 4475 return; 4476 4477 /* Allocate S/G space for the next batch of SCBS */ 4478 if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat, 4479 (void **)&sg_map->sg_vaddr, 4480 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { 4481 free(sg_map, M_DEVBUF); 4482 return; 4483 } 4484 4485 SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); 4486 4487 ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap, 4488 sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb, 4489 &sg_map->sg_physaddr, /*flags*/0); 4490 4491 segs = sg_map->sg_vaddr; 4492 physaddr = sg_map->sg_physaddr; 4493 4494 newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg))); 4495 newcount = MIN(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs)); 4496 for (i = 0; i < newcount; i++) { 4497 struct scb_platform_data *pdata; 4498 #ifndef __linux__ 4499 int error; 4500 #endif 4501 pdata = (struct scb_platform_data *)malloc(sizeof(*pdata), 4502 M_DEVBUF, M_NOWAIT); 4503 if (pdata == NULL) 4504 break; 4505 next_scb->platform_data = pdata; 4506 next_scb->sg_map = sg_map; 4507 next_scb->sg_list = segs; 4508 /* 4509 * The sequencer always starts with the second entry. 4510 * The first entry is embedded in the scb. 4511 */ 4512 next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg); 4513 next_scb->ahc_softc = ahc; 4514 next_scb->flags = SCB_FREE; 4515 #ifndef __linux__ 4516 error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0, 4517 &next_scb->dmamap); 4518 if (error != 0) 4519 break; 4520 #endif 4521 next_scb->hscb = &scb_data->hscbs[scb_data->numscbs]; 4522 next_scb->hscb->tag = ahc->scb_data->numscbs; 4523 SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, 4524 next_scb, links.sle); 4525 segs += AHC_NSEG; 4526 physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg)); 4527 next_scb++; 4528 ahc->scb_data->numscbs++; 4529 } 4530 } 4531 4532 void 4533 ahc_controller_info(struct ahc_softc *ahc, char *buf) 4534 { 4535 int len; 4536 4537 len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]); 4538 buf += len; 4539 if ((ahc->features & AHC_TWIN) != 0) 4540 len = sprintf(buf, "Twin Channel, A SCSI Id=%d, " 4541 "B SCSI Id=%d, primary %c, ", 4542 ahc->our_id, ahc->our_id_b, 4543 (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A'); 4544 else { 4545 const char *speed; 4546 const char *type; 4547 4548 speed = ""; 4549 if ((ahc->features & AHC_ULTRA) != 0) { 4550 speed = "Ultra "; 4551 } else if ((ahc->features & AHC_DT) != 0) { 4552 speed = "Ultra160 "; 4553 } else if ((ahc->features & AHC_ULTRA2) != 0) { 4554 speed = "Ultra2 "; 4555 } 4556 if ((ahc->features & AHC_WIDE) != 0) { 4557 type = "Wide"; 4558 } else { 4559 type = "Single"; 4560 } 4561 len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ", 4562 speed, type, ahc->channel, ahc->our_id); 4563 } 4564 buf += len; 4565 4566 if ((ahc->flags & AHC_PAGESCBS) != 0) 4567 sprintf(buf, "%d/%d SCBs", 4568 ahc->scb_data->maxhscbs, AHC_MAX_QUEUE); 4569 else 4570 sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs); 4571 } 4572 4573 int 4574 ahc_chip_init(struct ahc_softc *ahc) 4575 { 4576 int term; 4577 int error; 4578 u_int i; 4579 u_int scsi_conf; 4580 u_int scsiseq_template; 4581 uint32_t physaddr; 4582 4583 ahc_outb(ahc, SEQ_FLAGS, 0); 4584 ahc_outb(ahc, SEQ_FLAGS2, 0); 4585 4586 /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/ 4587 if (ahc->features & AHC_TWIN) { 4588 4589 /* 4590 * Setup Channel B first. 4591 */ 4592 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); 4593 term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0; 4594 ahc_outb(ahc, SCSIID, ahc->our_id_b); 4595 scsi_conf = ahc_inb(ahc, SCSICONF + 1); 4596 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4597 |term|ahc->seltime_b|ENSTIMER|ACTNEGEN); 4598 if ((ahc->features & AHC_ULTRA2) != 0) 4599 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); 4600 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4601 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4602 4603 /* Select Channel A */ 4604 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 4605 } 4606 term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0; 4607 if ((ahc->features & AHC_ULTRA2) != 0) 4608 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); 4609 else 4610 ahc_outb(ahc, SCSIID, ahc->our_id); 4611 scsi_conf = ahc_inb(ahc, SCSICONF); 4612 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4613 |term|ahc->seltime 4614 |ENSTIMER|ACTNEGEN); 4615 if ((ahc->features & AHC_ULTRA2) != 0) 4616 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); 4617 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4618 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4619 4620 /* There are no untagged SCBs active yet. */ 4621 for (i = 0; i < 16; i++) { 4622 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0)); 4623 if ((ahc->flags & AHC_SCB_BTT) != 0) { 4624 int lun; 4625 4626 /* 4627 * The SCB based BTT allows an entry per 4628 * target and lun pair. 4629 */ 4630 for (lun = 1; lun < AHC_NUM_LUNS; lun++) 4631 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun)); 4632 } 4633 } 4634 4635 /* All of our queues are empty */ 4636 for (i = 0; i < 256; i++) 4637 ahc->qoutfifo[i] = SCB_LIST_NULL; 4638 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD); 4639 4640 for (i = 0; i < 256; i++) 4641 ahc->qinfifo[i] = SCB_LIST_NULL; 4642 4643 if ((ahc->features & AHC_MULTI_TID) != 0) { 4644 ahc_outb(ahc, TARGID, 0); 4645 ahc_outb(ahc, TARGID + 1, 0); 4646 } 4647 4648 /* 4649 * Tell the sequencer where it can find our arrays in memory. 4650 */ 4651 physaddr = ahc->scb_data->hscb_busaddr; 4652 ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF); 4653 ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF); 4654 ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF); 4655 ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF); 4656 4657 physaddr = ahc->shared_data_busaddr; 4658 ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF); 4659 ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF); 4660 ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF); 4661 ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF); 4662 4663 /* 4664 * Initialize the group code to command length table. 4665 * This overrides the values in TARG_SCSIRATE, so only 4666 * setup the table after we have processed that information. 4667 */ 4668 ahc_outb(ahc, CMDSIZE_TABLE, 5); 4669 ahc_outb(ahc, CMDSIZE_TABLE + 1, 9); 4670 ahc_outb(ahc, CMDSIZE_TABLE + 2, 9); 4671 ahc_outb(ahc, CMDSIZE_TABLE + 3, 0); 4672 ahc_outb(ahc, CMDSIZE_TABLE + 4, 15); 4673 ahc_outb(ahc, CMDSIZE_TABLE + 5, 11); 4674 ahc_outb(ahc, CMDSIZE_TABLE + 6, 0); 4675 ahc_outb(ahc, CMDSIZE_TABLE + 7, 0); 4676 4677 if ((ahc->features & AHC_HS_MAILBOX) != 0) 4678 ahc_outb(ahc, HS_MAILBOX, 0); 4679 4680 /* Tell the sequencer of our initial queue positions */ 4681 if ((ahc->features & AHC_TARGETMODE) != 0) { 4682 ahc->tqinfifonext = 1; 4683 ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1); 4684 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); 4685 } 4686 ahc->qinfifonext = 0; 4687 ahc->qoutfifonext = 0; 4688 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 4689 ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256); 4690 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); 4691 ahc_outb(ahc, SNSCB_QOFF, ahc->qinfifonext); 4692 ahc_outb(ahc, SDSCB_QOFF, 0); 4693 } else { 4694 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); 4695 ahc_outb(ahc, QINPOS, ahc->qinfifonext); 4696 ahc_outb(ahc, QOUTPOS, ahc->qoutfifonext); 4697 } 4698 4699 /* We don't have any waiting selections */ 4700 ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL); 4701 4702 /* Our disconnection list is empty too */ 4703 ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL); 4704 4705 /* Message out buffer starts empty */ 4706 ahc_outb(ahc, MSG_OUT, MSG_NOOP); 4707 4708 /* 4709 * Setup the allowed SCSI Sequences based on operational mode. 4710 * If we are a target, we'll enalbe select in operations once 4711 * we've had a lun enabled. 4712 */ 4713 scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP; 4714 if ((ahc->flags & AHC_INITIATORROLE) != 0) 4715 scsiseq_template |= ENRSELI; 4716 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template); 4717 4718 /* Initialize our list of free SCBs. */ 4719 ahc_build_free_scb_list(ahc); 4720 4721 /* 4722 * Tell the sequencer which SCB will be the next one it receives. 4723 */ 4724 ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); 4725 4726 /* 4727 * Load the Sequencer program and Enable the adapter 4728 * in "fast" mode. 4729 */ 4730 if (bootverbose) 4731 printf("%s: Downloading Sequencer Program...", 4732 ahc_name(ahc)); 4733 4734 error = ahc_loadseq(ahc); 4735 if (error != 0) 4736 return (error); 4737 4738 if ((ahc->features & AHC_ULTRA2) != 0) { 4739 int wait; 4740 4741 /* 4742 * Wait for up to 500ms for our transceivers 4743 * to settle. If the adapter does not have 4744 * a cable attached, the transceivers may 4745 * never settle, so don't complain if we 4746 * fail here. 4747 */ 4748 for (wait = 5000; 4749 (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait; 4750 wait--) 4751 ahc_delay(100); 4752 } 4753 ahc_restart(ahc); 4754 return (0); 4755 } 4756 4757 /* 4758 * Start the board, ready for normal operation 4759 */ 4760 int 4761 ahc_init(struct ahc_softc *ahc) 4762 { 4763 int max_targ; 4764 u_int i; 4765 u_int scsi_conf; 4766 u_int ultraenb; 4767 u_int discenable; 4768 u_int tagenable; 4769 size_t driver_data_size; 4770 4771 #ifdef AHC_DEBUG 4772 if ((ahc_debug & AHC_DEBUG_SEQUENCER) != 0) 4773 ahc->flags |= AHC_SEQUENCER_DEBUG; 4774 #endif 4775 4776 #ifdef AHC_PRINT_SRAM 4777 printf("Scratch Ram:"); 4778 for (i = 0x20; i < 0x5f; i++) { 4779 if (((i % 8) == 0) && (i != 0)) { 4780 printf ("\n "); 4781 } 4782 printf (" 0x%x", ahc_inb(ahc, i)); 4783 } 4784 if ((ahc->features & AHC_MORE_SRAM) != 0) { 4785 for (i = 0x70; i < 0x7f; i++) { 4786 if (((i % 8) == 0) && (i != 0)) { 4787 printf ("\n "); 4788 } 4789 printf (" 0x%x", ahc_inb(ahc, i)); 4790 } 4791 } 4792 printf ("\n"); 4793 /* 4794 * Reading uninitialized scratch ram may 4795 * generate parity errors. 4796 */ 4797 ahc_outb(ahc, CLRINT, CLRPARERR); 4798 ahc_outb(ahc, CLRINT, CLRBRKADRINT); 4799 #endif 4800 max_targ = 15; 4801 4802 /* 4803 * Assume we have a board at this stage and it has been reset. 4804 */ 4805 if ((ahc->flags & AHC_USEDEFAULTS) != 0) 4806 ahc->our_id = ahc->our_id_b = 7; 4807 4808 /* 4809 * Default to allowing initiator operations. 4810 */ 4811 ahc->flags |= AHC_INITIATORROLE; 4812 4813 /* 4814 * Only allow target mode features if this unit has them enabled. 4815 */ 4816 if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0) 4817 ahc->features &= ~AHC_TARGETMODE; 4818 4819 #ifndef __linux__ 4820 /* DMA tag for mapping buffers into device visible space. */ 4821 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 4822 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 4823 /*lowaddr*/ahc->flags & AHC_39BIT_ADDRESSING 4824 ? (bus_addr_t)0x7FFFFFFFFFULL 4825 : BUS_SPACE_MAXADDR_32BIT, 4826 /*highaddr*/BUS_SPACE_MAXADDR, 4827 /*filter*/NULL, /*filterarg*/NULL, 4828 /*maxsize*/(AHC_NSEG - 1) * PAGE_SIZE, 4829 /*nsegments*/AHC_NSEG, 4830 /*maxsegsz*/AHC_MAXTRANSFER_SIZE, 4831 /*flags*/BUS_DMA_ALLOCNOW, 4832 &ahc->buffer_dmat) != 0) { 4833 return (ENOMEM); 4834 } 4835 #endif 4836 4837 ahc->init_level++; 4838 4839 /* 4840 * DMA tag for our command fifos and other data in system memory 4841 * the card's sequencer must be able to access. For initiator 4842 * roles, we need to allocate space for the qinfifo and qoutfifo. 4843 * The qinfifo and qoutfifo are composed of 256 1 byte elements. 4844 * When providing for the target mode role, we must additionally 4845 * provide space for the incoming target command fifo and an extra 4846 * byte to deal with a dma bug in some chip versions. 4847 */ 4848 driver_data_size = 2 * 256 * sizeof(uint8_t); 4849 if ((ahc->features & AHC_TARGETMODE) != 0) 4850 driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd) 4851 + /*DMA WideOdd Bug Buffer*/1; 4852 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 4853 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 4854 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 4855 /*highaddr*/BUS_SPACE_MAXADDR, 4856 /*filter*/NULL, /*filterarg*/NULL, 4857 driver_data_size, 4858 /*nsegments*/1, 4859 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 4860 /*flags*/0, &ahc->shared_data_dmat) != 0) { 4861 return (ENOMEM); 4862 } 4863 4864 ahc->init_level++; 4865 4866 /* Allocation of driver data */ 4867 if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat, 4868 (void **)&ahc->qoutfifo, 4869 BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) { 4870 return (ENOMEM); 4871 } 4872 4873 ahc->init_level++; 4874 4875 /* And permanently map it in */ 4876 ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, 4877 ahc->qoutfifo, driver_data_size, ahc_dmamap_cb, 4878 &ahc->shared_data_busaddr, /*flags*/0); 4879 4880 if ((ahc->features & AHC_TARGETMODE) != 0) { 4881 ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo; 4882 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS]; 4883 ahc->dma_bug_buf = ahc->shared_data_busaddr 4884 + driver_data_size - 1; 4885 /* All target command blocks start out invalid. */ 4886 for (i = 0; i < AHC_TMODE_CMDS; i++) 4887 ahc->targetcmds[i].cmd_valid = 0; 4888 ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD); 4889 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256]; 4890 } 4891 ahc->qinfifo = &ahc->qoutfifo[256]; 4892 4893 ahc->init_level++; 4894 4895 /* Allocate SCB data now that buffer_dmat is initialized */ 4896 if (ahc->scb_data->maxhscbs == 0) 4897 if (ahc_init_scbdata(ahc) != 0) 4898 return (ENOMEM); 4899 4900 /* 4901 * Allocate a tstate to house information for our 4902 * initiator presence on the bus as well as the user 4903 * data for any target mode initiator. 4904 */ 4905 if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) { 4906 printf("%s: unable to allocate ahc_tmode_tstate. " 4907 "Failing attach\n", ahc_name(ahc)); 4908 return (ENOMEM); 4909 } 4910 4911 if ((ahc->features & AHC_TWIN) != 0) { 4912 if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) { 4913 printf("%s: unable to allocate ahc_tmode_tstate. " 4914 "Failing attach\n", ahc_name(ahc)); 4915 return (ENOMEM); 4916 } 4917 } 4918 4919 if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) { 4920 ahc->flags |= AHC_PAGESCBS; 4921 } else { 4922 ahc->flags &= ~AHC_PAGESCBS; 4923 } 4924 4925 #ifdef AHC_DEBUG 4926 if (ahc_debug & AHC_SHOW_MISC) { 4927 printf("%s: hardware scb %u bytes; kernel scb %u bytes; " 4928 "ahc_dma %u bytes\n", 4929 ahc_name(ahc), 4930 (u_int)sizeof(struct hardware_scb), 4931 (u_int)sizeof(struct scb), 4932 (u_int)sizeof(struct ahc_dma_seg)); 4933 } 4934 #endif /* AHC_DEBUG */ 4935 4936 /* 4937 * Look at the information that board initialization or 4938 * the board bios has left us. 4939 */ 4940 if (ahc->features & AHC_TWIN) { 4941 scsi_conf = ahc_inb(ahc, SCSICONF + 1); 4942 if ((scsi_conf & RESET_SCSI) != 0 4943 && (ahc->flags & AHC_INITIATORROLE) != 0) 4944 ahc->flags |= AHC_RESET_BUS_B; 4945 } 4946 4947 scsi_conf = ahc_inb(ahc, SCSICONF); 4948 if ((scsi_conf & RESET_SCSI) != 0 4949 && (ahc->flags & AHC_INITIATORROLE) != 0) 4950 ahc->flags |= AHC_RESET_BUS_A; 4951 4952 ultraenb = 0; 4953 tagenable = ALL_TARGETS_MASK; 4954 4955 /* Grab the disconnection disable table and invert it for our needs */ 4956 if ((ahc->flags & AHC_USEDEFAULTS) != 0) { 4957 printf("%s: Host Adapter Bios disabled. Using default SCSI " 4958 "device parameters\n", ahc_name(ahc)); 4959 ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B| 4960 AHC_TERM_ENB_A|AHC_TERM_ENB_B; 4961 discenable = ALL_TARGETS_MASK; 4962 if ((ahc->features & AHC_ULTRA) != 0) 4963 ultraenb = ALL_TARGETS_MASK; 4964 } else { 4965 discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8) 4966 | ahc_inb(ahc, DISC_DSB)); 4967 if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0) 4968 ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8) 4969 | ahc_inb(ahc, ULTRA_ENB); 4970 } 4971 4972 if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) 4973 max_targ = 7; 4974 4975 for (i = 0; i <= max_targ; i++) { 4976 struct ahc_initiator_tinfo *tinfo; 4977 struct ahc_tmode_tstate *tstate; 4978 u_int our_id; 4979 u_int target_id; 4980 char channel; 4981 4982 channel = 'A'; 4983 our_id = ahc->our_id; 4984 target_id = i; 4985 if (i > 7 && (ahc->features & AHC_TWIN) != 0) { 4986 channel = 'B'; 4987 our_id = ahc->our_id_b; 4988 target_id = i % 8; 4989 } 4990 tinfo = ahc_fetch_transinfo(ahc, channel, our_id, 4991 target_id, &tstate); 4992 /* Default to async narrow across the board */ 4993 memset(tinfo, 0, sizeof(*tinfo)); 4994 if (ahc->flags & AHC_USEDEFAULTS) { 4995 if ((ahc->features & AHC_WIDE) != 0) 4996 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4997 4998 /* 4999 * These will be truncated when we determine the 5000 * connection type we have with the target. 5001 */ 5002 tinfo->user.period = ahc_syncrates->period; 5003 tinfo->user.offset = MAX_OFFSET; 5004 } else { 5005 u_int scsirate; 5006 uint16_t mask; 5007 5008 /* Take the settings leftover in scratch RAM. */ 5009 scsirate = ahc_inb(ahc, TARG_SCSIRATE + i); 5010 mask = (0x01 << i); 5011 if ((ahc->features & AHC_ULTRA2) != 0) { 5012 u_int offset; 5013 u_int maxsync; 5014 5015 if ((scsirate & SOFS) == 0x0F) { 5016 /* 5017 * Haven't negotiated yet, 5018 * so the format is different. 5019 */ 5020 scsirate = (scsirate & SXFR) >> 4 5021 | (ultraenb & mask) 5022 ? 0x08 : 0x0 5023 | (scsirate & WIDEXFER); 5024 offset = MAX_OFFSET_ULTRA2; 5025 } else 5026 offset = ahc_inb(ahc, TARG_OFFSET + i); 5027 if ((scsirate & ~WIDEXFER) == 0 && offset != 0) 5028 /* Set to the lowest sync rate, 5MHz */ 5029 scsirate |= 0x1c; 5030 maxsync = AHC_SYNCRATE_ULTRA2; 5031 if ((ahc->features & AHC_DT) != 0) 5032 maxsync = AHC_SYNCRATE_DT; 5033 tinfo->user.period = 5034 ahc_find_period(ahc, scsirate, maxsync); 5035 if (offset == 0) 5036 tinfo->user.period = 0; 5037 else 5038 tinfo->user.offset = MAX_OFFSET; 5039 if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/ 5040 && (ahc->features & AHC_DT) != 0) 5041 tinfo->user.ppr_options = 5042 MSG_EXT_PPR_DT_REQ; 5043 } else if ((scsirate & SOFS) != 0) { 5044 if ((scsirate & SXFR) == 0x40 5045 && (ultraenb & mask) != 0) { 5046 /* Treat 10MHz as a non-ultra speed */ 5047 scsirate &= ~SXFR; 5048 ultraenb &= ~mask; 5049 } 5050 tinfo->user.period = 5051 ahc_find_period(ahc, scsirate, 5052 (ultraenb & mask) 5053 ? AHC_SYNCRATE_ULTRA 5054 : AHC_SYNCRATE_FAST); 5055 if (tinfo->user.period != 0) 5056 tinfo->user.offset = MAX_OFFSET; 5057 } 5058 if (tinfo->user.period == 0) 5059 tinfo->user.offset = 0; 5060 if ((scsirate & WIDEXFER) != 0 5061 && (ahc->features & AHC_WIDE) != 0) 5062 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 5063 tinfo->user.protocol_version = 4; 5064 if ((ahc->features & AHC_DT) != 0) 5065 tinfo->user.transport_version = 3; 5066 else 5067 tinfo->user.transport_version = 2; 5068 tinfo->goal.protocol_version = 2; 5069 tinfo->goal.transport_version = 2; 5070 tinfo->curr.protocol_version = 2; 5071 tinfo->curr.transport_version = 2; 5072 } 5073 tstate->ultraenb = 0; 5074 } 5075 ahc->user_discenable = discenable; 5076 ahc->user_tagenable = tagenable; 5077 5078 return (ahc->bus_chip_init(ahc)); 5079 } 5080 5081 void 5082 ahc_intr_enable(struct ahc_softc *ahc, int enable) 5083 { 5084 u_int hcntrl; 5085 5086 hcntrl = ahc_inb(ahc, HCNTRL); 5087 hcntrl &= ~INTEN; 5088 ahc->pause &= ~INTEN; 5089 ahc->unpause &= ~INTEN; 5090 if (enable) { 5091 hcntrl |= INTEN; 5092 ahc->pause |= INTEN; 5093 ahc->unpause |= INTEN; 5094 } 5095 ahc_outb(ahc, HCNTRL, hcntrl); 5096 } 5097 5098 /* 5099 * Ensure that the card is paused in a location 5100 * outside of all critical sections and that all 5101 * pending work is completed prior to returning. 5102 * This routine should only be called from outside 5103 * an interrupt context. 5104 */ 5105 void 5106 ahc_pause_and_flushwork(struct ahc_softc *ahc) 5107 { 5108 int intstat; 5109 int maxloops; 5110 int paused; 5111 5112 maxloops = 1000; 5113 ahc->flags |= AHC_ALL_INTERRUPTS; 5114 paused = FALSE; 5115 do { 5116 if (paused) 5117 ahc_unpause(ahc); 5118 ahc_intr(ahc); 5119 ahc_pause(ahc); 5120 paused = TRUE; 5121 ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO); 5122 ahc_clear_critical_section(ahc); 5123 intstat = ahc_inb(ahc, INTSTAT); 5124 } while (--maxloops 5125 && (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0) 5126 && ((intstat & INT_PEND) != 0 5127 || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)) != 0)); 5128 if (maxloops == 0) { 5129 printf("Infinite interrupt loop, INTSTAT = %x", 5130 ahc_inb(ahc, INTSTAT)); 5131 } 5132 ahc_platform_flushwork(ahc); 5133 ahc->flags &= ~AHC_ALL_INTERRUPTS; 5134 } 5135 5136 int 5137 ahc_suspend(struct ahc_softc *ahc) 5138 { 5139 5140 ahc_pause_and_flushwork(ahc); 5141 5142 if (LIST_FIRST(&ahc->pending_scbs) != NULL) { 5143 ahc_unpause(ahc); 5144 return (EBUSY); 5145 } 5146 5147 #ifdef AHC_TARGET_MODE 5148 /* 5149 * XXX What about ATIOs that have not yet been serviced? 5150 * Perhaps we should just refuse to be suspended if we 5151 * are acting in a target role. 5152 */ 5153 if (ahc->pending_device != NULL) { 5154 ahc_unpause(ahc); 5155 return (EBUSY); 5156 } 5157 #endif 5158 ahc_shutdown(ahc); 5159 return (0); 5160 } 5161 5162 int 5163 ahc_resume(struct ahc_softc *ahc) 5164 { 5165 5166 ahc_reset(ahc, /*reinit*/TRUE); 5167 ahc_intr_enable(ahc, TRUE); 5168 ahc_restart(ahc); 5169 return (0); 5170 } 5171 5172 /************************** Busy Target Table *********************************/ 5173 /* 5174 * Return the untagged transaction id for a given target/channel lun. 5175 * Optionally, clear the entry. 5176 */ 5177 u_int 5178 ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl) 5179 { 5180 u_int scbid; 5181 u_int target_offset; 5182 5183 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5184 u_int saved_scbptr; 5185 5186 saved_scbptr = ahc_inb(ahc, SCBPTR); 5187 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); 5188 scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl)); 5189 ahc_outb(ahc, SCBPTR, saved_scbptr); 5190 } else { 5191 target_offset = TCL_TARGET_OFFSET(tcl); 5192 scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset); 5193 } 5194 5195 return (scbid); 5196 } 5197 5198 void 5199 ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl) 5200 { 5201 u_int target_offset; 5202 5203 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5204 u_int saved_scbptr; 5205 5206 saved_scbptr = ahc_inb(ahc, SCBPTR); 5207 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); 5208 ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL); 5209 ahc_outb(ahc, SCBPTR, saved_scbptr); 5210 } else { 5211 target_offset = TCL_TARGET_OFFSET(tcl); 5212 ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL); 5213 } 5214 } 5215 5216 void 5217 ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid) 5218 { 5219 u_int target_offset; 5220 5221 if ((ahc->flags & AHC_SCB_BTT) != 0) { 5222 u_int saved_scbptr; 5223 5224 saved_scbptr = ahc_inb(ahc, SCBPTR); 5225 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); 5226 ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid); 5227 ahc_outb(ahc, SCBPTR, saved_scbptr); 5228 } else { 5229 target_offset = TCL_TARGET_OFFSET(tcl); 5230 ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid); 5231 } 5232 } 5233 5234 /************************** SCB and SCB queue management **********************/ 5235 int 5236 ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target, 5237 char channel, int lun, u_int tag, role_t role) 5238 { 5239 int targ = SCB_GET_TARGET(ahc, scb); 5240 char chan = SCB_GET_CHANNEL(ahc, scb); 5241 int slun = SCB_GET_LUN(scb); 5242 int match; 5243 5244 match = ((chan == channel) || (channel == ALL_CHANNELS)); 5245 if (match != 0) 5246 match = ((targ == target) || (target == CAM_TARGET_WILDCARD)); 5247 if (match != 0) 5248 match = ((lun == slun) || (lun == CAM_LUN_WILDCARD)); 5249 if (match != 0) { 5250 #ifdef AHC_TARGET_MODE 5251 int group; 5252 5253 group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code); 5254 if (role == ROLE_INITIATOR) { 5255 match = (group != XPT_FC_GROUP_TMODE) 5256 && ((tag == scb->hscb->tag) 5257 || (tag == SCB_LIST_NULL)); 5258 } else if (role == ROLE_TARGET) { 5259 match = (group == XPT_FC_GROUP_TMODE) 5260 && ((tag == scb->io_ctx->csio.tag_id) 5261 || (tag == SCB_LIST_NULL)); 5262 } 5263 #else /* !AHC_TARGET_MODE */ 5264 match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL)); 5265 #endif /* AHC_TARGET_MODE */ 5266 } 5267 5268 return match; 5269 } 5270 5271 void 5272 ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb) 5273 { 5274 int target; 5275 char channel; 5276 int lun; 5277 5278 target = SCB_GET_TARGET(ahc, scb); 5279 lun = SCB_GET_LUN(scb); 5280 channel = SCB_GET_CHANNEL(ahc, scb); 5281 5282 ahc_search_qinfifo(ahc, target, channel, lun, 5283 /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN, 5284 CAM_REQUEUE_REQ, SEARCH_COMPLETE); 5285 5286 ahc_platform_freeze_devq(ahc, scb); 5287 } 5288 5289 void 5290 ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb) 5291 { 5292 struct scb *prev_scb; 5293 5294 prev_scb = NULL; 5295 if (ahc_qinfifo_count(ahc) != 0) { 5296 u_int prev_tag; 5297 uint8_t prev_pos; 5298 5299 prev_pos = ahc->qinfifonext - 1; 5300 prev_tag = ahc->qinfifo[prev_pos]; 5301 prev_scb = ahc_lookup_scb(ahc, prev_tag); 5302 } 5303 ahc_qinfifo_requeue(ahc, prev_scb, scb); 5304 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 5305 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); 5306 } else { 5307 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); 5308 } 5309 } 5310 5311 static void 5312 ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb, 5313 struct scb *scb) 5314 { 5315 if (prev_scb == NULL) { 5316 ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag); 5317 } else { 5318 prev_scb->hscb->next = scb->hscb->tag; 5319 ahc_sync_scb(ahc, prev_scb, 5320 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5321 } 5322 ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag; 5323 scb->hscb->next = ahc->next_queued_scb->hscb->tag; 5324 ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 5325 } 5326 5327 static int 5328 ahc_qinfifo_count(struct ahc_softc *ahc) 5329 { 5330 uint8_t qinpos; 5331 uint8_t diff; 5332 5333 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 5334 qinpos = ahc_inb(ahc, SNSCB_QOFF); 5335 ahc_outb(ahc, SNSCB_QOFF, qinpos); 5336 } else 5337 qinpos = ahc_inb(ahc, QINPOS); 5338 diff = ahc->qinfifonext - qinpos; 5339 return (diff); 5340 } 5341 5342 int 5343 ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel, 5344 int lun, u_int tag, role_t role, uint32_t status, 5345 ahc_search_action action) 5346 { 5347 struct scb *scb; 5348 struct scb *prev_scb; 5349 uint8_t qinstart; 5350 uint8_t qinpos; 5351 uint8_t qintail; 5352 uint8_t next; 5353 uint8_t prev; 5354 uint8_t curscbptr; 5355 int found; 5356 int have_qregs; 5357 5358 qintail = ahc->qinfifonext; 5359 have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0; 5360 if (have_qregs) { 5361 qinstart = ahc_inb(ahc, SNSCB_QOFF); 5362 ahc_outb(ahc, SNSCB_QOFF, qinstart); 5363 } else 5364 qinstart = ahc_inb(ahc, QINPOS); 5365 qinpos = qinstart; 5366 found = 0; 5367 prev_scb = NULL; 5368 5369 if (action == SEARCH_COMPLETE) { 5370 /* 5371 * Don't attempt to run any queued untagged transactions 5372 * until we are done with the abort process. 5373 */ 5374 ahc_freeze_untagged_queues(ahc); 5375 } 5376 5377 /* 5378 * Start with an empty queue. Entries that are not chosen 5379 * for removal will be re-added to the queue as we go. 5380 */ 5381 ahc->qinfifonext = qinpos; 5382 ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); 5383 5384 while (qinpos != qintail) { 5385 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]); 5386 if (scb == NULL) { 5387 printf("qinpos = %d, SCB index = %d\n", 5388 qinpos, ahc->qinfifo[qinpos]); 5389 panic("Loop 1\n"); 5390 } 5391 5392 if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) { 5393 /* 5394 * We found an scb that needs to be acted on. 5395 */ 5396 found++; 5397 switch (action) { 5398 case SEARCH_COMPLETE: 5399 { 5400 cam_status ostat; 5401 cam_status cstat; 5402 5403 ostat = ahc_get_transaction_status(scb); 5404 if (ostat == CAM_REQ_INPROG) 5405 ahc_set_transaction_status(scb, status); 5406 cstat = ahc_get_transaction_status(scb); 5407 if (cstat != CAM_REQ_CMP) 5408 ahc_freeze_scb(scb); 5409 if ((scb->flags & SCB_ACTIVE) == 0) 5410 printf("Inactive SCB in qinfifo\n"); 5411 ahc_done(ahc, scb); 5412 5413 /* FALLTHROUGH */ 5414 } 5415 case SEARCH_REMOVE: 5416 break; 5417 case SEARCH_COUNT: 5418 ahc_qinfifo_requeue(ahc, prev_scb, scb); 5419 prev_scb = scb; 5420 break; 5421 } 5422 } else { 5423 ahc_qinfifo_requeue(ahc, prev_scb, scb); 5424 prev_scb = scb; 5425 } 5426 qinpos++; 5427 } 5428 5429 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 5430 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); 5431 } else { 5432 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); 5433 } 5434 5435 if (action != SEARCH_COUNT 5436 && (found != 0) 5437 && (qinstart != ahc->qinfifonext)) { 5438 /* 5439 * The sequencer may be in the process of dmaing 5440 * down the SCB at the beginning of the queue. 5441 * This could be problematic if either the first, 5442 * or the second SCB is removed from the queue 5443 * (the first SCB includes a pointer to the "next" 5444 * SCB to dma). If we have removed any entries, swap 5445 * the first element in the queue with the next HSCB 5446 * so the sequencer will notice that NEXT_QUEUED_SCB 5447 * has changed during its dma attempt and will retry 5448 * the DMA. 5449 */ 5450 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]); 5451 5452 if (scb == NULL) { 5453 printf("found = %d, qinstart = %d, qinfifionext = %d\n", 5454 found, qinstart, ahc->qinfifonext); 5455 panic("First/Second Qinfifo fixup\n"); 5456 } 5457 /* 5458 * ahc_swap_with_next_hscb forces our next pointer to 5459 * point to the reserved SCB for future commands. Save 5460 * and restore our original next pointer to maintain 5461 * queue integrity. 5462 */ 5463 next = scb->hscb->next; 5464 ahc->scb_data->scbindex[scb->hscb->tag] = NULL; 5465 ahc_swap_with_next_hscb(ahc, scb); 5466 scb->hscb->next = next; 5467 ahc->qinfifo[qinstart] = scb->hscb->tag; 5468 5469 /* Tell the card about the new head of the qinfifo. */ 5470 ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag); 5471 5472 /* Fixup the tail "next" pointer. */ 5473 qintail = ahc->qinfifonext - 1; 5474 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]); 5475 scb->hscb->next = ahc->next_queued_scb->hscb->tag; 5476 } 5477 5478 /* 5479 * Search waiting for selection list. 5480 */ 5481 curscbptr = ahc_inb(ahc, SCBPTR); 5482 next = ahc_inb(ahc, WAITING_SCBH); /* Start at head of list. */ 5483 prev = SCB_LIST_NULL; 5484 5485 while (next != SCB_LIST_NULL) { 5486 uint8_t scb_index; 5487 5488 ahc_outb(ahc, SCBPTR, next); 5489 scb_index = ahc_inb(ahc, SCB_TAG); 5490 if (scb_index >= ahc->scb_data->numscbs) { 5491 printf("Waiting List inconsistency. " 5492 "SCB index == %d, yet numscbs == %d.", 5493 scb_index, ahc->scb_data->numscbs); 5494 ahc_dump_card_state(ahc); 5495 panic("for safety"); 5496 } 5497 scb = ahc_lookup_scb(ahc, scb_index); 5498 if (scb == NULL) { 5499 printf("scb_index = %d, next = %d\n", 5500 scb_index, next); 5501 panic("Waiting List traversal\n"); 5502 } 5503 if (ahc_match_scb(ahc, scb, target, channel, 5504 lun, SCB_LIST_NULL, role)) { 5505 /* 5506 * We found an scb that needs to be acted on. 5507 */ 5508 found++; 5509 switch (action) { 5510 case SEARCH_COMPLETE: 5511 { 5512 cam_status ostat; 5513 cam_status cstat; 5514 5515 ostat = ahc_get_transaction_status(scb); 5516 if (ostat == CAM_REQ_INPROG) 5517 ahc_set_transaction_status(scb, 5518 status); 5519 cstat = ahc_get_transaction_status(scb); 5520 if (cstat != CAM_REQ_CMP) 5521 ahc_freeze_scb(scb); 5522 if ((scb->flags & SCB_ACTIVE) == 0) 5523 printf("Inactive SCB in Waiting List\n"); 5524 ahc_done(ahc, scb); 5525 /* FALLTHROUGH */ 5526 } 5527 case SEARCH_REMOVE: 5528 next = ahc_rem_wscb(ahc, next, prev); 5529 break; 5530 case SEARCH_COUNT: 5531 prev = next; 5532 next = ahc_inb(ahc, SCB_NEXT); 5533 break; 5534 } 5535 } else { 5536 5537 prev = next; 5538 next = ahc_inb(ahc, SCB_NEXT); 5539 } 5540 } 5541 ahc_outb(ahc, SCBPTR, curscbptr); 5542 5543 found += ahc_search_untagged_queues(ahc, /*ahc_io_ctx_t*/NULL, target, 5544 channel, lun, status, action); 5545 5546 if (action == SEARCH_COMPLETE) 5547 ahc_release_untagged_queues(ahc); 5548 return (found); 5549 } 5550 5551 int 5552 ahc_search_untagged_queues(struct ahc_softc *ahc, ahc_io_ctx_t ctx, 5553 int target, char channel, int lun, uint32_t status, 5554 ahc_search_action action) 5555 { 5556 struct scb *scb; 5557 int maxtarget; 5558 int found; 5559 int i; 5560 5561 if (action == SEARCH_COMPLETE) { 5562 /* 5563 * Don't attempt to run any queued untagged transactions 5564 * until we are done with the abort process. 5565 */ 5566 ahc_freeze_untagged_queues(ahc); 5567 } 5568 5569 found = 0; 5570 i = 0; 5571 if ((ahc->flags & AHC_SCB_BTT) == 0) { 5572 5573 maxtarget = 16; 5574 if (target != CAM_TARGET_WILDCARD) { 5575 5576 i = target; 5577 if (channel == 'B') 5578 i += 8; 5579 maxtarget = i + 1; 5580 } 5581 } else { 5582 maxtarget = 0; 5583 } 5584 5585 for (; i < maxtarget; i++) { 5586 struct scb_tailq *untagged_q; 5587 struct scb *next_scb; 5588 5589 untagged_q = &(ahc->untagged_queues[i]); 5590 next_scb = TAILQ_FIRST(untagged_q); 5591 while (next_scb != NULL) { 5592 5593 scb = next_scb; 5594 next_scb = TAILQ_NEXT(scb, links.tqe); 5595 5596 /* 5597 * The head of the list may be the currently 5598 * active untagged command for a device. 5599 * We're only searching for commands that 5600 * have not been started. A transaction 5601 * marked active but still in the qinfifo 5602 * is removed by the qinfifo scanning code 5603 * above. 5604 */ 5605 if ((scb->flags & SCB_ACTIVE) != 0) 5606 continue; 5607 5608 if (ahc_match_scb(ahc, scb, target, channel, lun, 5609 SCB_LIST_NULL, ROLE_INITIATOR) == 0 5610 || (ctx != NULL && ctx != scb->io_ctx)) 5611 continue; 5612 5613 /* 5614 * We found an scb that needs to be acted on. 5615 */ 5616 found++; 5617 switch (action) { 5618 case SEARCH_COMPLETE: 5619 { 5620 cam_status ostat; 5621 cam_status cstat; 5622 5623 ostat = ahc_get_transaction_status(scb); 5624 if (ostat == CAM_REQ_INPROG) 5625 ahc_set_transaction_status(scb, status); 5626 cstat = ahc_get_transaction_status(scb); 5627 if (cstat != CAM_REQ_CMP) 5628 ahc_freeze_scb(scb); 5629 if ((scb->flags & SCB_ACTIVE) == 0) 5630 printf("Inactive SCB in untaggedQ\n"); 5631 ahc_done(ahc, scb); 5632 break; 5633 } 5634 case SEARCH_REMOVE: 5635 scb->flags &= ~SCB_UNTAGGEDQ; 5636 TAILQ_REMOVE(untagged_q, scb, links.tqe); 5637 break; 5638 case SEARCH_COUNT: 5639 break; 5640 } 5641 } 5642 } 5643 5644 if (action == SEARCH_COMPLETE) 5645 ahc_release_untagged_queues(ahc); 5646 return (found); 5647 } 5648 5649 int 5650 ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel, 5651 int lun, u_int tag, int stop_on_first, int remove, 5652 int save_state) 5653 { 5654 struct scb *scbp; 5655 u_int next; 5656 u_int prev; 5657 u_int count; 5658 u_int active_scb; 5659 5660 count = 0; 5661 next = ahc_inb(ahc, DISCONNECTED_SCBH); 5662 prev = SCB_LIST_NULL; 5663 5664 if (save_state) { 5665 /* restore this when we're done */ 5666 active_scb = ahc_inb(ahc, SCBPTR); 5667 } else 5668 /* Silence compiler */ 5669 active_scb = SCB_LIST_NULL; 5670 5671 while (next != SCB_LIST_NULL) { 5672 u_int scb_index; 5673 5674 ahc_outb(ahc, SCBPTR, next); 5675 scb_index = ahc_inb(ahc, SCB_TAG); 5676 if (scb_index >= ahc->scb_data->numscbs) { 5677 printf("Disconnected List inconsistency. " 5678 "SCB index == %d, yet numscbs == %d.", 5679 scb_index, ahc->scb_data->numscbs); 5680 ahc_dump_card_state(ahc); 5681 panic("for safety"); 5682 } 5683 5684 if (next == prev) { 5685 panic("Disconnected List Loop. " 5686 "cur SCBPTR == %x, prev SCBPTR == %x.", 5687 next, prev); 5688 } 5689 scbp = ahc_lookup_scb(ahc, scb_index); 5690 if (ahc_match_scb(ahc, scbp, target, channel, lun, 5691 tag, ROLE_INITIATOR)) { 5692 count++; 5693 if (remove) { 5694 next = 5695 ahc_rem_scb_from_disc_list(ahc, prev, next); 5696 } else { 5697 prev = next; 5698 next = ahc_inb(ahc, SCB_NEXT); 5699 } 5700 if (stop_on_first) 5701 break; 5702 } else { 5703 prev = next; 5704 next = ahc_inb(ahc, SCB_NEXT); 5705 } 5706 } 5707 if (save_state) 5708 ahc_outb(ahc, SCBPTR, active_scb); 5709 return (count); 5710 } 5711 5712 /* 5713 * Remove an SCB from the on chip list of disconnected transactions. 5714 * This is empty/unused if we are not performing SCB paging. 5715 */ 5716 static u_int 5717 ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr) 5718 { 5719 u_int next; 5720 5721 ahc_outb(ahc, SCBPTR, scbptr); 5722 next = ahc_inb(ahc, SCB_NEXT); 5723 5724 ahc_outb(ahc, SCB_CONTROL, 0); 5725 5726 ahc_add_curscb_to_free_list(ahc); 5727 5728 if (prev != SCB_LIST_NULL) { 5729 ahc_outb(ahc, SCBPTR, prev); 5730 ahc_outb(ahc, SCB_NEXT, next); 5731 } else 5732 ahc_outb(ahc, DISCONNECTED_SCBH, next); 5733 5734 return (next); 5735 } 5736 5737 /* 5738 * Add the SCB as selected by SCBPTR onto the on chip list of 5739 * free hardware SCBs. This list is empty/unused if we are not 5740 * performing SCB paging. 5741 */ 5742 static void 5743 ahc_add_curscb_to_free_list(struct ahc_softc *ahc) 5744 { 5745 /* 5746 * Invalidate the tag so that our abort 5747 * routines don't think it's active. 5748 */ 5749 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); 5750 5751 if ((ahc->flags & AHC_PAGESCBS) != 0) { 5752 ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH)); 5753 ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR)); 5754 } 5755 } 5756 5757 /* 5758 * Manipulate the waiting for selection list and return the 5759 * scb that follows the one that we remove. 5760 */ 5761 static u_int 5762 ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev) 5763 { 5764 u_int curscb, next; 5765 5766 /* 5767 * Select the SCB we want to abort and 5768 * pull the next pointer out of it. 5769 */ 5770 curscb = ahc_inb(ahc, SCBPTR); 5771 ahc_outb(ahc, SCBPTR, scbpos); 5772 next = ahc_inb(ahc, SCB_NEXT); 5773 5774 /* Clear the necessary fields */ 5775 ahc_outb(ahc, SCB_CONTROL, 0); 5776 5777 ahc_add_curscb_to_free_list(ahc); 5778 5779 /* update the waiting list */ 5780 if (prev == SCB_LIST_NULL) { 5781 /* First in the list */ 5782 ahc_outb(ahc, WAITING_SCBH, next); 5783 5784 /* 5785 * Ensure we aren't attempting to perform 5786 * selection for this entry. 5787 */ 5788 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); 5789 } else { 5790 /* 5791 * Select the scb that pointed to us 5792 * and update its next pointer. 5793 */ 5794 ahc_outb(ahc, SCBPTR, prev); 5795 ahc_outb(ahc, SCB_NEXT, next); 5796 } 5797 5798 /* 5799 * Point us back at the original scb position. 5800 */ 5801 ahc_outb(ahc, SCBPTR, curscb); 5802 return next; 5803 } 5804 5805 /******************************** Error Handling ******************************/ 5806 /* 5807 * Abort all SCBs that match the given description (target/channel/lun/tag), 5808 * setting their status to the passed in status if the status has not already 5809 * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer 5810 * is paused before it is called. 5811 */ 5812 int 5813 ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, 5814 int lun, u_int tag, role_t role, uint32_t status) 5815 { 5816 struct scb *scbp; 5817 struct scb *scbp_next; 5818 u_int active_scb; 5819 int i, j; 5820 int maxtarget; 5821 int minlun; 5822 int maxlun; 5823 5824 int found; 5825 5826 /* 5827 * Don't attempt to run any queued untagged transactions 5828 * until we are done with the abort process. 5829 */ 5830 ahc_freeze_untagged_queues(ahc); 5831 5832 /* restore this when we're done */ 5833 active_scb = ahc_inb(ahc, SCBPTR); 5834 5835 found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL, 5836 role, CAM_REQUEUE_REQ, SEARCH_COMPLETE); 5837 5838 /* 5839 * Clean out the busy target table for any untagged commands. 5840 */ 5841 i = 0; 5842 maxtarget = 16; 5843 if (target != CAM_TARGET_WILDCARD) { 5844 i = target; 5845 if (channel == 'B') 5846 i += 8; 5847 maxtarget = i + 1; 5848 } 5849 5850 if (lun == CAM_LUN_WILDCARD) { 5851 5852 /* 5853 * Unless we are using an SCB based 5854 * busy targets table, there is only 5855 * one table entry for all luns of 5856 * a target. 5857 */ 5858 minlun = 0; 5859 maxlun = 1; 5860 if ((ahc->flags & AHC_SCB_BTT) != 0) 5861 maxlun = AHC_NUM_LUNS; 5862 } else { 5863 minlun = lun; 5864 maxlun = lun + 1; 5865 } 5866 5867 if (role != ROLE_TARGET) { 5868 for (;i < maxtarget; i++) { 5869 for (j = minlun;j < maxlun; j++) { 5870 u_int scbid; 5871 u_int tcl; 5872 5873 tcl = BUILD_TCL(i << 4, j); 5874 scbid = ahc_index_busy_tcl(ahc, tcl); 5875 scbp = ahc_lookup_scb(ahc, scbid); 5876 if (scbp == NULL 5877 || ahc_match_scb(ahc, scbp, target, channel, 5878 lun, tag, role) == 0) 5879 continue; 5880 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j)); 5881 } 5882 } 5883 5884 /* 5885 * Go through the disconnected list and remove any entries we 5886 * have queued for completion, 0'ing their control byte too. 5887 * We save the active SCB and restore it ourselves, so there 5888 * is no reason for this search to restore it too. 5889 */ 5890 ahc_search_disc_list(ahc, target, channel, lun, tag, 5891 /*stop_on_first*/FALSE, /*remove*/TRUE, 5892 /*save_state*/FALSE); 5893 } 5894 5895 /* 5896 * Go through the hardware SCB array looking for commands that 5897 * were active but not on any list. In some cases, these remnants 5898 * might not still have mappings in the scbindex array (e.g. unexpected 5899 * bus free with the same scb queued for an abort). Don't hold this 5900 * against them. 5901 */ 5902 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 5903 u_int scbid; 5904 5905 ahc_outb(ahc, SCBPTR, i); 5906 scbid = ahc_inb(ahc, SCB_TAG); 5907 scbp = ahc_lookup_scb(ahc, scbid); 5908 if ((scbp == NULL && scbid != SCB_LIST_NULL) 5909 || (scbp != NULL 5910 && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role))) 5911 ahc_add_curscb_to_free_list(ahc); 5912 } 5913 5914 /* 5915 * Go through the pending CCB list and look for 5916 * commands for this target that are still active. 5917 * These are other tagged commands that were 5918 * disconnected when the reset occurred. 5919 */ 5920 scbp_next = LIST_FIRST(&ahc->pending_scbs); 5921 while (scbp_next != NULL) { 5922 scbp = scbp_next; 5923 scbp_next = LIST_NEXT(scbp, pending_links); 5924 if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) { 5925 cam_status ostat; 5926 5927 ostat = ahc_get_transaction_status(scbp); 5928 if (ostat == CAM_REQ_INPROG) 5929 ahc_set_transaction_status(scbp, status); 5930 if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP) 5931 ahc_freeze_scb(scbp); 5932 if ((scbp->flags & SCB_ACTIVE) == 0) 5933 printf("Inactive SCB on pending list\n"); 5934 ahc_done(ahc, scbp); 5935 found++; 5936 } 5937 } 5938 ahc_outb(ahc, SCBPTR, active_scb); 5939 ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status); 5940 ahc_release_untagged_queues(ahc); 5941 return found; 5942 } 5943 5944 static void 5945 ahc_reset_current_bus(struct ahc_softc *ahc) 5946 { 5947 uint8_t scsiseq; 5948 5949 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST); 5950 scsiseq = ahc_inb(ahc, SCSISEQ); 5951 ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO); 5952 ahc_flush_device_writes(ahc); 5953 ahc_delay(AHC_BUSRESET_DELAY); 5954 /* Turn off the bus reset */ 5955 ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO); 5956 5957 ahc_clear_intstat(ahc); 5958 5959 /* Re-enable reset interrupts */ 5960 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST); 5961 } 5962 5963 int 5964 ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset) 5965 { 5966 struct ahc_devinfo devinfo; 5967 u_int initiator, target, max_scsiid; 5968 u_int sblkctl; 5969 u_int scsiseq; 5970 u_int simode1; 5971 int found; 5972 int restart_needed; 5973 char cur_channel; 5974 5975 ahc->pending_device = NULL; 5976 5977 ahc_compile_devinfo(&devinfo, 5978 CAM_TARGET_WILDCARD, 5979 CAM_TARGET_WILDCARD, 5980 CAM_LUN_WILDCARD, 5981 channel, ROLE_UNKNOWN); 5982 ahc_pause(ahc); 5983 5984 /* Make sure the sequencer is in a safe location. */ 5985 ahc_clear_critical_section(ahc); 5986 5987 /* 5988 * Run our command complete fifos to ensure that we perform 5989 * completion processing on any commands that 'completed' 5990 * before the reset occurred. 5991 */ 5992 ahc_run_qoutfifo(ahc); 5993 #ifdef AHC_TARGET_MODE 5994 /* 5995 * XXX - In Twin mode, the tqinfifo may have commands 5996 * for an unaffected channel in it. However, if 5997 * we have run out of ATIO resources to drain that 5998 * queue, we may not get them all out here. Further, 5999 * the blocked transactions for the reset channel 6000 * should just be killed off, irrespecitve of whether 6001 * we are blocked on ATIO resources. Write a routine 6002 * to compact the tqinfifo appropriately. 6003 */ 6004 if ((ahc->flags & AHC_TARGETROLE) != 0) { 6005 ahc_run_tqinfifo(ahc, /*paused*/TRUE); 6006 } 6007 #endif 6008 6009 /* 6010 * Reset the bus if we are initiating this reset 6011 */ 6012 sblkctl = ahc_inb(ahc, SBLKCTL); 6013 cur_channel = 'A'; 6014 if ((ahc->features & AHC_TWIN) != 0 6015 && ((sblkctl & SELBUSB) != 0)) 6016 cur_channel = 'B'; 6017 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); 6018 if (cur_channel != channel) { 6019 /* Case 1: Command for another bus is active 6020 * Stealthily reset the other bus without 6021 * upsetting the current bus. 6022 */ 6023 ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB); 6024 simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST); 6025 #ifdef AHC_TARGET_MODE 6026 /* 6027 * Bus resets clear ENSELI, so we cannot 6028 * defer re-enabling bus reset interrupts 6029 * if we are in target mode. 6030 */ 6031 if ((ahc->flags & AHC_TARGETROLE) != 0) 6032 simode1 |= ENSCSIRST; 6033 #endif 6034 ahc_outb(ahc, SIMODE1, simode1); 6035 if (initiate_reset) 6036 ahc_reset_current_bus(ahc); 6037 ahc_clear_intstat(ahc); 6038 ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP)); 6039 ahc_outb(ahc, SBLKCTL, sblkctl); 6040 restart_needed = FALSE; 6041 } else { 6042 /* Case 2: A command from this bus is active or we're idle */ 6043 simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST); 6044 #ifdef AHC_TARGET_MODE 6045 /* 6046 * Bus resets clear ENSELI, so we cannot 6047 * defer re-enabling bus reset interrupts 6048 * if we are in target mode. 6049 */ 6050 if ((ahc->flags & AHC_TARGETROLE) != 0) 6051 simode1 |= ENSCSIRST; 6052 #endif 6053 ahc_outb(ahc, SIMODE1, simode1); 6054 if (initiate_reset) 6055 ahc_reset_current_bus(ahc); 6056 ahc_clear_intstat(ahc); 6057 ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP)); 6058 restart_needed = TRUE; 6059 } 6060 6061 /* 6062 * Clean up all the state information for the 6063 * pending transactions on this bus. 6064 */ 6065 found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel, 6066 CAM_LUN_WILDCARD, SCB_LIST_NULL, 6067 ROLE_UNKNOWN, CAM_SCSI_BUS_RESET); 6068 6069 max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7; 6070 6071 #ifdef AHC_TARGET_MODE 6072 /* 6073 * Send an immediate notify ccb to all target more peripheral 6074 * drivers affected by this action. 6075 */ 6076 for (target = 0; target <= max_scsiid; target++) { 6077 struct ahc_tmode_tstate* tstate; 6078 u_int lun; 6079 6080 tstate = ahc->enabled_targets[target]; 6081 if (tstate == NULL) 6082 continue; 6083 for (lun = 0; lun < AHC_NUM_LUNS; lun++) { 6084 struct ahc_tmode_lstate* lstate; 6085 6086 lstate = tstate->enabled_luns[lun]; 6087 if (lstate == NULL) 6088 continue; 6089 6090 ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD, 6091 EVENT_TYPE_BUS_RESET, /*arg*/0); 6092 ahc_send_lstate_events(ahc, lstate); 6093 } 6094 } 6095 #endif 6096 /* Notify the XPT that a bus reset occurred */ 6097 ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD, 6098 CAM_LUN_WILDCARD, AC_BUS_RESET, NULL); 6099 6100 /* 6101 * Revert to async/narrow transfers until we renegotiate. 6102 */ 6103 for (target = 0; target <= max_scsiid; target++) { 6104 6105 if (ahc->enabled_targets[target] == NULL) 6106 continue; 6107 for (initiator = 0; initiator <= max_scsiid; initiator++) { 6108 struct ahc_devinfo devinfo; 6109 6110 ahc_compile_devinfo(&devinfo, target, initiator, 6111 CAM_LUN_WILDCARD, 6112 channel, ROLE_UNKNOWN); 6113 ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, 6114 AHC_TRANS_CUR, /*paused*/TRUE); 6115 ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL, 6116 /*period*/0, /*offset*/0, 6117 /*ppr_options*/0, AHC_TRANS_CUR, 6118 /*paused*/TRUE); 6119 } 6120 } 6121 6122 if (restart_needed) 6123 ahc_restart(ahc); 6124 else 6125 ahc_unpause(ahc); 6126 return found; 6127 } 6128 6129 6130 /***************************** Residual Processing ****************************/ 6131 /* 6132 * Calculate the residual for a just completed SCB. 6133 */ 6134 void 6135 ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb) 6136 { 6137 struct hardware_scb *hscb; 6138 struct status_pkt *spkt; 6139 uint32_t sgptr; 6140 uint32_t resid_sgptr; 6141 uint32_t resid; 6142 6143 /* 6144 * 5 cases. 6145 * 1) No residual. 6146 * SG_RESID_VALID clear in sgptr. 6147 * 2) Transferless command 6148 * 3) Never performed any transfers. 6149 * sgptr has SG_FULL_RESID set. 6150 * 4) No residual but target did not 6151 * save data pointers after the 6152 * last transfer, so sgptr was 6153 * never updated. 6154 * 5) We have a partial residual. 6155 * Use residual_sgptr to determine 6156 * where we are. 6157 */ 6158 6159 hscb = scb->hscb; 6160 sgptr = ahc_le32toh(hscb->sgptr); 6161 if ((sgptr & SG_RESID_VALID) == 0) 6162 /* Case 1 */ 6163 return; 6164 sgptr &= ~SG_RESID_VALID; 6165 6166 if ((sgptr & SG_LIST_NULL) != 0) 6167 /* Case 2 */ 6168 return; 6169 6170 spkt = &hscb->shared_data.status; 6171 resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr); 6172 if ((sgptr & SG_FULL_RESID) != 0) { 6173 /* Case 3 */ 6174 resid = ahc_get_transfer_length(scb); 6175 } else if ((resid_sgptr & SG_LIST_NULL) != 0) { 6176 /* Case 4 */ 6177 return; 6178 } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) { 6179 panic("Bogus resid sgptr value 0x%x\n", resid_sgptr); 6180 } else { 6181 struct ahc_dma_seg *sg; 6182 6183 /* 6184 * Remainder of the SG where the transfer 6185 * stopped. 6186 */ 6187 resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK; 6188 sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK); 6189 6190 /* The residual sg_ptr always points to the next sg */ 6191 sg--; 6192 6193 /* 6194 * Add up the contents of all residual 6195 * SG segments that are after the SG where 6196 * the transfer stopped. 6197 */ 6198 while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) { 6199 sg++; 6200 resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK; 6201 } 6202 } 6203 if ((scb->flags & SCB_SENSE) == 0) 6204 ahc_set_residual(scb, resid); 6205 else 6206 ahc_set_sense_residual(scb, resid); 6207 6208 #ifdef AHC_DEBUG 6209 if ((ahc_debug & AHC_SHOW_MISC) != 0) { 6210 ahc_print_path(ahc, scb); 6211 printf("Handled %sResidual of %d bytes\n", 6212 (scb->flags & SCB_SENSE) ? "Sense " : "", resid); 6213 } 6214 #endif 6215 } 6216 6217 /******************************* Target Mode **********************************/ 6218 #ifdef AHC_TARGET_MODE 6219 /* 6220 * Add a target mode event to this lun's queue 6221 */ 6222 static void 6223 ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate, 6224 u_int initiator_id, u_int event_type, u_int event_arg) 6225 { 6226 struct ahc_tmode_event *event; 6227 int pending; 6228 6229 xpt_freeze_devq(lstate->path, /*count*/1); 6230 if (lstate->event_w_idx >= lstate->event_r_idx) 6231 pending = lstate->event_w_idx - lstate->event_r_idx; 6232 else 6233 pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1 6234 - (lstate->event_r_idx - lstate->event_w_idx); 6235 6236 if (event_type == EVENT_TYPE_BUS_RESET 6237 || event_type == MSG_BUS_DEV_RESET) { 6238 /* 6239 * Any earlier events are irrelevant, so reset our buffer. 6240 * This has the effect of allowing us to deal with reset 6241 * floods (an external device holding down the reset line) 6242 * without losing the event that is really interesting. 6243 */ 6244 lstate->event_r_idx = 0; 6245 lstate->event_w_idx = 0; 6246 xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); 6247 } 6248 6249 if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) { 6250 xpt_print_path(lstate->path); 6251 printf("immediate event %x:%x lost\n", 6252 lstate->event_buffer[lstate->event_r_idx].event_type, 6253 lstate->event_buffer[lstate->event_r_idx].event_arg); 6254 lstate->event_r_idx++; 6255 if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) 6256 lstate->event_r_idx = 0; 6257 xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); 6258 } 6259 6260 event = &lstate->event_buffer[lstate->event_w_idx]; 6261 event->initiator_id = initiator_id; 6262 event->event_type = event_type; 6263 event->event_arg = event_arg; 6264 lstate->event_w_idx++; 6265 if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE) 6266 lstate->event_w_idx = 0; 6267 } 6268 6269 /* 6270 * Send any target mode events queued up waiting 6271 * for immediate notify resources. 6272 */ 6273 void 6274 ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate) 6275 { 6276 struct ccb_hdr *ccbh; 6277 struct ccb_immed_notify *inot; 6278 6279 while (lstate->event_r_idx != lstate->event_w_idx 6280 && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) { 6281 struct ahc_tmode_event *event; 6282 6283 event = &lstate->event_buffer[lstate->event_r_idx]; 6284 SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle); 6285 inot = (struct ccb_immed_notify *)ccbh; 6286 switch (event->event_type) { 6287 case EVENT_TYPE_BUS_RESET: 6288 ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN; 6289 break; 6290 default: 6291 ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 6292 inot->message_args[0] = event->event_type; 6293 inot->message_args[1] = event->event_arg; 6294 break; 6295 } 6296 inot->initiator_id = event->initiator_id; 6297 inot->sense_len = 0; 6298 xpt_done((union ccb *)inot); 6299 lstate->event_r_idx++; 6300 if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) 6301 lstate->event_r_idx = 0; 6302 } 6303 } 6304 #endif 6305 6306 /******************** Sequencer Program Patching/Download *********************/ 6307 6308 #ifdef AHC_DUMP_SEQ 6309 void 6310 ahc_dumpseq(struct ahc_softc* ahc) 6311 { 6312 int i; 6313 6314 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); 6315 ahc_outb(ahc, SEQADDR0, 0); 6316 ahc_outb(ahc, SEQADDR1, 0); 6317 for (i = 0; i < ahc->instruction_ram_size; i++) { 6318 uint8_t ins_bytes[4]; 6319 6320 ahc_insb(ahc, SEQRAM, ins_bytes, 4); 6321 printf("0x%08x\n", ins_bytes[0] << 24 6322 | ins_bytes[1] << 16 6323 | ins_bytes[2] << 8 6324 | ins_bytes[3]); 6325 } 6326 } 6327 #endif 6328 6329 static int 6330 ahc_loadseq(struct ahc_softc *ahc) 6331 { 6332 struct cs cs_table[num_critical_sections]; 6333 u_int begin_set[num_critical_sections]; 6334 u_int end_set[num_critical_sections]; 6335 struct patch *cur_patch; 6336 u_int cs_count; 6337 u_int cur_cs; 6338 u_int i; 6339 u_int skip_addr; 6340 u_int sg_prefetch_cnt; 6341 int downloaded; 6342 uint8_t download_consts[7]; 6343 6344 /* 6345 * Start out with 0 critical sections 6346 * that apply to this firmware load. 6347 */ 6348 cs_count = 0; 6349 cur_cs = 0; 6350 memset(begin_set, 0, sizeof(begin_set)); 6351 memset(end_set, 0, sizeof(end_set)); 6352 6353 /* Setup downloadable constant table */ 6354 download_consts[QOUTFIFO_OFFSET] = 0; 6355 if (ahc->targetcmds != NULL) 6356 download_consts[QOUTFIFO_OFFSET] += 32; 6357 download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1; 6358 download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1; 6359 download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1); 6360 sg_prefetch_cnt = ahc->pci_cachesize; 6361 if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg))) 6362 sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg); 6363 download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt; 6364 download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1); 6365 download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1); 6366 6367 cur_patch = patches; 6368 downloaded = 0; 6369 skip_addr = 0; 6370 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); 6371 ahc_outb(ahc, SEQADDR0, 0); 6372 ahc_outb(ahc, SEQADDR1, 0); 6373 6374 for (i = 0; i < sizeof(seqprog)/4; i++) { 6375 if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) { 6376 /* 6377 * Don't download this instruction as it 6378 * is in a patch that was removed. 6379 */ 6380 continue; 6381 } 6382 6383 if (downloaded == ahc->instruction_ram_size) { 6384 /* 6385 * We're about to exceed the instruction 6386 * storage capacity for this chip. Fail 6387 * the load. 6388 */ 6389 printf("\n%s: Program too large for instruction memory " 6390 "size of %d!\n", ahc_name(ahc), 6391 ahc->instruction_ram_size); 6392 return (ENOMEM); 6393 } 6394 6395 /* 6396 * Move through the CS table until we find a CS 6397 * that might apply to this instruction. 6398 */ 6399 for (; cur_cs < num_critical_sections; cur_cs++) { 6400 if (critical_sections[cur_cs].end <= i) { 6401 if (begin_set[cs_count] == TRUE 6402 && end_set[cs_count] == FALSE) { 6403 cs_table[cs_count].end = downloaded; 6404 end_set[cs_count] = TRUE; 6405 cs_count++; 6406 } 6407 continue; 6408 } 6409 if (critical_sections[cur_cs].begin <= i 6410 && begin_set[cs_count] == FALSE) { 6411 cs_table[cs_count].begin = downloaded; 6412 begin_set[cs_count] = TRUE; 6413 } 6414 break; 6415 } 6416 ahc_download_instr(ahc, i, download_consts); 6417 downloaded++; 6418 } 6419 6420 ahc->num_critical_sections = cs_count; 6421 if (cs_count != 0) { 6422 6423 cs_count *= sizeof(struct cs); 6424 ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT); 6425 if (ahc->critical_sections == NULL) 6426 panic("ahc_loadseq: Could not malloc"); 6427 memcpy(ahc->critical_sections, cs_table, cs_count); 6428 } 6429 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE); 6430 6431 if (bootverbose) { 6432 printf(" %d instructions downloaded\n", downloaded); 6433 printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n", 6434 ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags); 6435 } 6436 return (0); 6437 } 6438 6439 static int 6440 ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch, 6441 u_int start_instr, u_int *skip_addr) 6442 { 6443 struct patch *cur_patch; 6444 struct patch *last_patch; 6445 u_int num_patches; 6446 6447 num_patches = sizeof(patches)/sizeof(struct patch); 6448 last_patch = &patches[num_patches]; 6449 cur_patch = *start_patch; 6450 6451 while (cur_patch < last_patch && start_instr == cur_patch->begin) { 6452 6453 if (cur_patch->patch_func(ahc) == 0) { 6454 6455 /* Start rejecting code */ 6456 *skip_addr = start_instr + cur_patch->skip_instr; 6457 cur_patch += cur_patch->skip_patch; 6458 } else { 6459 /* Accepted this patch. Advance to the next 6460 * one and wait for our intruction pointer to 6461 * hit this point. 6462 */ 6463 cur_patch++; 6464 } 6465 } 6466 6467 *start_patch = cur_patch; 6468 if (start_instr < *skip_addr) 6469 /* Still skipping */ 6470 return (0); 6471 6472 return (1); 6473 } 6474 6475 static void 6476 ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts) 6477 { 6478 union ins_formats instr; 6479 struct ins_format1 *fmt1_ins; 6480 struct ins_format3 *fmt3_ins; 6481 u_int opcode; 6482 6483 /* 6484 * The firmware is always compiled into a little endian format. 6485 */ 6486 instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]); 6487 6488 fmt1_ins = &instr.format1; 6489 fmt3_ins = NULL; 6490 6491 /* Pull the opcode */ 6492 opcode = instr.format1.opcode; 6493 switch (opcode) { 6494 case AIC_OP_JMP: 6495 case AIC_OP_JC: 6496 case AIC_OP_JNC: 6497 case AIC_OP_CALL: 6498 case AIC_OP_JNE: 6499 case AIC_OP_JNZ: 6500 case AIC_OP_JE: 6501 case AIC_OP_JZ: 6502 { 6503 struct patch *cur_patch; 6504 int address_offset; 6505 u_int address; 6506 u_int skip_addr; 6507 u_int i; 6508 6509 fmt3_ins = &instr.format3; 6510 address_offset = 0; 6511 address = fmt3_ins->address; 6512 cur_patch = patches; 6513 skip_addr = 0; 6514 6515 for (i = 0; i < address;) { 6516 6517 ahc_check_patch(ahc, &cur_patch, i, &skip_addr); 6518 6519 if (skip_addr > i) { 6520 int end_addr; 6521 6522 end_addr = MIN(address, skip_addr); 6523 address_offset += end_addr - i; 6524 i = skip_addr; 6525 } else { 6526 i++; 6527 } 6528 } 6529 address -= address_offset; 6530 fmt3_ins->address = address; 6531 /* FALLTHROUGH */ 6532 } 6533 case AIC_OP_OR: 6534 case AIC_OP_AND: 6535 case AIC_OP_XOR: 6536 case AIC_OP_ADD: 6537 case AIC_OP_ADC: 6538 case AIC_OP_BMOV: 6539 if (fmt1_ins->parity != 0) { 6540 fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; 6541 } 6542 fmt1_ins->parity = 0; 6543 if ((ahc->features & AHC_CMD_CHAN) == 0 6544 && opcode == AIC_OP_BMOV) { 6545 /* 6546 * Block move was added at the same time 6547 * as the command channel. Verify that 6548 * this is only a move of a single element 6549 * and convert the BMOV to a MOV 6550 * (AND with an immediate of FF). 6551 */ 6552 if (fmt1_ins->immediate != 1) 6553 panic("%s: BMOV not supported\n", 6554 ahc_name(ahc)); 6555 fmt1_ins->opcode = AIC_OP_AND; 6556 fmt1_ins->immediate = 0xff; 6557 } 6558 /* FALLTHROUGH */ 6559 case AIC_OP_ROL: 6560 if ((ahc->features & AHC_ULTRA2) != 0) { 6561 int i, count; 6562 6563 /* Calculate odd parity for the instruction */ 6564 for (i = 0, count = 0; i < 31; i++) { 6565 uint32_t mask; 6566 6567 mask = 0x01 << i; 6568 if ((instr.integer & mask) != 0) 6569 count++; 6570 } 6571 if ((count & 0x01) == 0) 6572 instr.format1.parity = 1; 6573 } else { 6574 /* Compress the instruction for older sequencers */ 6575 if (fmt3_ins != NULL) { 6576 instr.integer = 6577 fmt3_ins->immediate 6578 | (fmt3_ins->source << 8) 6579 | (fmt3_ins->address << 16) 6580 | (fmt3_ins->opcode << 25); 6581 } else { 6582 instr.integer = 6583 fmt1_ins->immediate 6584 | (fmt1_ins->source << 8) 6585 | (fmt1_ins->destination << 16) 6586 | (fmt1_ins->ret << 24) 6587 | (fmt1_ins->opcode << 25); 6588 } 6589 } 6590 /* The sequencer is a little endian cpu */ 6591 instr.integer = ahc_htole32(instr.integer); 6592 ahc_outsb(ahc, SEQRAM, instr.bytes, 4); 6593 break; 6594 default: 6595 panic("Unknown opcode encountered in seq program"); 6596 break; 6597 } 6598 } 6599 6600 int 6601 ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries, 6602 const char *name, u_int address, u_int value, 6603 u_int *cur_column, u_int wrap_point) 6604 { 6605 int printed; 6606 u_int printed_mask; 6607 6608 if (cur_column != NULL && *cur_column >= wrap_point) { 6609 printf("\n"); 6610 *cur_column = 0; 6611 } 6612 printed = printf("%s[0x%x]", name, value); 6613 if (table == NULL) { 6614 printed += printf(" "); 6615 *cur_column += printed; 6616 return (printed); 6617 } 6618 printed_mask = 0; 6619 while (printed_mask != 0xFF) { 6620 int entry; 6621 6622 for (entry = 0; entry < num_entries; entry++) { 6623 if (((value & table[entry].mask) 6624 != table[entry].value) 6625 || ((printed_mask & table[entry].mask) 6626 == table[entry].mask)) 6627 continue; 6628 6629 printed += printf("%s%s", 6630 printed_mask == 0 ? ":(" : "|", 6631 table[entry].name); 6632 printed_mask |= table[entry].mask; 6633 6634 break; 6635 } 6636 if (entry >= num_entries) 6637 break; 6638 } 6639 if (printed_mask != 0) 6640 printed += printf(") "); 6641 else 6642 printed += printf(" "); 6643 if (cur_column != NULL) 6644 *cur_column += printed; 6645 return (printed); 6646 } 6647 6648 void 6649 ahc_dump_card_state(struct ahc_softc *ahc) 6650 { 6651 struct scb *scb; 6652 struct scb_tailq *untagged_q; 6653 u_int cur_col; 6654 int paused; 6655 int target; 6656 int maxtarget; 6657 int i; 6658 uint8_t last_phase; 6659 uint8_t qinpos; 6660 uint8_t qintail; 6661 uint8_t qoutpos; 6662 uint8_t scb_index; 6663 uint8_t saved_scbptr; 6664 6665 if (ahc_is_paused(ahc)) { 6666 paused = 1; 6667 } else { 6668 paused = 0; 6669 ahc_pause(ahc); 6670 } 6671 6672 saved_scbptr = ahc_inb(ahc, SCBPTR); 6673 last_phase = ahc_inb(ahc, LASTPHASE); 6674 printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n" 6675 "%s: Dumping Card State %s, at SEQADDR 0x%x\n", 6676 ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg, 6677 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); 6678 if (paused) 6679 printf("Card was paused\n"); 6680 printf("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n", 6681 ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX), 6682 ahc_inb(ahc, ARG_2)); 6683 printf("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT), 6684 ahc_inb(ahc, SCBPTR)); 6685 cur_col = 0; 6686 if ((ahc->features & AHC_DT) != 0) 6687 ahc_scsiphase_print(ahc_inb(ahc, SCSIPHASE), &cur_col, 50); 6688 ahc_scsisigi_print(ahc_inb(ahc, SCSISIGI), &cur_col, 50); 6689 ahc_error_print(ahc_inb(ahc, ERROR), &cur_col, 50); 6690 ahc_scsibusl_print(ahc_inb(ahc, SCSIBUSL), &cur_col, 50); 6691 ahc_lastphase_print(ahc_inb(ahc, LASTPHASE), &cur_col, 50); 6692 ahc_scsiseq_print(ahc_inb(ahc, SCSISEQ), &cur_col, 50); 6693 ahc_sblkctl_print(ahc_inb(ahc, SBLKCTL), &cur_col, 50); 6694 ahc_scsirate_print(ahc_inb(ahc, SCSIRATE), &cur_col, 50); 6695 ahc_seqctl_print(ahc_inb(ahc, SEQCTL), &cur_col, 50); 6696 ahc_seq_flags_print(ahc_inb(ahc, SEQ_FLAGS), &cur_col, 50); 6697 ahc_sstat0_print(ahc_inb(ahc, SSTAT0), &cur_col, 50); 6698 ahc_sstat1_print(ahc_inb(ahc, SSTAT1), &cur_col, 50); 6699 ahc_sstat2_print(ahc_inb(ahc, SSTAT2), &cur_col, 50); 6700 ahc_sstat3_print(ahc_inb(ahc, SSTAT3), &cur_col, 50); 6701 ahc_simode0_print(ahc_inb(ahc, SIMODE0), &cur_col, 50); 6702 ahc_simode1_print(ahc_inb(ahc, SIMODE1), &cur_col, 50); 6703 ahc_sxfrctl0_print(ahc_inb(ahc, SXFRCTL0), &cur_col, 50); 6704 ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50); 6705 ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50); 6706 if (cur_col != 0) 6707 printf("\n"); 6708 printf("STACK:"); 6709 for (i = 0; i < STACK_SIZE; i++) 6710 printf(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8)); 6711 printf("\nSCB count = %d\n", ahc->scb_data->numscbs); 6712 printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag); 6713 printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB)); 6714 /* QINFIFO */ 6715 printf("QINFIFO entries: "); 6716 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 6717 qinpos = ahc_inb(ahc, SNSCB_QOFF); 6718 ahc_outb(ahc, SNSCB_QOFF, qinpos); 6719 } else 6720 qinpos = ahc_inb(ahc, QINPOS); 6721 qintail = ahc->qinfifonext; 6722 while (qinpos != qintail) { 6723 printf("%d ", ahc->qinfifo[qinpos]); 6724 qinpos++; 6725 } 6726 printf("\n"); 6727 6728 printf("Waiting Queue entries: "); 6729 scb_index = ahc_inb(ahc, WAITING_SCBH); 6730 i = 0; 6731 while (scb_index != SCB_LIST_NULL && i++ < 256) { 6732 ahc_outb(ahc, SCBPTR, scb_index); 6733 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); 6734 scb_index = ahc_inb(ahc, SCB_NEXT); 6735 } 6736 printf("\n"); 6737 6738 printf("Disconnected Queue entries: "); 6739 scb_index = ahc_inb(ahc, DISCONNECTED_SCBH); 6740 i = 0; 6741 while (scb_index != SCB_LIST_NULL && i++ < 256) { 6742 ahc_outb(ahc, SCBPTR, scb_index); 6743 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); 6744 scb_index = ahc_inb(ahc, SCB_NEXT); 6745 } 6746 printf("\n"); 6747 6748 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); 6749 printf("QOUTFIFO entries: "); 6750 qoutpos = ahc->qoutfifonext; 6751 i = 0; 6752 while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) { 6753 printf("%d ", ahc->qoutfifo[qoutpos]); 6754 qoutpos++; 6755 } 6756 printf("\n"); 6757 6758 printf("Sequencer Free SCB List: "); 6759 scb_index = ahc_inb(ahc, FREE_SCBH); 6760 i = 0; 6761 while (scb_index != SCB_LIST_NULL && i++ < 256) { 6762 ahc_outb(ahc, SCBPTR, scb_index); 6763 printf("%d ", scb_index); 6764 scb_index = ahc_inb(ahc, SCB_NEXT); 6765 } 6766 printf("\n"); 6767 6768 printf("Sequencer SCB Info: "); 6769 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 6770 ahc_outb(ahc, SCBPTR, i); 6771 cur_col = printf("\n%3d ", i); 6772 6773 ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60); 6774 ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60); 6775 ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60); 6776 ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); 6777 } 6778 printf("\n"); 6779 6780 printf("Pending list: "); 6781 i = 0; 6782 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { 6783 if (i++ > 256) 6784 break; 6785 cur_col = printf("\n%3d ", scb->hscb->tag); 6786 ahc_scb_control_print(scb->hscb->control, &cur_col, 60); 6787 ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60); 6788 ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60); 6789 if ((ahc->flags & AHC_PAGESCBS) == 0) { 6790 ahc_outb(ahc, SCBPTR, scb->hscb->tag); 6791 printf("("); 6792 ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), 6793 &cur_col, 60); 6794 ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); 6795 printf(")"); 6796 } 6797 } 6798 printf("\n"); 6799 6800 printf("Kernel Free SCB list: "); 6801 i = 0; 6802 SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) { 6803 if (i++ > 256) 6804 break; 6805 printf("%d ", scb->hscb->tag); 6806 } 6807 printf("\n"); 6808 6809 maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7; 6810 for (target = 0; target <= maxtarget; target++) { 6811 untagged_q = &ahc->untagged_queues[target]; 6812 if (TAILQ_FIRST(untagged_q) == NULL) 6813 continue; 6814 printf("Untagged Q(%d): ", target); 6815 i = 0; 6816 TAILQ_FOREACH(scb, untagged_q, links.tqe) { 6817 if (i++ > 256) 6818 break; 6819 printf("%d ", scb->hscb->tag); 6820 } 6821 printf("\n"); 6822 } 6823 6824 ahc_platform_dump_card_state(ahc); 6825 printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); 6826 ahc_outb(ahc, SCBPTR, saved_scbptr); 6827 if (paused == 0) 6828 ahc_unpause(ahc); 6829 } 6830 6831 /************************* Target Mode ****************************************/ 6832 #ifdef AHC_TARGET_MODE 6833 cam_status 6834 ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb, 6835 struct ahc_tmode_tstate **tstate, 6836 struct ahc_tmode_lstate **lstate, 6837 int notfound_failure) 6838 { 6839 6840 if ((ahc->features & AHC_TARGETMODE) == 0) 6841 return (CAM_REQ_INVALID); 6842 6843 /* 6844 * Handle the 'black hole' device that sucks up 6845 * requests to unattached luns on enabled targets. 6846 */ 6847 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD 6848 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 6849 *tstate = NULL; 6850 *lstate = ahc->black_hole; 6851 } else { 6852 u_int max_id; 6853 6854 max_id = (ahc->features & AHC_WIDE) ? 15 : 7; 6855 if (ccb->ccb_h.target_id > max_id) 6856 return (CAM_TID_INVALID); 6857 6858 if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS) 6859 return (CAM_LUN_INVALID); 6860 6861 *tstate = ahc->enabled_targets[ccb->ccb_h.target_id]; 6862 *lstate = NULL; 6863 if (*tstate != NULL) 6864 *lstate = 6865 (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; 6866 } 6867 6868 if (notfound_failure != 0 && *lstate == NULL) 6869 return (CAM_PATH_INVALID); 6870 6871 return (CAM_REQ_CMP); 6872 } 6873 6874 void 6875 ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) 6876 { 6877 struct ahc_tmode_tstate *tstate; 6878 struct ahc_tmode_lstate *lstate; 6879 struct ccb_en_lun *cel; 6880 cam_status status; 6881 u_long s; 6882 u_int target; 6883 u_int lun; 6884 u_int target_mask; 6885 u_int our_id; 6886 int error; 6887 char channel; 6888 6889 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate, 6890 /*notfound_failure*/FALSE); 6891 6892 if (status != CAM_REQ_CMP) { 6893 ccb->ccb_h.status = status; 6894 return; 6895 } 6896 6897 if (cam_sim_bus(sim) == 0) 6898 our_id = ahc->our_id; 6899 else 6900 our_id = ahc->our_id_b; 6901 6902 if (ccb->ccb_h.target_id != our_id) { 6903 /* 6904 * our_id represents our initiator ID, or 6905 * the ID of the first target to have an 6906 * enabled lun in target mode. There are 6907 * two cases that may preclude enabling a 6908 * target id other than our_id. 6909 * 6910 * o our_id is for an active initiator role. 6911 * Since the hardware does not support 6912 * reselections to the initiator role at 6913 * anything other than our_id, and our_id 6914 * is used by the hardware to indicate the 6915 * ID to use for both select-out and 6916 * reselect-out operations, the only target 6917 * ID we can support in this mode is our_id. 6918 * 6919 * o The MULTARGID feature is not available and 6920 * a previous target mode ID has been enabled. 6921 */ 6922 if ((ahc->features & AHC_MULTIROLE) != 0) { 6923 6924 if ((ahc->features & AHC_MULTI_TID) != 0 6925 && (ahc->flags & AHC_INITIATORROLE) != 0) { 6926 /* 6927 * Only allow additional targets if 6928 * the initiator role is disabled. 6929 * The hardware cannot handle a re-select-in 6930 * on the initiator id during a re-select-out 6931 * on a different target id. 6932 */ 6933 status = CAM_TID_INVALID; 6934 } else if ((ahc->flags & AHC_INITIATORROLE) != 0 6935 || ahc->enabled_luns > 0) { 6936 /* 6937 * Only allow our target id to change 6938 * if the initiator role is not configured 6939 * and there are no enabled luns which 6940 * are attached to the currently registered 6941 * scsi id. 6942 */ 6943 status = CAM_TID_INVALID; 6944 } 6945 } else if ((ahc->features & AHC_MULTI_TID) == 0 6946 && ahc->enabled_luns > 0) { 6947 6948 status = CAM_TID_INVALID; 6949 } 6950 } 6951 6952 if (status != CAM_REQ_CMP) { 6953 ccb->ccb_h.status = status; 6954 return; 6955 } 6956 6957 /* 6958 * We now have an id that is valid. 6959 * If we aren't in target mode, switch modes. 6960 */ 6961 if ((ahc->flags & AHC_TARGETROLE) == 0 6962 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 6963 u_long s; 6964 ahc_flag saved_flags; 6965 6966 printf("Configuring Target Mode\n"); 6967 ahc_lock(ahc, &s); 6968 if (LIST_FIRST(&ahc->pending_scbs) != NULL) { 6969 ccb->ccb_h.status = CAM_BUSY; 6970 ahc_unlock(ahc, &s); 6971 return; 6972 } 6973 saved_flags = ahc->flags; 6974 ahc->flags |= AHC_TARGETROLE; 6975 if ((ahc->features & AHC_MULTIROLE) == 0) 6976 ahc->flags &= ~AHC_INITIATORROLE; 6977 ahc_pause(ahc); 6978 error = ahc_loadseq(ahc); 6979 if (error != 0) { 6980 /* 6981 * Restore original configuration and notify 6982 * the caller that we cannot support target mode. 6983 * Since the adapter started out in this 6984 * configuration, the firmware load will succeed, 6985 * so there is no point in checking ahc_loadseq's 6986 * return value. 6987 */ 6988 ahc->flags = saved_flags; 6989 (void)ahc_loadseq(ahc); 6990 ahc_restart(ahc); 6991 ahc_unlock(ahc, &s); 6992 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 6993 return; 6994 } 6995 ahc_restart(ahc); 6996 ahc_unlock(ahc, &s); 6997 } 6998 cel = &ccb->cel; 6999 target = ccb->ccb_h.target_id; 7000 lun = ccb->ccb_h.target_lun; 7001 channel = SIM_CHANNEL(ahc, sim); 7002 target_mask = 0x01 << target; 7003 if (channel == 'B') 7004 target_mask <<= 8; 7005 7006 if (cel->enable != 0) { 7007 u_int scsiseq; 7008 7009 /* Are we already enabled?? */ 7010 if (lstate != NULL) { 7011 xpt_print_path(ccb->ccb_h.path); 7012 printf("Lun already enabled\n"); 7013 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 7014 return; 7015 } 7016 7017 if (cel->grp6_len != 0 7018 || cel->grp7_len != 0) { 7019 /* 7020 * Don't (yet?) support vendor 7021 * specific commands. 7022 */ 7023 ccb->ccb_h.status = CAM_REQ_INVALID; 7024 printf("Non-zero Group Codes\n"); 7025 return; 7026 } 7027 7028 /* 7029 * Seems to be okay. 7030 * Setup our data structures. 7031 */ 7032 if (target != CAM_TARGET_WILDCARD && tstate == NULL) { 7033 tstate = ahc_alloc_tstate(ahc, target, channel); 7034 if (tstate == NULL) { 7035 xpt_print_path(ccb->ccb_h.path); 7036 printf("Couldn't allocate tstate\n"); 7037 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 7038 return; 7039 } 7040 } 7041 lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT); 7042 if (lstate == NULL) { 7043 xpt_print_path(ccb->ccb_h.path); 7044 printf("Couldn't allocate lstate\n"); 7045 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 7046 return; 7047 } 7048 memset(lstate, 0, sizeof(*lstate)); 7049 status = xpt_create_path(&lstate->path, /*periph*/NULL, 7050 xpt_path_path_id(ccb->ccb_h.path), 7051 xpt_path_target_id(ccb->ccb_h.path), 7052 xpt_path_lun_id(ccb->ccb_h.path)); 7053 if (status != CAM_REQ_CMP) { 7054 free(lstate, M_DEVBUF); 7055 xpt_print_path(ccb->ccb_h.path); 7056 printf("Couldn't allocate path\n"); 7057 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 7058 return; 7059 } 7060 SLIST_INIT(&lstate->accept_tios); 7061 SLIST_INIT(&lstate->immed_notifies); 7062 ahc_lock(ahc, &s); 7063 ahc_pause(ahc); 7064 if (target != CAM_TARGET_WILDCARD) { 7065 tstate->enabled_luns[lun] = lstate; 7066 ahc->enabled_luns++; 7067 7068 if ((ahc->features & AHC_MULTI_TID) != 0) { 7069 u_int targid_mask; 7070 7071 targid_mask = ahc_inb(ahc, TARGID) 7072 | (ahc_inb(ahc, TARGID + 1) << 8); 7073 7074 targid_mask |= target_mask; 7075 ahc_outb(ahc, TARGID, targid_mask); 7076 ahc_outb(ahc, TARGID+1, (targid_mask >> 8)); 7077 7078 ahc_update_scsiid(ahc, targid_mask); 7079 } else { 7080 u_int our_id; 7081 char channel; 7082 7083 channel = SIM_CHANNEL(ahc, sim); 7084 our_id = SIM_SCSI_ID(ahc, sim); 7085 7086 /* 7087 * This can only happen if selections 7088 * are not enabled 7089 */ 7090 if (target != our_id) { 7091 u_int sblkctl; 7092 char cur_channel; 7093 int swap; 7094 7095 sblkctl = ahc_inb(ahc, SBLKCTL); 7096 cur_channel = (sblkctl & SELBUSB) 7097 ? 'B' : 'A'; 7098 if ((ahc->features & AHC_TWIN) == 0) 7099 cur_channel = 'A'; 7100 swap = cur_channel != channel; 7101 if (channel == 'A') 7102 ahc->our_id = target; 7103 else 7104 ahc->our_id_b = target; 7105 7106 if (swap) 7107 ahc_outb(ahc, SBLKCTL, 7108 sblkctl ^ SELBUSB); 7109 7110 ahc_outb(ahc, SCSIID, target); 7111 7112 if (swap) 7113 ahc_outb(ahc, SBLKCTL, sblkctl); 7114 } 7115 } 7116 } else 7117 ahc->black_hole = lstate; 7118 /* Allow select-in operations */ 7119 if (ahc->black_hole != NULL && ahc->enabled_luns > 0) { 7120 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); 7121 scsiseq |= ENSELI; 7122 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); 7123 scsiseq = ahc_inb(ahc, SCSISEQ); 7124 scsiseq |= ENSELI; 7125 ahc_outb(ahc, SCSISEQ, scsiseq); 7126 } 7127 ahc_unpause(ahc); 7128 ahc_unlock(ahc, &s); 7129 ccb->ccb_h.status = CAM_REQ_CMP; 7130 xpt_print_path(ccb->ccb_h.path); 7131 printf("Lun now enabled for target mode\n"); 7132 } else { 7133 struct scb *scb; 7134 int i, empty; 7135 7136 if (lstate == NULL) { 7137 ccb->ccb_h.status = CAM_LUN_INVALID; 7138 return; 7139 } 7140 7141 ahc_lock(ahc, &s); 7142 7143 ccb->ccb_h.status = CAM_REQ_CMP; 7144 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { 7145 struct ccb_hdr *ccbh; 7146 7147 ccbh = &scb->io_ctx->ccb_h; 7148 if (ccbh->func_code == XPT_CONT_TARGET_IO 7149 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ 7150 printf("CTIO pending\n"); 7151 ccb->ccb_h.status = CAM_REQ_INVALID; 7152 ahc_unlock(ahc, &s); 7153 return; 7154 } 7155 } 7156 7157 if (SLIST_FIRST(&lstate->accept_tios) != NULL) { 7158 printf("ATIOs pending\n"); 7159 ccb->ccb_h.status = CAM_REQ_INVALID; 7160 } 7161 7162 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { 7163 printf("INOTs pending\n"); 7164 ccb->ccb_h.status = CAM_REQ_INVALID; 7165 } 7166 7167 if (ccb->ccb_h.status != CAM_REQ_CMP) { 7168 ahc_unlock(ahc, &s); 7169 return; 7170 } 7171 7172 xpt_print_path(ccb->ccb_h.path); 7173 printf("Target mode disabled\n"); 7174 xpt_free_path(lstate->path); 7175 free(lstate, M_DEVBUF); 7176 7177 ahc_pause(ahc); 7178 /* Can we clean up the target too? */ 7179 if (target != CAM_TARGET_WILDCARD) { 7180 tstate->enabled_luns[lun] = NULL; 7181 ahc->enabled_luns--; 7182 for (empty = 1, i = 0; i < 8; i++) 7183 if (tstate->enabled_luns[i] != NULL) { 7184 empty = 0; 7185 break; 7186 } 7187 7188 if (empty) { 7189 ahc_free_tstate(ahc, target, channel, 7190 /*force*/FALSE); 7191 if (ahc->features & AHC_MULTI_TID) { 7192 u_int targid_mask; 7193 7194 targid_mask = ahc_inb(ahc, TARGID) 7195 | (ahc_inb(ahc, TARGID + 1) 7196 << 8); 7197 7198 targid_mask &= ~target_mask; 7199 ahc_outb(ahc, TARGID, targid_mask); 7200 ahc_outb(ahc, TARGID+1, 7201 (targid_mask >> 8)); 7202 ahc_update_scsiid(ahc, targid_mask); 7203 } 7204 } 7205 } else { 7206 7207 ahc->black_hole = NULL; 7208 7209 /* 7210 * We can't allow selections without 7211 * our black hole device. 7212 */ 7213 empty = TRUE; 7214 } 7215 if (ahc->enabled_luns == 0) { 7216 /* Disallow select-in */ 7217 u_int scsiseq; 7218 7219 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); 7220 scsiseq &= ~ENSELI; 7221 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); 7222 scsiseq = ahc_inb(ahc, SCSISEQ); 7223 scsiseq &= ~ENSELI; 7224 ahc_outb(ahc, SCSISEQ, scsiseq); 7225 7226 if ((ahc->features & AHC_MULTIROLE) == 0) { 7227 printf("Configuring Initiator Mode\n"); 7228 ahc->flags &= ~AHC_TARGETROLE; 7229 ahc->flags |= AHC_INITIATORROLE; 7230 /* 7231 * Returning to a configuration that 7232 * fit previously will always succeed. 7233 */ 7234 (void)ahc_loadseq(ahc); 7235 ahc_restart(ahc); 7236 /* 7237 * Unpaused. The extra unpause 7238 * that follows is harmless. 7239 */ 7240 } 7241 } 7242 ahc_unpause(ahc); 7243 ahc_unlock(ahc, &s); 7244 } 7245 } 7246 7247 static void 7248 ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask) 7249 { 7250 u_int scsiid_mask; 7251 u_int scsiid; 7252 7253 if ((ahc->features & AHC_MULTI_TID) == 0) 7254 panic("ahc_update_scsiid called on non-multitid unit\n"); 7255 7256 /* 7257 * Since we will rely on the TARGID mask 7258 * for selection enables, ensure that OID 7259 * in SCSIID is not set to some other ID 7260 * that we don't want to allow selections on. 7261 */ 7262 if ((ahc->features & AHC_ULTRA2) != 0) 7263 scsiid = ahc_inb(ahc, SCSIID_ULTRA2); 7264 else 7265 scsiid = ahc_inb(ahc, SCSIID); 7266 scsiid_mask = 0x1 << (scsiid & OID); 7267 if ((targid_mask & scsiid_mask) == 0) { 7268 u_int our_id; 7269 7270 /* ffs counts from 1 */ 7271 our_id = ffs(targid_mask); 7272 if (our_id == 0) 7273 our_id = ahc->our_id; 7274 else 7275 our_id--; 7276 scsiid &= TID; 7277 scsiid |= our_id; 7278 } 7279 if ((ahc->features & AHC_ULTRA2) != 0) 7280 ahc_outb(ahc, SCSIID_ULTRA2, scsiid); 7281 else 7282 ahc_outb(ahc, SCSIID, scsiid); 7283 } 7284 7285 void 7286 ahc_run_tqinfifo(struct ahc_softc *ahc, int paused) 7287 { 7288 struct target_cmd *cmd; 7289 7290 /* 7291 * If the card supports auto-access pause, 7292 * we can access the card directly regardless 7293 * of whether it is paused or not. 7294 */ 7295 if ((ahc->features & AHC_AUTOPAUSE) != 0) 7296 paused = TRUE; 7297 7298 ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD); 7299 while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) { 7300 7301 /* 7302 * Only advance through the queue if we 7303 * have the resources to process the command. 7304 */ 7305 if (ahc_handle_target_cmd(ahc, cmd) != 0) 7306 break; 7307 7308 cmd->cmd_valid = 0; 7309 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, 7310 ahc->shared_data_dmamap, 7311 ahc_targetcmd_offset(ahc, ahc->tqinfifonext), 7312 sizeof(struct target_cmd), 7313 BUS_DMASYNC_PREREAD); 7314 ahc->tqinfifonext++; 7315 7316 /* 7317 * Lazily update our position in the target mode incoming 7318 * command queue as seen by the sequencer. 7319 */ 7320 if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { 7321 if ((ahc->features & AHC_HS_MAILBOX) != 0) { 7322 u_int hs_mailbox; 7323 7324 hs_mailbox = ahc_inb(ahc, HS_MAILBOX); 7325 hs_mailbox &= ~HOST_TQINPOS; 7326 hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS; 7327 ahc_outb(ahc, HS_MAILBOX, hs_mailbox); 7328 } else { 7329 if (!paused) 7330 ahc_pause(ahc); 7331 ahc_outb(ahc, KERNEL_TQINPOS, 7332 ahc->tqinfifonext & HOST_TQINPOS); 7333 if (!paused) 7334 ahc_unpause(ahc); 7335 } 7336 } 7337 } 7338 } 7339 7340 static int 7341 ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd) 7342 { 7343 struct ahc_tmode_tstate *tstate; 7344 struct ahc_tmode_lstate *lstate; 7345 struct ccb_accept_tio *atio; 7346 uint8_t *byte; 7347 int initiator; 7348 int target; 7349 int lun; 7350 7351 initiator = SCSIID_TARGET(ahc, cmd->scsiid); 7352 target = SCSIID_OUR_ID(cmd->scsiid); 7353 lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); 7354 7355 byte = cmd->bytes; 7356 tstate = ahc->enabled_targets[target]; 7357 lstate = NULL; 7358 if (tstate != NULL) 7359 lstate = tstate->enabled_luns[lun]; 7360 7361 /* 7362 * Commands for disabled luns go to the black hole driver. 7363 */ 7364 if (lstate == NULL) 7365 lstate = ahc->black_hole; 7366 7367 atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); 7368 if (atio == NULL) { 7369 ahc->flags |= AHC_TQINFIFO_BLOCKED; 7370 /* 7371 * Wait for more ATIOs from the peripheral driver for this lun. 7372 */ 7373 if (bootverbose) 7374 printf("%s: ATIOs exhausted\n", ahc_name(ahc)); 7375 return (1); 7376 } else 7377 ahc->flags &= ~AHC_TQINFIFO_BLOCKED; 7378 #if 0 7379 printf("Incoming command from %d for %d:%d%s\n", 7380 initiator, target, lun, 7381 lstate == ahc->black_hole ? "(Black Holed)" : ""); 7382 #endif 7383 SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); 7384 7385 if (lstate == ahc->black_hole) { 7386 /* Fill in the wildcards */ 7387 atio->ccb_h.target_id = target; 7388 atio->ccb_h.target_lun = lun; 7389 } 7390 7391 /* 7392 * Package it up and send it off to 7393 * whomever has this lun enabled. 7394 */ 7395 atio->sense_len = 0; 7396 atio->init_id = initiator; 7397 if (byte[0] != 0xFF) { 7398 /* Tag was included */ 7399 atio->tag_action = *byte++; 7400 atio->tag_id = *byte++; 7401 atio->ccb_h.flags = CAM_TAG_ACTION_VALID; 7402 } else { 7403 atio->ccb_h.flags = 0; 7404 } 7405 byte++; 7406 7407 /* Okay. Now determine the cdb size based on the command code */ 7408 switch (*byte >> CMD_GROUP_CODE_SHIFT) { 7409 case 0: 7410 atio->cdb_len = 6; 7411 break; 7412 case 1: 7413 case 2: 7414 atio->cdb_len = 10; 7415 break; 7416 case 4: 7417 atio->cdb_len = 16; 7418 break; 7419 case 5: 7420 atio->cdb_len = 12; 7421 break; 7422 case 3: 7423 default: 7424 /* Only copy the opcode. */ 7425 atio->cdb_len = 1; 7426 printf("Reserved or VU command code type encountered\n"); 7427 break; 7428 } 7429 7430 memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); 7431 7432 atio->ccb_h.status |= CAM_CDB_RECVD; 7433 7434 if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { 7435 /* 7436 * We weren't allowed to disconnect. 7437 * We're hanging on the bus until a 7438 * continue target I/O comes in response 7439 * to this accept tio. 7440 */ 7441 #if 0 7442 printf("Received Immediate Command %d:%d:%d - %p\n", 7443 initiator, target, lun, ahc->pending_device); 7444 #endif 7445 ahc->pending_device = lstate; 7446 ahc_freeze_ccb((union ccb *)atio); 7447 atio->ccb_h.flags |= CAM_DIS_DISCONNECT; 7448 } 7449 xpt_done((union ccb*)atio); 7450 return (0); 7451 } 7452 7453 #endif 7454