/*
 * Core routines and tables shareable across OS platforms.
 *
 * Copyright (c) 1994-2002 Justin T. Gibbs.
 * Copyright (c) 2000-2002 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#134 $
 *
 * $FreeBSD$
 */

/* OS-specific shims first; they pull in the register and SCB definitions. */
#ifdef __linux__
#include "aic7xxx_osm.h"
#include "aic7xxx_inline.h"
#include "aicasm/aicasm_insformat.h"
#else
#include <dev/aic7xxx/aic7xxx_osm.h>
#include <dev/aic7xxx/aic7xxx_inline.h>
#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
#endif

/***************************** Lookup Tables **********************************/
/*
 * Human-readable controller names.  num_chip_names tracks the table size
 * so lookups can be bounds-checked.
 */
char *ahc_chip_names[] =
{
	"NONE",
	"aic7770",
	"aic7850",
	"aic7855",
	"aic7859",
	"aic7860",
	"aic7870",
	"aic7880",
	"aic7895",
	"aic7895C",
	"aic7890/91",
	"aic7896/97",
	"aic7892",
	"aic7899"
};
static const u_int num_chip_names = NUM_ELEMENTS(ahc_chip_names);

/*
 * Hardware error codes.
77 */ 78 struct ahc_hard_error_entry { 79 uint8_t errno; 80 char *errmesg; 81 }; 82 83 static struct ahc_hard_error_entry ahc_hard_errors[] = { 84 { ILLHADDR, "Illegal Host Access" }, 85 { ILLSADDR, "Illegal Sequencer Address referrenced" }, 86 { ILLOPCODE, "Illegal Opcode in sequencer program" }, 87 { SQPARERR, "Sequencer Parity Error" }, 88 { DPARERR, "Data-path Parity Error" }, 89 { MPARERR, "Scratch or SCB Memory Parity Error" }, 90 { PCIERRSTAT, "PCI Error detected" }, 91 { CIOPARERR, "CIOBUS Parity Error" }, 92 }; 93 static const u_int num_errors = NUM_ELEMENTS(ahc_hard_errors); 94 95 static struct ahc_phase_table_entry ahc_phase_table[] = 96 { 97 { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, 98 { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, 99 { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" }, 100 { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" }, 101 { P_COMMAND, MSG_NOOP, "in Command phase" }, 102 { P_MESGOUT, MSG_NOOP, "in Message-out phase" }, 103 { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" }, 104 { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, 105 { P_BUSFREE, MSG_NOOP, "while idle" }, 106 { 0, MSG_NOOP, "in unknown phase" } 107 }; 108 109 /* 110 * In most cases we only wish to itterate over real phases, so 111 * exclude the last element from the count. 112 */ 113 static const u_int num_phases = NUM_ELEMENTS(ahc_phase_table) - 1; 114 115 /* 116 * Valid SCSIRATE values. (p. 3-17) 117 * Provides a mapping of tranfer periods in ns to the proper value to 118 * stick in the scsixfer reg. 
119 */ 120 static struct ahc_syncrate ahc_syncrates[] = 121 { 122 /* ultra2 fast/ultra period rate */ 123 { 0x42, 0x000, 9, "80.0" }, 124 { 0x03, 0x000, 10, "40.0" }, 125 { 0x04, 0x000, 11, "33.0" }, 126 { 0x05, 0x100, 12, "20.0" }, 127 { 0x06, 0x110, 15, "16.0" }, 128 { 0x07, 0x120, 18, "13.4" }, 129 { 0x08, 0x000, 25, "10.0" }, 130 { 0x19, 0x010, 31, "8.0" }, 131 { 0x1a, 0x020, 37, "6.67" }, 132 { 0x1b, 0x030, 43, "5.7" }, 133 { 0x1c, 0x040, 50, "5.0" }, 134 { 0x00, 0x050, 56, "4.4" }, 135 { 0x00, 0x060, 62, "4.0" }, 136 { 0x00, 0x070, 68, "3.6" }, 137 { 0x00, 0x000, 0, NULL } 138 }; 139 140 /* Our Sequencer Program */ 141 #include "aic7xxx_seq.h" 142 143 /**************************** Function Declarations ***************************/ 144 static void ahc_force_renegotiation(struct ahc_softc *ahc, 145 struct ahc_devinfo *devinfo); 146 static struct ahc_tmode_tstate* 147 ahc_alloc_tstate(struct ahc_softc *ahc, 148 u_int scsi_id, char channel); 149 #ifdef AHC_TARGET_MODE 150 static void ahc_free_tstate(struct ahc_softc *ahc, 151 u_int scsi_id, char channel, int force); 152 #endif 153 static struct ahc_syncrate* 154 ahc_devlimited_syncrate(struct ahc_softc *ahc, 155 struct ahc_initiator_tinfo *, 156 u_int *period, 157 u_int *ppr_options, 158 role_t role); 159 static void ahc_update_pending_scbs(struct ahc_softc *ahc); 160 static void ahc_fetch_devinfo(struct ahc_softc *ahc, 161 struct ahc_devinfo *devinfo); 162 static void ahc_scb_devinfo(struct ahc_softc *ahc, 163 struct ahc_devinfo *devinfo, 164 struct scb *scb); 165 static void ahc_assert_atn(struct ahc_softc *ahc); 166 static void ahc_setup_initiator_msgout(struct ahc_softc *ahc, 167 struct ahc_devinfo *devinfo, 168 struct scb *scb); 169 static void ahc_build_transfer_msg(struct ahc_softc *ahc, 170 struct ahc_devinfo *devinfo); 171 static void ahc_construct_sdtr(struct ahc_softc *ahc, 172 struct ahc_devinfo *devinfo, 173 u_int period, u_int offset); 174 static void ahc_construct_wdtr(struct ahc_softc *ahc, 175 
struct ahc_devinfo *devinfo, 176 u_int bus_width); 177 static void ahc_construct_ppr(struct ahc_softc *ahc, 178 struct ahc_devinfo *devinfo, 179 u_int period, u_int offset, 180 u_int bus_width, u_int ppr_options); 181 static void ahc_clear_msg_state(struct ahc_softc *ahc); 182 static void ahc_handle_proto_violation(struct ahc_softc *ahc); 183 static void ahc_handle_message_phase(struct ahc_softc *ahc); 184 typedef enum { 185 AHCMSG_1B, 186 AHCMSG_2B, 187 AHCMSG_EXT 188 } ahc_msgtype; 189 static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, 190 u_int msgval, int full); 191 static int ahc_parse_msg(struct ahc_softc *ahc, 192 struct ahc_devinfo *devinfo); 193 static int ahc_handle_msg_reject(struct ahc_softc *ahc, 194 struct ahc_devinfo *devinfo); 195 static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc, 196 struct ahc_devinfo *devinfo); 197 static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc); 198 static void ahc_handle_devreset(struct ahc_softc *ahc, 199 struct ahc_devinfo *devinfo, 200 cam_status status, char *message, 201 int verbose_level); 202 #ifdef AHC_TARGET_MODE 203 static void ahc_setup_target_msgin(struct ahc_softc *ahc, 204 struct ahc_devinfo *devinfo, 205 struct scb *scb); 206 #endif 207 208 static bus_dmamap_callback_t ahc_dmamap_cb; 209 static void ahc_build_free_scb_list(struct ahc_softc *ahc); 210 static int ahc_init_scbdata(struct ahc_softc *ahc); 211 static void ahc_fini_scbdata(struct ahc_softc *ahc); 212 static void ahc_qinfifo_requeue(struct ahc_softc *ahc, 213 struct scb *prev_scb, 214 struct scb *scb); 215 static int ahc_qinfifo_count(struct ahc_softc *ahc); 216 static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, 217 u_int prev, u_int scbptr); 218 static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc); 219 static u_int ahc_rem_wscb(struct ahc_softc *ahc, 220 u_int scbpos, u_int prev); 221 static void ahc_reset_current_bus(struct ahc_softc *ahc); 222 #ifdef AHC_DUMP_SEQ 223 static void 
ahc_dumpseq(struct ahc_softc *ahc);
#endif
static int		ahc_loadseq(struct ahc_softc *ahc);
static int		ahc_check_patch(struct ahc_softc *ahc,
					struct patch **start_patch,
					u_int start_instr, u_int *skip_addr);
static void		ahc_download_instr(struct ahc_softc *ahc,
					   u_int instrptr, uint8_t *dconsts);
#ifdef AHC_TARGET_MODE
static void		ahc_queue_lstate_event(struct ahc_softc *ahc,
					       struct ahc_tmode_lstate *lstate,
					       u_int initiator_id,
					       u_int event_type,
					       u_int event_arg);
static void		ahc_update_scsiid(struct ahc_softc *ahc,
					  u_int targid_mask);
static int		ahc_handle_target_cmd(struct ahc_softc *ahc,
					      struct target_cmd *cmd);
#endif
/************************* Sequencer Execution Control ************************/
/*
 * Restart the sequencer program from address zero.
 *
 * Pauses the chip, clears any host message state and bus signal
 * latches, resynchronizes TQINPOS with the kernel's view, stops any
 * command-channel DMA, reclaims an SCB that was mid-DMA, and finally
 * resets SEQADDR0/1 to zero before unpausing.
 */
void
ahc_restart(struct ahc_softc *ahc)
{

	ahc_pause(ahc);

	/* No more pending messages. */
	ahc_clear_msg_state(ahc);

	ahc_outb(ahc, SCSISIGO, 0);		/* De-assert BSY */
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);	/* No message to send */
	ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
	ahc_outb(ahc, LASTPHASE, P_BUSFREE);
	/* Invalidate the last-seen target/lun latches. */
	ahc_outb(ahc, SAVED_SCSIID, 0xFF);
	ahc_outb(ahc, SAVED_LUN, 0xFF);

	/*
	 * Ensure that the sequencer's idea of TQINPOS
	 * matches our own.  The sequencer increments TQINPOS
	 * only after it sees a DMA complete and a reset could
	 * occur before the increment leaving the kernel to believe
	 * the command arrived but the sequencer to not.
	 */
	ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);

	/* Always allow reselection */
	ahc_outb(ahc, SCSISEQ,
		 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
	if ((ahc->features & AHC_CMD_CHAN) != 0) {
		/* Ensure that no DMA operations are in progress */
		ahc_outb(ahc, CCSCBCNT, 0);
		ahc_outb(ahc, CCSGCTL, 0);
		ahc_outb(ahc, CCSCBCTL, 0);
	}
	/*
	 * If we were in the process of DMA'ing SCB data into
	 * an SCB, replace that SCB on the free list.  This prevents
	 * an SCB leak.
	 */
	if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) {
		ahc_add_curscb_to_free_list(ahc);
		ahc_outb(ahc, SEQ_FLAGS2,
			 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA);
	}
	ahc_outb(ahc, MWI_RESIDUAL, 0);
	ahc_outb(ahc, SEQCTL, ahc->seqctl);
	/* Sequencer program counter back to instruction zero. */
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);
	ahc_unpause(ahc);
}

/************************* Input/Output Queues ********************************/
/*
 * Drain the controller's completion FIFO, calling ahc_done() for every
 * finished SCB.  Entries are consumed until an SCB_LIST_NULL marker.
 */
void
ahc_run_qoutfifo(struct ahc_softc *ahc)
{
	struct scb *scb;
	u_int  scb_index;

	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
	while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) {

		scb_index = ahc->qoutfifo[ahc->qoutfifonext];
		if ((ahc->qoutfifonext & 0x03) == 0x03) {
			u_int modnext;

			/*
			 * Clear 32bits of QOUTFIFO at a time
			 * so that we don't clobber an incoming
			 * byte DMA to the array on architectures
			 * that only support 32bit load and store
			 * operations.
317 */ 318 modnext = ahc->qoutfifonext & ~0x3; 319 *((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL; 320 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, 321 ahc->shared_data_dmamap, 322 /*offset*/modnext, /*len*/4, 323 BUS_DMASYNC_PREREAD); 324 } 325 ahc->qoutfifonext++; 326 327 scb = ahc_lookup_scb(ahc, scb_index); 328 if (scb == NULL) { 329 printf("%s: WARNING no command for scb %d " 330 "(cmdcmplt)\nQOUTPOS = %d\n", 331 ahc_name(ahc), scb_index, 332 (ahc->qoutfifonext - 1) & 0xFF); 333 continue; 334 } 335 336 /* 337 * Save off the residual 338 * if there is one. 339 */ 340 ahc_update_residual(ahc, scb); 341 ahc_done(ahc, scb); 342 } 343 } 344 345 void 346 ahc_run_untagged_queues(struct ahc_softc *ahc) 347 { 348 int i; 349 350 for (i = 0; i < 16; i++) 351 ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]); 352 } 353 354 void 355 ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue) 356 { 357 struct scb *scb; 358 359 if (ahc->untagged_queue_lock != 0) 360 return; 361 362 if ((scb = TAILQ_FIRST(queue)) != NULL 363 && (scb->flags & SCB_ACTIVE) == 0) { 364 scb->flags |= SCB_ACTIVE; 365 ahc_queue_scb(ahc, scb); 366 } 367 } 368 369 /************************* Interrupt Handling *********************************/ 370 void 371 ahc_handle_brkadrint(struct ahc_softc *ahc) 372 { 373 /* 374 * We upset the sequencer :-( 375 * Lookup the error message 376 */ 377 int i; 378 int error; 379 380 error = ahc_inb(ahc, ERROR); 381 for (i = 0; error != 1 && i < num_errors; i++) 382 error >>= 1; 383 printf("%s: brkadrint, %s at seqaddr = 0x%x\n", 384 ahc_name(ahc), ahc_hard_errors[i].errmesg, 385 ahc_inb(ahc, SEQADDR0) | 386 (ahc_inb(ahc, SEQADDR1) << 8)); 387 388 ahc_dump_card_state(ahc); 389 390 /* Tell everyone that this HBA is no longer available */ 391 ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS, 392 CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, 393 CAM_NO_HBA); 394 395 /* Disable all interrupt sources by resetting the controller */ 396 
ahc_shutdown(ahc); 397 } 398 399 void 400 ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat) 401 { 402 struct scb *scb; 403 struct ahc_devinfo devinfo; 404 405 ahc_fetch_devinfo(ahc, &devinfo); 406 407 /* 408 * Clear the upper byte that holds SEQINT status 409 * codes and clear the SEQINT bit. We will unpause 410 * the sequencer, if appropriate, after servicing 411 * the request. 412 */ 413 ahc_outb(ahc, CLRINT, CLRSEQINT); 414 switch (intstat & SEQINT_MASK) { 415 case BAD_STATUS: 416 { 417 u_int scb_index; 418 struct hardware_scb *hscb; 419 420 /* 421 * Set the default return value to 0 (don't 422 * send sense). The sense code will change 423 * this if needed. 424 */ 425 ahc_outb(ahc, RETURN_1, 0); 426 427 /* 428 * The sequencer will notify us when a command 429 * has an error that would be of interest to 430 * the kernel. This allows us to leave the sequencer 431 * running in the common case of command completes 432 * without error. The sequencer will already have 433 * dma'd the SCB back up to us, so we can reference 434 * the in kernel copy directly. 435 */ 436 scb_index = ahc_inb(ahc, SCB_TAG); 437 scb = ahc_lookup_scb(ahc, scb_index); 438 if (scb == NULL) { 439 ahc_print_devinfo(ahc, &devinfo); 440 printf("ahc_intr - referenced scb " 441 "not valid during seqint 0x%x scb(%d)\n", 442 intstat, scb_index); 443 ahc_dump_card_state(ahc); 444 panic("for safety"); 445 goto unpause; 446 } 447 448 hscb = scb->hscb; 449 450 /* Don't want to clobber the original sense code */ 451 if ((scb->flags & SCB_SENSE) != 0) { 452 /* 453 * Clear the SCB_SENSE Flag and have 454 * the sequencer do a normal command 455 * complete. 456 */ 457 scb->flags &= ~SCB_SENSE; 458 ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); 459 break; 460 } 461 ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); 462 /* Freeze the queue until the client sees the error. 
*/ 463 ahc_freeze_devq(ahc, scb); 464 ahc_freeze_scb(scb); 465 ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status); 466 switch (hscb->shared_data.status.scsi_status) { 467 case SCSI_STATUS_OK: 468 printf("%s: Interrupted for staus of 0???\n", 469 ahc_name(ahc)); 470 break; 471 case SCSI_STATUS_CMD_TERMINATED: 472 case SCSI_STATUS_CHECK_COND: 473 { 474 struct ahc_dma_seg *sg; 475 struct scsi_sense *sc; 476 struct ahc_initiator_tinfo *targ_info; 477 struct ahc_tmode_tstate *tstate; 478 struct ahc_transinfo *tinfo; 479 #ifdef AHC_DEBUG 480 if (ahc_debug & AHC_SHOW_SENSE) { 481 ahc_print_path(ahc, scb); 482 printf("SCB %d: requests Check Status\n", 483 scb->hscb->tag); 484 } 485 #endif 486 487 if (ahc_perform_autosense(scb) == 0) 488 break; 489 490 targ_info = ahc_fetch_transinfo(ahc, 491 devinfo.channel, 492 devinfo.our_scsiid, 493 devinfo.target, 494 &tstate); 495 tinfo = &targ_info->curr; 496 sg = scb->sg_list; 497 sc = (struct scsi_sense *)(&hscb->shared_data.cdb); 498 /* 499 * Save off the residual if there is one. 500 */ 501 ahc_update_residual(ahc, scb); 502 #ifdef AHC_DEBUG 503 if (ahc_debug & AHC_SHOW_SENSE) { 504 ahc_print_path(ahc, scb); 505 printf("Sending Sense\n"); 506 } 507 #endif 508 sg->addr = ahc_get_sense_bufaddr(ahc, scb); 509 sg->len = ahc_get_sense_bufsize(ahc, scb); 510 sg->len |= AHC_DMA_LAST_SEG; 511 512 /* Fixup byte order */ 513 sg->addr = ahc_htole32(sg->addr); 514 sg->len = ahc_htole32(sg->len); 515 516 sc->opcode = REQUEST_SENSE; 517 sc->byte2 = 0; 518 if (tinfo->protocol_version <= SCSI_REV_2 519 && SCB_GET_LUN(scb) < 8) 520 sc->byte2 = SCB_GET_LUN(scb) << 5; 521 sc->unused[0] = 0; 522 sc->unused[1] = 0; 523 sc->length = sg->len; 524 sc->control = 0; 525 526 /* 527 * We can't allow the target to disconnect. 528 * This will be an untagged transaction and 529 * having the target disconnect will make this 530 * transaction indestinguishable from outstanding 531 * tagged transactions. 
532 */ 533 hscb->control = 0; 534 535 /* 536 * This request sense could be because the 537 * the device lost power or in some other 538 * way has lost our transfer negotiations. 539 * Renegotiate if appropriate. Unit attention 540 * errors will be reported before any data 541 * phases occur. 542 */ 543 if (ahc_get_residual(scb) 544 == ahc_get_transfer_length(scb)) { 545 ahc_update_neg_request(ahc, &devinfo, 546 tstate, targ_info, 547 AHC_NEG_IF_NON_ASYNC); 548 } 549 if (tstate->auto_negotiate & devinfo.target_mask) { 550 hscb->control |= MK_MESSAGE; 551 scb->flags &= ~SCB_NEGOTIATE; 552 scb->flags |= SCB_AUTO_NEGOTIATE; 553 } 554 hscb->cdb_len = sizeof(*sc); 555 hscb->dataptr = sg->addr; 556 hscb->datacnt = sg->len; 557 hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID; 558 hscb->sgptr = ahc_htole32(hscb->sgptr); 559 scb->sg_count = 1; 560 scb->flags |= SCB_SENSE; 561 ahc_qinfifo_requeue_tail(ahc, scb); 562 ahc_outb(ahc, RETURN_1, SEND_SENSE); 563 /* 564 * Ensure we have enough time to actually 565 * retrieve the sense. 
			 */
			ahc_scb_timer_reset(scb, 5 * 1000000);
			break;
		}
		default:
			break;
		}
		break;
	}
	case NO_MATCH:
	{
		/* Ensure we don't leave the selection hardware on */
		ahc_outb(ahc, SCSISEQ,
			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));

		/*
		 * A reselecting target matched no SCB we know about.
		 * Dump diagnostic state, then queue a BUS DEVICE RESET
		 * message to the offending device.
		 */
		printf("%s:%c:%d: no active SCB for reconnecting "
		       "target - issuing BUS DEVICE RESET\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target);
		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
		       "SINDEX == 0x%x\n",
		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
		       ahc_index_busy_tcl(ahc,
			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
				      ahc_inb(ahc, SAVED_LUN))),
		       ahc_inb(ahc, SINDEX));
		printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
		       ahc_inb(ahc, SCB_CONTROL));
		printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
		       ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
		printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));
		printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL));
		ahc_dump_card_state(ahc);
		ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
		ahc->msgout_len = 1;
		ahc->msgout_index = 0;
		ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		ahc_outb(ahc, MSG_OUT, HOST_MSG);
		ahc_assert_atn(ahc);
		break;
	}
	case SEND_REJECT:
	{
		/* The sequencer rejected an unrecognized inbound message. */
		u_int rejbyte = ahc_inb(ahc, ACCUM);
		printf("%s:%c:%d: Warning - unknown message received from "
		       "target (0x%x).  Rejecting\n", 
		       ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
		break;
	}
	case PROTO_VIOLATION:
	{
		ahc_handle_proto_violation(ahc);
		break;
	}
	case IGN_WIDE_RES:
		ahc_handle_ign_wide_residue(ahc, &devinfo);
		break;
	case PDATA_REINIT:
		ahc_reinitialize_dataptrs(ahc);
		break;
	case BAD_PHASE:
	{
		u_int lastphase;

		lastphase = ahc_inb(ahc, LASTPHASE);
		printf("%s:%c:%d: unknown scsi bus phase %x, "
		       "lastphase = 0x%x.  Attempting to continue\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target,
		       lastphase, ahc_inb(ahc, SCSISIGI));
		break;
	}
	case MISSED_BUSFREE:
	{
		u_int lastphase;

		lastphase = ahc_inb(ahc, LASTPHASE);
		printf("%s:%c:%d: Missed busfree. "
		       "Lastphase = 0x%x, Curphase = 0x%x\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target,
		       lastphase, ahc_inb(ahc, SCSISIGI));
		/* Restart implies an unpause; do not fall through. */
		ahc_restart(ahc);
		return;
	}
	case HOST_MSG_LOOP:
	{
		/*
		 * The sequencer has encountered a message phase
		 * that requires host assistance for completion.
		 * While handling the message phase(s), we will be
		 * notified by the sequencer after each byte is
		 * transfered so we can track bus phase changes.
		 *
		 * If this is the first time we've seen a HOST_MSG_LOOP
		 * interrupt, initialize the state of the host message
		 * loop.
		 */
		if (ahc->msg_type == MSG_TYPE_NONE) {
			struct scb *scb;
			u_int scb_index;
			u_int bus_phase;

			bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
			if (bus_phase != P_MESGIN
			 && bus_phase != P_MESGOUT) {
				printf("ahc_intr: HOST_MSG_LOOP bad "
				       "phase 0x%x\n",
				      bus_phase);
				/*
				 * Probably transitioned to bus free before
				 * we got here.  Just punt the message.
				 */
				ahc_clear_intstat(ahc);
				ahc_restart(ahc);
				return;
			}

			scb_index = ahc_inb(ahc, SCB_TAG);
			scb = ahc_lookup_scb(ahc, scb_index);
			if (devinfo.role == ROLE_INITIATOR) {
				/* As initiator we must have an active SCB. */
				if (scb == NULL)
					panic("HOST_MSG_LOOP with "
					      "invalid SCB %x\n", scb_index);

				if (bus_phase == P_MESGOUT)
					ahc_setup_initiator_msgout(ahc,
								   &devinfo,
								   scb);
				else {
					ahc->msg_type =
					    MSG_TYPE_INITIATOR_MSGIN;
					ahc->msgin_index = 0;
				}
			}
#ifdef AHC_TARGET_MODE
			else {
				if (bus_phase == P_MESGOUT) {
					ahc->msg_type =
					    MSG_TYPE_TARGET_MSGOUT;
					ahc->msgin_index = 0;
				}
				else 
					ahc_setup_target_msgin(ahc,
							       &devinfo,
							       scb);
			}
#endif
		}

		ahc_handle_message_phase(ahc);
		break;
	}
	case PERR_DETECTED:
	{
		/*
		 * If we've cleared the parity error interrupt
		 * but the sequencer still believes that SCSIPERR
		 * is true, it must be that the parity error is
		 * for the currently presented byte on the bus,
		 * and we are not in a phase (data-in) where we will
		 * eventually ack this byte.  Ack the byte and
		 * throw it away in the hope that the target will
		 * take us to message out to deliver the appropriate
		 * error message.
		 */
		if ((intstat & SCSIINT) == 0
		 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) {

			if ((ahc->features & AHC_DT) == 0) {
				u_int curphase;

				/*
				 * The hardware will only let you ack bytes
				 * if the expected phase in SCSISIGO matches
				 * the current phase.  Make sure this is
				 * currently the case.
				 */
				curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
				ahc_outb(ahc, LASTPHASE, curphase);
				ahc_outb(ahc, SCSISIGO, curphase);
			}
			if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) == 0) {
				int wait;

				/*
				 * In a data phase.  Faster to bitbucket
				 * the data than to individually ack each
				 * byte.  This is also the only strategy
				 * that will work with AUTOACK enabled.
				 */
				ahc_outb(ahc, SXFRCTL1,
					 ahc_inb(ahc, SXFRCTL1) | BITBUCKET);
				/* Poll up to 5000 * 100us for a phase change. */
				wait = 5000;
				while (--wait != 0) {
					if ((ahc_inb(ahc, SCSISIGI)
					  & (CDI|MSGI)) != 0)
						break;
					ahc_delay(100);
				}
				ahc_outb(ahc, SXFRCTL1,
					 ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
				if (wait == 0) {
					struct	scb *scb;
					u_int	scb_index;

					ahc_print_devinfo(ahc, &devinfo);
					printf("Unable to clear parity error.  "
					       "Resetting bus.\n");
					scb_index = ahc_inb(ahc, SCB_TAG);
					scb = ahc_lookup_scb(ahc, scb_index);
					if (scb != NULL)
						ahc_set_transaction_status(scb,
						    CAM_UNCOR_PARITY);
					ahc_reset_channel(ahc, devinfo.channel, 
							  /*init reset*/TRUE);
				}
			} else {
				/* Ack and discard the offending byte. */
				ahc_inb(ahc, SCSIDATL);
			}
		}
		break;
	}
	case DATA_OVERRUN:
	{
		/*
		 * When the sequencer detects an overrun, it
		 * places the controller in "BITBUCKET" mode
		 * and allows the target to complete its transfer.
		 * Unfortunately, none of the counters get updated
		 * when the controller is in this mode, so we have
		 * no way of knowing how large the overrun was.
		 */
		u_int scbindex = ahc_inb(ahc, SCB_TAG);
		u_int lastphase = ahc_inb(ahc, LASTPHASE);
		u_int i;

		scb = ahc_lookup_scb(ahc, scbindex);
		/* Map lastphase to a printable phase description. */
		for (i = 0; i < num_phases; i++) {
			if (lastphase == ahc_phase_table[i].phase)
				break;
		}
		ahc_print_path(ahc, scb);
		printf("data overrun detected %s."
		       "  Tag == 0x%x.\n",
		       ahc_phase_table[i].phasemsg,
  		       scb->hscb->tag);
		ahc_print_path(ahc, scb);
		printf("%s seen Data Phase.  Length = %ld.  NumSGs = %d.\n",
		       ahc_inb(ahc, SEQ_FLAGS) & DPHASE ?
"Have" : "Haven't", 820 ahc_get_transfer_length(scb), scb->sg_count); 821 if (scb->sg_count > 0) { 822 for (i = 0; i < scb->sg_count; i++) { 823 824 printf("sg[%d] - Addr 0x%x%x : Length %d\n", 825 i, 826 (ahc_le32toh(scb->sg_list[i].len) >> 24 827 & SG_HIGH_ADDR_BITS), 828 ahc_le32toh(scb->sg_list[i].addr), 829 ahc_le32toh(scb->sg_list[i].len) 830 & AHC_SG_LEN_MASK); 831 } 832 } 833 /* 834 * Set this and it will take effect when the 835 * target does a command complete. 836 */ 837 ahc_freeze_devq(ahc, scb); 838 if ((scb->flags & SCB_SENSE) == 0) { 839 ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR); 840 } else { 841 scb->flags &= ~SCB_SENSE; 842 ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); 843 } 844 ahc_freeze_scb(scb); 845 846 if ((ahc->features & AHC_ULTRA2) != 0) { 847 /* 848 * Clear the channel in case we return 849 * to data phase later. 850 */ 851 ahc_outb(ahc, SXFRCTL0, 852 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); 853 ahc_outb(ahc, SXFRCTL0, 854 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); 855 } 856 if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { 857 u_int dscommand1; 858 859 /* Ensure HHADDR is 0 for future DMA operations. */ 860 dscommand1 = ahc_inb(ahc, DSCOMMAND1); 861 ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); 862 ahc_outb(ahc, HADDR, 0); 863 ahc_outb(ahc, DSCOMMAND1, dscommand1); 864 } 865 break; 866 } 867 case MKMSG_FAILED: 868 { 869 u_int scbindex; 870 871 printf("%s:%c:%d:%d: Attempt to issue message failed\n", 872 ahc_name(ahc), devinfo.channel, devinfo.target, 873 devinfo.lun); 874 scbindex = ahc_inb(ahc, SCB_TAG); 875 scb = ahc_lookup_scb(ahc, scbindex); 876 if (scb != NULL 877 && (scb->flags & SCB_RECOVERY_SCB) != 0) 878 /* 879 * Ensure that we didn't put a second instance of this 880 * SCB into the QINFIFO. 
881 */ 882 ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), 883 SCB_GET_CHANNEL(ahc, scb), 884 SCB_GET_LUN(scb), scb->hscb->tag, 885 ROLE_INITIATOR, /*status*/0, 886 SEARCH_REMOVE); 887 break; 888 } 889 case NO_FREE_SCB: 890 { 891 printf("%s: No free or disconnected SCBs\n", ahc_name(ahc)); 892 ahc_dump_card_state(ahc); 893 panic("for safety"); 894 break; 895 } 896 case SCB_MISMATCH: 897 { 898 u_int scbptr; 899 900 scbptr = ahc_inb(ahc, SCBPTR); 901 printf("Bogus TAG after DMA. SCBPTR %d, tag %d, our tag %d\n", 902 scbptr, ahc_inb(ahc, ARG_1), 903 ahc->scb_data->hscbs[scbptr].tag); 904 ahc_dump_card_state(ahc); 905 panic("for saftey"); 906 break; 907 } 908 case OUT_OF_RANGE: 909 { 910 printf("%s: BTT calculation out of range\n", ahc_name(ahc)); 911 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 912 "ARG_1 == 0x%x ACCUM = 0x%x\n", 913 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), 914 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); 915 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 916 "SINDEX == 0x%x\n, A == 0x%x\n", 917 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), 918 ahc_index_busy_tcl(ahc, 919 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), 920 ahc_inb(ahc, SAVED_LUN))), 921 ahc_inb(ahc, SINDEX), 922 ahc_inb(ahc, ACCUM)); 923 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 924 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", 925 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), 926 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), 927 ahc_inb(ahc, SCB_CONTROL)); 928 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", 929 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); 930 ahc_dump_card_state(ahc); 931 panic("for safety"); 932 break; 933 } 934 default: 935 printf("ahc_intr: seqint, " 936 "intstat == 0x%x, scsisigi = 0x%x\n", 937 intstat, ahc_inb(ahc, SCSISIGI)); 938 break; 939 } 940 unpause: 941 /* 942 * The sequencer is paused immediately on 943 * a SEQINT, so we should restart it when 944 * we're done. 
945 */ 946 ahc_unpause(ahc); 947 } 948 949 void 950 ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) 951 { 952 u_int scb_index; 953 u_int status0; 954 u_int status; 955 struct scb *scb; 956 char cur_channel; 957 char intr_channel; 958 959 if ((ahc->features & AHC_TWIN) != 0 960 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0)) 961 cur_channel = 'B'; 962 else 963 cur_channel = 'A'; 964 intr_channel = cur_channel; 965 966 if ((ahc->features & AHC_ULTRA2) != 0) 967 status0 = ahc_inb(ahc, SSTAT0) & IOERR; 968 else 969 status0 = 0; 970 status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 971 if (status == 0 && status0 == 0) { 972 if ((ahc->features & AHC_TWIN) != 0) { 973 /* Try the other channel */ 974 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 975 status = ahc_inb(ahc, SSTAT1) 976 & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 977 intr_channel = (cur_channel == 'A') ? 'B' : 'A'; 978 } 979 if (status == 0) { 980 printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc)); 981 ahc_outb(ahc, CLRINT, CLRSCSIINT); 982 ahc_unpause(ahc); 983 return; 984 } 985 } 986 987 /* Make sure the sequencer is in a safe location. */ 988 ahc_clear_critical_section(ahc); 989 990 scb_index = ahc_inb(ahc, SCB_TAG); 991 scb = ahc_lookup_scb(ahc, scb_index); 992 if (scb != NULL 993 && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) 994 scb = NULL; 995 996 if ((ahc->features & AHC_ULTRA2) != 0 997 && (status0 & IOERR) != 0) { 998 int now_lvd; 999 1000 now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40; 1001 printf("%s: Transceiver State Has Changed to %s mode\n", 1002 ahc_name(ahc), now_lvd ? "LVD" : "SE"); 1003 ahc_outb(ahc, CLRSINT0, CLRIOERR); 1004 /* 1005 * When transitioning to SE mode, the reset line 1006 * glitches, triggering an arbitration bug in some 1007 * Ultra2 controllers. This bug is cleared when we 1008 * assert the reset line. 
Since a reset glitch has 1009 * already occurred with this transition and a 1010 * transceiver state change is handled just like 1011 * a bus reset anyway, asserting the reset line 1012 * ourselves is safe. 1013 */ 1014 ahc_reset_channel(ahc, intr_channel, 1015 /*Initiate Reset*/now_lvd == 0); 1016 } else if ((status & SCSIRSTI) != 0) { 1017 printf("%s: Someone reset channel %c\n", 1018 ahc_name(ahc), intr_channel); 1019 if (intr_channel != cur_channel) 1020 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 1021 ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE); 1022 } else if ((status & SCSIPERR) != 0) { 1023 /* 1024 * Determine the bus phase and queue an appropriate message. 1025 * SCSIPERR is latched true as soon as a parity error 1026 * occurs. If the sequencer acked the transfer that 1027 * caused the parity error and the currently presented 1028 * transfer on the bus has correct parity, SCSIPERR will 1029 * be cleared by CLRSCSIPERR. Use this to determine if 1030 * we should look at the last phase the sequencer recorded, 1031 * or the current phase presented on the bus. 1032 */ 1033 struct ahc_devinfo devinfo; 1034 u_int mesg_out; 1035 u_int curphase; 1036 u_int errorphase; 1037 u_int lastphase; 1038 u_int scsirate; 1039 u_int i; 1040 u_int sstat2; 1041 int silent; 1042 1043 lastphase = ahc_inb(ahc, LASTPHASE); 1044 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 1045 sstat2 = ahc_inb(ahc, SSTAT2); 1046 ahc_outb(ahc, CLRSINT1, CLRSCSIPERR); 1047 /* 1048 * For all phases save DATA, the sequencer won't 1049 * automatically ack a byte that has a parity error 1050 * in it. So the only way that the current phase 1051 * could be 'data-in' is if the parity error is for 1052 * an already acked byte in the data phase. During 1053 * synchronous data-in transfers, we may actually 1054 * ack bytes before latching the current phase in 1055 * LASTPHASE, leading to the discrepancy between 1056 * curphase and lastphase. 
1057 */ 1058 if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0 1059 || curphase == P_DATAIN || curphase == P_DATAIN_DT) 1060 errorphase = curphase; 1061 else 1062 errorphase = lastphase; 1063 1064 for (i = 0; i < num_phases; i++) { 1065 if (errorphase == ahc_phase_table[i].phase) 1066 break; 1067 } 1068 mesg_out = ahc_phase_table[i].mesg_out; 1069 silent = FALSE; 1070 if (scb != NULL) { 1071 if (SCB_IS_SILENT(scb)) 1072 silent = TRUE; 1073 else 1074 ahc_print_path(ahc, scb); 1075 scb->flags |= SCB_TRANSMISSION_ERROR; 1076 } else 1077 printf("%s:%c:%d: ", ahc_name(ahc), intr_channel, 1078 SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID))); 1079 scsirate = ahc_inb(ahc, SCSIRATE); 1080 if (silent == FALSE) { 1081 printf("parity error detected %s. " 1082 "SEQADDR(0x%x) SCSIRATE(0x%x)\n", 1083 ahc_phase_table[i].phasemsg, 1084 ahc_inw(ahc, SEQADDR0), 1085 scsirate); 1086 if ((ahc->features & AHC_DT) != 0) { 1087 if ((sstat2 & CRCVALERR) != 0) 1088 printf("\tCRC Value Mismatch\n"); 1089 if ((sstat2 & CRCENDERR) != 0) 1090 printf("\tNo terminal CRC packet " 1091 "recevied\n"); 1092 if ((sstat2 & CRCREQERR) != 0) 1093 printf("\tIllegal CRC packet " 1094 "request\n"); 1095 if ((sstat2 & DUAL_EDGE_ERR) != 0) 1096 printf("\tUnexpected %sDT Data Phase\n", 1097 (scsirate & SINGLE_EDGE) 1098 ? "" : "non-"); 1099 } 1100 } 1101 1102 if ((ahc->features & AHC_DT) != 0 1103 && (sstat2 & DUAL_EDGE_ERR) != 0) { 1104 /* 1105 * This error applies regardless of 1106 * data direction, so ignore the value 1107 * in the phase table. 1108 */ 1109 mesg_out = MSG_INITIATOR_DET_ERR; 1110 } 1111 1112 /* 1113 * We've set the hardware to assert ATN if we 1114 * get a parity error on "in" phases, so all we 1115 * need to do is stuff the message buffer with 1116 * the appropriate message. "In" phases have set 1117 * mesg_out to something other than MSG_NOP. 
1118 */ 1119 if (mesg_out != MSG_NOOP) { 1120 if (ahc->msg_type != MSG_TYPE_NONE) 1121 ahc->send_msg_perror = TRUE; 1122 else 1123 ahc_outb(ahc, MSG_OUT, mesg_out); 1124 } 1125 /* 1126 * Force a renegotiation with this target just in 1127 * case we are out of sync for some external reason 1128 * unknown (or unreported) by the target. 1129 */ 1130 ahc_fetch_devinfo(ahc, &devinfo); 1131 ahc_force_renegotiation(ahc, &devinfo); 1132 1133 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1134 ahc_unpause(ahc); 1135 } else if ((status & SELTO) != 0) { 1136 u_int scbptr; 1137 1138 /* Stop the selection */ 1139 ahc_outb(ahc, SCSISEQ, 0); 1140 1141 /* No more pending messages */ 1142 ahc_clear_msg_state(ahc); 1143 1144 /* Clear interrupt state */ 1145 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 1146 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR); 1147 1148 /* 1149 * Although the driver does not care about the 1150 * 'Selection in Progress' status bit, the busy 1151 * LED does. SELINGO is only cleared by a sucessfull 1152 * selection, so we must manually clear it to insure 1153 * the LED turns off just incase no future successful 1154 * selections occur (e.g. no devices on the bus). 1155 */ 1156 ahc_outb(ahc, CLRSINT0, CLRSELINGO); 1157 1158 scbptr = ahc_inb(ahc, WAITING_SCBH); 1159 ahc_outb(ahc, SCBPTR, scbptr); 1160 scb_index = ahc_inb(ahc, SCB_TAG); 1161 1162 scb = ahc_lookup_scb(ahc, scb_index); 1163 if (scb == NULL) { 1164 printf("%s: ahc_intr - referenced scb not " 1165 "valid during SELTO scb(%d, %d)\n", 1166 ahc_name(ahc), scbptr, scb_index); 1167 ahc_dump_card_state(ahc); 1168 } else { 1169 struct ahc_devinfo devinfo; 1170 #ifdef AHC_DEBUG 1171 if ((ahc_debug & AHC_SHOW_SELTO) != 0) { 1172 ahc_print_path(ahc, scb); 1173 printf("Saw Selection Timeout for SCB 0x%x\n", 1174 scb_index); 1175 } 1176 #endif 1177 /* 1178 * Force a renegotiation with this target just in 1179 * case the cable was pulled and will later be 1180 * re-attached. 
The target may forget its negotiation 1181 * settings with us should it attempt to reselect 1182 * during the interruption. The target will not issue 1183 * a unit attention in this case, so we must always 1184 * renegotiate. 1185 */ 1186 ahc_scb_devinfo(ahc, &devinfo, scb); 1187 ahc_force_renegotiation(ahc, &devinfo); 1188 ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT); 1189 ahc_freeze_devq(ahc, scb); 1190 } 1191 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1192 ahc_restart(ahc); 1193 } else if ((status & BUSFREE) != 0 1194 && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) { 1195 struct ahc_devinfo devinfo; 1196 u_int lastphase; 1197 u_int saved_scsiid; 1198 u_int saved_lun; 1199 u_int target; 1200 u_int initiator_role_id; 1201 char channel; 1202 int printerror; 1203 1204 /* 1205 * Clear our selection hardware as soon as possible. 1206 * We may have an entry in the waiting Q for this target, 1207 * that is affected by this busfree and we don't want to 1208 * go about selecting the target while we handle the event. 1209 */ 1210 ahc_outb(ahc, SCSISEQ, 1211 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 1212 1213 /* 1214 * Disable busfree interrupts and clear the busfree 1215 * interrupt status. We do this here so that several 1216 * bus transactions occur prior to clearing the SCSIINT 1217 * latch. It can take a bit for the clearing to take effect. 1218 */ 1219 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 1220 ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR); 1221 1222 /* 1223 * Look at what phase we were last in. 1224 * If its message out, chances are pretty good 1225 * that the busfree was in response to one of 1226 * our abort requests. 
1227 */ 1228 lastphase = ahc_inb(ahc, LASTPHASE); 1229 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); 1230 saved_lun = ahc_inb(ahc, SAVED_LUN); 1231 target = SCSIID_TARGET(ahc, saved_scsiid); 1232 initiator_role_id = SCSIID_OUR_ID(saved_scsiid); 1233 channel = SCSIID_CHANNEL(ahc, saved_scsiid); 1234 ahc_compile_devinfo(&devinfo, initiator_role_id, 1235 target, saved_lun, channel, ROLE_INITIATOR); 1236 printerror = 1; 1237 1238 if (lastphase == P_MESGOUT) { 1239 u_int tag; 1240 1241 tag = SCB_LIST_NULL; 1242 if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE) 1243 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) { 1244 if (ahc->msgout_buf[ahc->msgout_index - 1] 1245 == MSG_ABORT_TAG) 1246 tag = scb->hscb->tag; 1247 ahc_print_path(ahc, scb); 1248 printf("SCB %d - Abort%s Completed.\n", 1249 scb->hscb->tag, tag == SCB_LIST_NULL ? 1250 "" : " Tag"); 1251 ahc_abort_scbs(ahc, target, channel, 1252 saved_lun, tag, 1253 ROLE_INITIATOR, 1254 CAM_REQ_ABORTED); 1255 printerror = 0; 1256 } else if (ahc_sent_msg(ahc, AHCMSG_1B, 1257 MSG_BUS_DEV_RESET, TRUE)) { 1258 #ifdef __FreeBSD__ 1259 /* 1260 * Don't mark the user's request for this BDR 1261 * as completing with CAM_BDR_SENT. CAM3 1262 * specifies CAM_REQ_CMP. 1263 */ 1264 if (scb != NULL 1265 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV 1266 && ahc_match_scb(ahc, scb, target, channel, 1267 CAM_LUN_WILDCARD, 1268 SCB_LIST_NULL, 1269 ROLE_INITIATOR)) { 1270 ahc_set_transaction_status(scb, CAM_REQ_CMP); 1271 } 1272 #endif 1273 ahc_compile_devinfo(&devinfo, 1274 initiator_role_id, 1275 target, 1276 CAM_LUN_WILDCARD, 1277 channel, 1278 ROLE_INITIATOR); 1279 ahc_handle_devreset(ahc, &devinfo, 1280 CAM_BDR_SENT, 1281 "Bus Device Reset", 1282 /*verbose_level*/0); 1283 printerror = 0; 1284 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1285 MSG_EXT_PPR, FALSE)) { 1286 struct ahc_initiator_tinfo *tinfo; 1287 struct ahc_tmode_tstate *tstate; 1288 1289 /* 1290 * PPR Rejected. Try non-ppr negotiation 1291 * and retry command. 
1292 */ 1293 tinfo = ahc_fetch_transinfo(ahc, 1294 devinfo.channel, 1295 devinfo.our_scsiid, 1296 devinfo.target, 1297 &tstate); 1298 tinfo->curr.transport_version = 2; 1299 tinfo->goal.transport_version = 2; 1300 tinfo->goal.ppr_options = 0; 1301 ahc_qinfifo_requeue_tail(ahc, scb); 1302 printerror = 0; 1303 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1304 MSG_EXT_WDTR, FALSE)) { 1305 /* 1306 * Negotiation Rejected. Go-narrow and 1307 * retry command. 1308 */ 1309 ahc_set_width(ahc, &devinfo, 1310 MSG_EXT_WDTR_BUS_8_BIT, 1311 AHC_TRANS_CUR|AHC_TRANS_GOAL, 1312 /*paused*/TRUE); 1313 ahc_qinfifo_requeue_tail(ahc, scb); 1314 printerror = 0; 1315 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1316 MSG_EXT_SDTR, FALSE)) { 1317 /* 1318 * Negotiation Rejected. Go-async and 1319 * retry command. 1320 */ 1321 ahc_set_syncrate(ahc, &devinfo, 1322 /*syncrate*/NULL, 1323 /*period*/0, /*offset*/0, 1324 /*ppr_options*/0, 1325 AHC_TRANS_CUR|AHC_TRANS_GOAL, 1326 /*paused*/TRUE); 1327 ahc_qinfifo_requeue_tail(ahc, scb); 1328 printerror = 0; 1329 } 1330 } 1331 if (printerror != 0) { 1332 u_int i; 1333 1334 if (scb != NULL) { 1335 u_int tag; 1336 1337 if ((scb->hscb->control & TAG_ENB) != 0) 1338 tag = scb->hscb->tag; 1339 else 1340 tag = SCB_LIST_NULL; 1341 ahc_print_path(ahc, scb); 1342 ahc_abort_scbs(ahc, target, channel, 1343 SCB_GET_LUN(scb), tag, 1344 ROLE_INITIATOR, 1345 CAM_UNEXP_BUSFREE); 1346 } else { 1347 /* 1348 * We had not fully identified this connection, 1349 * so we cannot abort anything. 1350 */ 1351 printf("%s: ", ahc_name(ahc)); 1352 } 1353 for (i = 0; i < num_phases; i++) { 1354 if (lastphase == ahc_phase_table[i].phase) 1355 break; 1356 } 1357 if (lastphase != P_BUSFREE) { 1358 /* 1359 * Renegotiate with this device at the 1360 * next oportunity just in case this busfree 1361 * is due to a negotiation mismatch with the 1362 * device. 
1363 */ 1364 ahc_force_renegotiation(ahc, &devinfo); 1365 } 1366 printf("Unexpected busfree %s\n" 1367 "SEQADDR == 0x%x\n", 1368 ahc_phase_table[i].phasemsg, 1369 ahc_inb(ahc, SEQADDR0) 1370 | (ahc_inb(ahc, SEQADDR1) << 8)); 1371 } 1372 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1373 ahc_restart(ahc); 1374 } else { 1375 printf("%s: Missing case in ahc_handle_scsiint. status = %x\n", 1376 ahc_name(ahc), status); 1377 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1378 } 1379 } 1380 1381 /* 1382 * Force renegotiation to occur the next time we initiate 1383 * a command to the current device. 1384 */ 1385 static void 1386 ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 1387 { 1388 struct ahc_initiator_tinfo *targ_info; 1389 struct ahc_tmode_tstate *tstate; 1390 1391 targ_info = ahc_fetch_transinfo(ahc, 1392 devinfo->channel, 1393 devinfo->our_scsiid, 1394 devinfo->target, 1395 &tstate); 1396 ahc_update_neg_request(ahc, devinfo, tstate, 1397 targ_info, AHC_NEG_IF_NON_ASYNC); 1398 } 1399 1400 #define AHC_MAX_STEPS 2000 1401 void 1402 ahc_clear_critical_section(struct ahc_softc *ahc) 1403 { 1404 int stepping; 1405 int steps; 1406 u_int simode0; 1407 u_int simode1; 1408 1409 if (ahc->num_critical_sections == 0) 1410 return; 1411 1412 stepping = FALSE; 1413 steps = 0; 1414 simode0 = 0; 1415 simode1 = 0; 1416 for (;;) { 1417 struct cs *cs; 1418 u_int seqaddr; 1419 u_int i; 1420 1421 seqaddr = ahc_inb(ahc, SEQADDR0) 1422 | (ahc_inb(ahc, SEQADDR1) << 8); 1423 1424 /* 1425 * Seqaddr represents the next instruction to execute, 1426 * so we are really executing the instruction just 1427 * before it. 
1428 */ 1429 if (seqaddr != 0) 1430 seqaddr -= 1; 1431 cs = ahc->critical_sections; 1432 for (i = 0; i < ahc->num_critical_sections; i++, cs++) { 1433 1434 if (cs->begin < seqaddr && cs->end >= seqaddr) 1435 break; 1436 } 1437 1438 if (i == ahc->num_critical_sections) 1439 break; 1440 1441 if (steps > AHC_MAX_STEPS) { 1442 printf("%s: Infinite loop in critical section\n", 1443 ahc_name(ahc)); 1444 ahc_dump_card_state(ahc); 1445 panic("critical section loop"); 1446 } 1447 1448 steps++; 1449 if (stepping == FALSE) { 1450 1451 /* 1452 * Disable all interrupt sources so that the 1453 * sequencer will not be stuck by a pausing 1454 * interrupt condition while we attempt to 1455 * leave a critical section. 1456 */ 1457 simode0 = ahc_inb(ahc, SIMODE0); 1458 ahc_outb(ahc, SIMODE0, 0); 1459 simode1 = ahc_inb(ahc, SIMODE1); 1460 if ((ahc->features & AHC_DT) != 0) 1461 /* 1462 * On DT class controllers, we 1463 * use the enhanced busfree logic. 1464 * Unfortunately we cannot re-enable 1465 * busfree detection within the 1466 * current connection, so we must 1467 * leave it on while single stepping. 1468 */ 1469 ahc_outb(ahc, SIMODE1, simode1 & ENBUSFREE); 1470 else 1471 ahc_outb(ahc, SIMODE1, 0); 1472 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1473 ahc_outb(ahc, SEQCTL, ahc->seqctl | STEP); 1474 stepping = TRUE; 1475 } 1476 if ((ahc->features & AHC_DT) != 0) { 1477 ahc_outb(ahc, CLRSINT1, CLRBUSFREE); 1478 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1479 } 1480 ahc_outb(ahc, HCNTRL, ahc->unpause); 1481 while (!ahc_is_paused(ahc)) 1482 ahc_delay(200); 1483 } 1484 if (stepping) { 1485 ahc_outb(ahc, SIMODE0, simode0); 1486 ahc_outb(ahc, SIMODE1, simode1); 1487 ahc_outb(ahc, SEQCTL, ahc->seqctl); 1488 } 1489 } 1490 1491 /* 1492 * Clear any pending interrupt status. 
 */
void
ahc_clear_intstat(struct ahc_softc *ahc)
{
	/* Clear any interrupt conditions this may have caused */
	/*
	 * SSTAT1 sources are cleared first, then SSTAT0 sources, and
	 * finally the latched SCSIINT itself; each write is flushed to
	 * the device before the next is issued.
	 */
	ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
				|CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
				CLRREQINIT);
	ahc_flush_device_writes(ahc);
	ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
	ahc_flush_device_writes(ahc);
	ahc_outb(ahc, CLRINT, CLRSCSIINT);
	ahc_flush_device_writes(ahc);
}

/**************************** Debugging Routines ******************************/
#ifdef AHC_DEBUG
uint32_t ahc_debug = AHC_DEBUG_OPTS;
#endif

/*
 * Dump the contents of an SCB (hardware fields, CDB bytes, and any
 * attached scatter/gather list) to the console for debugging.
 */
void
ahc_print_scb(struct scb *scb)
{
	int i;

	struct hardware_scb *hscb = scb->hscb;

	printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
	       (void *)scb,
	       hscb->control,
	       hscb->scsiid,
	       hscb->lun,
	       hscb->cdb_len);
	printf("Shared Data: ");
	for (i = 0; i < sizeof(hscb->shared_data.cdb); i++)
		printf("%#02x", hscb->shared_data.cdb[i]);
	printf(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
	       ahc_le32toh(hscb->dataptr),
	       ahc_le32toh(hscb->datacnt),
	       ahc_le32toh(hscb->sgptr),
	       hscb->tag);
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			/* High address bits are packed into the len field. */
			printf("sg[%d] - Addr 0x%x%x : Length %d\n",
			       i,
			       (ahc_le32toh(scb->sg_list[i].len) >> 24
			        & SG_HIGH_ADDR_BITS),
			       ahc_le32toh(scb->sg_list[i].addr),
			       ahc_le32toh(scb->sg_list[i].len));
		}
	}
}

/************************* Transfer Negotiation *******************************/
/*
 * Allocate per target mode instance (ID we respond to as a target)
 * transfer negotiation data structures.
 */
static struct ahc_tmode_tstate *
ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
{
	struct ahc_tmode_tstate *master_tstate;
	struct ahc_tmode_tstate *tstate;
	int i;

	/* Channel B targets occupy the upper half of enabled_targets[]. */
	master_tstate = ahc->enabled_targets[ahc->our_id];
	if (channel == 'B') {
		scsi_id += 8;
		master_tstate = ahc->enabled_targets[ahc->our_id_b + 8];
	}
	if (ahc->enabled_targets[scsi_id] != NULL
	 && ahc->enabled_targets[scsi_id] != master_tstate)
		panic("%s: ahc_alloc_tstate - Target already allocated",
		      ahc_name(ahc));
	tstate = (struct ahc_tmode_tstate*)malloc(sizeof(*tstate),
						  M_DEVBUF, M_NOWAIT);
	if (tstate == NULL)
		return (NULL);

	/*
	 * If we have allocated a master tstate, copy user settings from
	 * the master tstate (taken from SRAM or the EEPROM) for this
	 * channel, but reset our current and goal settings to async/narrow
	 * until an initiator talks to us.
	 */
	if (master_tstate != NULL) {
		memcpy(tstate, master_tstate, sizeof(*tstate));
		memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
		tstate->ultraenb = 0;
		for (i = 0; i < AHC_NUM_TARGETS; i++) {
			memset(&tstate->transinfo[i].curr, 0,
			       sizeof(tstate->transinfo[i].curr));
			memset(&tstate->transinfo[i].goal, 0,
			       sizeof(tstate->transinfo[i].goal));
		}
	} else
		memset(tstate, 0, sizeof(*tstate));
	ahc->enabled_targets[scsi_id] = tstate;
	return (tstate);
}

#ifdef AHC_TARGET_MODE
/*
 * Free per target mode instance (ID we respond to as a target)
 * transfer negotiation data structures.
 */
static void
ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
{
	struct ahc_tmode_tstate *tstate;

	/*
	 * Don't clean up our "master" tstate.
	 * It has our default user settings.
	 */
	if (((channel == 'B' && scsi_id == ahc->our_id_b)
	  || (channel == 'A' && scsi_id == ahc->our_id))
	 && force == FALSE)
		return;

	if (channel == 'B')
		scsi_id += 8;
	tstate = ahc->enabled_targets[scsi_id];
	if (tstate != NULL)
		free(tstate, M_DEVBUF);
	ahc->enabled_targets[scsi_id] = NULL;
}
#endif

/*
 * Called when we have an active connection to a target on the bus,
 * this function finds the nearest syncrate to the input period limited
 * by the capabilities of the bus connectivity and the sync settings for
 * the target.
 */
struct ahc_syncrate *
ahc_devlimited_syncrate(struct ahc_softc *ahc,
			struct ahc_initiator_tinfo *tinfo,
			u_int *period, u_int *ppr_options, role_t role)
{
	struct	ahc_transinfo *transinfo;
	u_int	maxsync;

	/* Determine the fastest rate class the current bus mode allows. */
	if ((ahc->features & AHC_ULTRA2) != 0) {
		if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
		 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
			maxsync = AHC_SYNCRATE_DT;
		} else {
			maxsync = AHC_SYNCRATE_ULTRA;
			/* Can't do DT on an SE bus */
			*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
		}
	} else if ((ahc->features & AHC_ULTRA) != 0) {
		maxsync = AHC_SYNCRATE_ULTRA;
	} else {
		maxsync = AHC_SYNCRATE_FAST;
	}
	/*
	 * Never allow a value higher than our current goal
	 * period otherwise we may allow a target initiated
	 * negotiation to go above the limit as set by the
	 * user.  In the case of an initiator initiated
	 * sync negotiation, we limit based on the user
	 * setting.  This allows the system to still accept
	 * incoming negotiations even if target initiated
	 * negotiation is not performed.
	 */
	if (role == ROLE_TARGET)
		transinfo = &tinfo->user;
	else
		transinfo = &tinfo->goal;
	*ppr_options &= transinfo->ppr_options;
	if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
		maxsync = MAX(maxsync, AHC_SYNCRATE_ULTRA2);
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
	}
	if (transinfo->period == 0) {
		/* Period of 0 means async only; no syncrate entry applies. */
		*period = 0;
		*ppr_options = 0;
		return (NULL);
	}
	*period = MAX(*period, transinfo->period);
	return (ahc_find_syncrate(ahc, period, ppr_options, maxsync));
}

/*
 * Look up the valid period to SCSIRATE conversion in our table.
 * Return the period and offset that should be sent to the target
 * if this was the beginning of an SDTR.
 */
struct ahc_syncrate *
ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
		  u_int *ppr_options, u_int maxsync)
{
	struct ahc_syncrate *syncrate;

	/* DT transfers require the AHC_DT feature. */
	if ((ahc->features & AHC_DT) == 0)
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;

	/* Skip all DT only entries if DT is not available */
	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
	 && maxsync < AHC_SYNCRATE_ULTRA2)
		maxsync = AHC_SYNCRATE_ULTRA2;

	for (syncrate = &ahc_syncrates[maxsync];
	     syncrate->rate != NULL;
	     syncrate++) {

		/*
		 * The Ultra2 table doesn't go as low
		 * as for the Fast/Ultra cards.
		 */
		if ((ahc->features & AHC_ULTRA2) != 0
		 && (syncrate->sxfr_u2 == 0))
			break;

		if (*period <= syncrate->period) {
			/*
			 * When responding to a target that requests
			 * sync, the requested rate may fall between
			 * two rates that we can output, but still be
			 * a rate that we can receive.  Because of this,
			 * we want to respond to the target with
			 * the same rate that it sent to us even
			 * if the period we use to send data to it
			 * is lower.  Only lower the response period
			 * if we must.
			 */
			if (syncrate == &ahc_syncrates[maxsync])
				*period = syncrate->period;

			/*
			 * At some speeds, we only support
			 * ST transfers.
			 */
			if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
				*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
			break;
		}
	}

	if ((*period == 0)
	 || (syncrate->rate == NULL)
	 || ((ahc->features & AHC_ULTRA2) != 0
	  && (syncrate->sxfr_u2 == 0))) {
		/* Use asynchronous transfers. */
		*period = 0;
		syncrate = NULL;
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
	}
	return (syncrate);
}

/*
 * Convert from an entry in our syncrate table to the SCSI equivalent
 * sync "period" factor.
 */
u_int
ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
{
	struct ahc_syncrate *syncrate;

	/* Mask off everything but the rate bits for this chip family. */
	if ((ahc->features & AHC_ULTRA2) != 0)
		scsirate &= SXFR_ULTRA2;
	else
		scsirate &= SXFR;

	syncrate = &ahc_syncrates[maxsync];
	while (syncrate->rate != NULL) {

		if ((ahc->features & AHC_ULTRA2) != 0) {
			if (syncrate->sxfr_u2 == 0)
				break;
			else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2))
				return (syncrate->period);
		} else if (scsirate == (syncrate->sxfr & SXFR)) {
			return (syncrate->period);
		}
		syncrate++;
	}
	return (0); /* async */
}

/*
 * Truncate the given synchronous offset to a value the
 * current adapter type and syncrate are capable of.
1779 */ 1780 void 1781 ahc_validate_offset(struct ahc_softc *ahc, 1782 struct ahc_initiator_tinfo *tinfo, 1783 struct ahc_syncrate *syncrate, 1784 u_int *offset, int wide, role_t role) 1785 { 1786 u_int maxoffset; 1787 1788 /* Limit offset to what we can do */ 1789 if (syncrate == NULL) { 1790 maxoffset = 0; 1791 } else if ((ahc->features & AHC_ULTRA2) != 0) { 1792 maxoffset = MAX_OFFSET_ULTRA2; 1793 } else { 1794 if (wide) 1795 maxoffset = MAX_OFFSET_16BIT; 1796 else 1797 maxoffset = MAX_OFFSET_8BIT; 1798 } 1799 *offset = MIN(*offset, maxoffset); 1800 if (tinfo != NULL) { 1801 if (role == ROLE_TARGET) 1802 *offset = MIN(*offset, tinfo->user.offset); 1803 else 1804 *offset = MIN(*offset, tinfo->goal.offset); 1805 } 1806 } 1807 1808 /* 1809 * Truncate the given transfer width parameter to a value the 1810 * current adapter type is capable of. 1811 */ 1812 void 1813 ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, 1814 u_int *bus_width, role_t role) 1815 { 1816 switch (*bus_width) { 1817 default: 1818 if (ahc->features & AHC_WIDE) { 1819 /* Respond Wide */ 1820 *bus_width = MSG_EXT_WDTR_BUS_16_BIT; 1821 break; 1822 } 1823 /* FALLTHROUGH */ 1824 case MSG_EXT_WDTR_BUS_8_BIT: 1825 *bus_width = MSG_EXT_WDTR_BUS_8_BIT; 1826 break; 1827 } 1828 if (tinfo != NULL) { 1829 if (role == ROLE_TARGET) 1830 *bus_width = MIN(tinfo->user.width, *bus_width); 1831 else 1832 *bus_width = MIN(tinfo->goal.width, *bus_width); 1833 } 1834 } 1835 1836 /* 1837 * Update the bitmask of targets for which the controller should 1838 * negotiate with at the next convenient oportunity. This currently 1839 * means the next time we send the initial identify messages for 1840 * a new transaction. 
1841 */ 1842 int 1843 ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1844 struct ahc_tmode_tstate *tstate, 1845 struct ahc_initiator_tinfo *tinfo, ahc_neg_type neg_type) 1846 { 1847 u_int auto_negotiate_orig; 1848 1849 auto_negotiate_orig = tstate->auto_negotiate; 1850 if (neg_type == AHC_NEG_ALWAYS) { 1851 /* 1852 * Force our "current" settings to be 1853 * unknown so that unless a bus reset 1854 * occurs the need to renegotiate is 1855 * recorded persistently. 1856 */ 1857 if ((ahc->features & AHC_WIDE) != 0) 1858 tinfo->curr.width = AHC_WIDTH_UNKNOWN; 1859 tinfo->curr.period = AHC_PERIOD_UNKNOWN; 1860 tinfo->curr.offset = AHC_OFFSET_UNKNOWN; 1861 } 1862 if (tinfo->curr.period != tinfo->goal.period 1863 || tinfo->curr.width != tinfo->goal.width 1864 || tinfo->curr.offset != tinfo->goal.offset 1865 || tinfo->curr.ppr_options != tinfo->goal.ppr_options 1866 || (neg_type == AHC_NEG_IF_NON_ASYNC 1867 && (tinfo->goal.offset != 0 1868 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT 1869 || tinfo->goal.ppr_options != 0))) 1870 tstate->auto_negotiate |= devinfo->target_mask; 1871 else 1872 tstate->auto_negotiate &= ~devinfo->target_mask; 1873 1874 return (auto_negotiate_orig != tstate->auto_negotiate); 1875 } 1876 1877 /* 1878 * Update the user/goal/curr tables of synchronous negotiation 1879 * parameters as well as, in the case of a current or active update, 1880 * any data structures on the host controller. In the case of an 1881 * active update, the specified target is currently talking to us on 1882 * the bus, so the transfer parameter update must take effect 1883 * immediately. 
 */
void
ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		 struct ahc_syncrate *syncrate, u_int period,
		 u_int offset, u_int ppr_options, u_int type, int paused)
{
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	u_int	old_period;
	u_int	old_offset;
	u_int	old_ppr;
	int	active;
	int	update_needed;

	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
	update_needed = 0;

	/* No syncrate entry means asynchronous transfers. */
	if (syncrate == NULL) {
		period = 0;
		offset = 0;
	}

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	if ((type & AHC_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
		tinfo->user.ppr_options = ppr_options;
	}

	if ((type & AHC_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
		tinfo->goal.ppr_options = ppr_options;
	}

	old_period = tinfo->curr.period;
	old_offset = tinfo->curr.offset;
	old_ppr    = tinfo->curr.ppr_options;

	/* Only touch the hardware image when the current settings change. */
	if ((type & AHC_TRANS_CUR) != 0
	 && (old_period != period
	  || old_offset != offset
	  || old_ppr != ppr_options)) {
		u_int	scsirate;

		update_needed++;
		scsirate = tinfo->scsirate;
		if ((ahc->features & AHC_ULTRA2) != 0) {

			/* Ultra2: rate bits plus CRC/edge selection. */
			scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
			if (syncrate != NULL) {
				scsirate |= syncrate->sxfr_u2;
				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0)
					scsirate |= ENABLE_CRC;
				else
					scsirate |= SINGLE_EDGE;
			}
		} else {

			scsirate &= ~(SXFR|SOFS);
			/*
			 * Ensure Ultra mode is set properly for
			 * this target.
			 */
			tstate->ultraenb &= ~devinfo->target_mask;
			if (syncrate != NULL) {
				if (syncrate->sxfr & ULTRA_SXFR) {
					tstate->ultraenb |=
						devinfo->target_mask;
				}
				scsirate |= syncrate->sxfr & SXFR;
				scsirate |= offset & SOFS;
			}
			if (active) {
				u_int sxfrctl0;

				/* Mirror the ultra enable into SXFRCTL0. */
				sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
				sxfrctl0 &= ~FAST20;
				if (tstate->ultraenb & devinfo->target_mask)
					sxfrctl0 |= FAST20;
				ahc_outb(ahc, SXFRCTL0, sxfrctl0);
			}
		}
		if (active) {
			ahc_outb(ahc, SCSIRATE, scsirate);
			if ((ahc->features & AHC_ULTRA2) != 0)
				ahc_outb(ahc, SCSIOFFSET, offset);
		}

		tinfo->scsirate = scsirate;
		tinfo->curr.period = period;
		tinfo->curr.offset = offset;
		tinfo->curr.ppr_options = ppr_options;

		/* Notify the platform layer of the transfer change. */
		ahc_send_async(ahc, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
		if (bootverbose) {
			if (offset != 0) {
				/*
				 * offset != 0 implies syncrate != NULL (a
				 * NULL syncrate forced offset to 0 above),
				 * so syncrate->rate is safe here.
				 */
				printf("%s: target %d synchronous at %sMHz%s, "
				       "offset = 0x%x\n", ahc_name(ahc),
				       devinfo->target, syncrate->rate,
				       (ppr_options & MSG_EXT_PPR_DT_REQ)
				       ? " DT" : "", offset);
			} else {
				printf("%s: target %d using "
				       "asynchronous transfers\n",
				       ahc_name(ahc), devinfo->target);
			}
		}
	}

	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
						tinfo, AHC_NEG_TO_GOAL);

	if (update_needed)
		ahc_update_pending_scbs(ahc);
}

/*
 * Update the user/goal/curr tables of wide negotiation
 * parameters as well as, in the case of a current or active update,
 * any data structures on the host controller.  In the case of an
 * active update, the specified target is currently talking to us on
 * the bus, so the transfer parameter update must take effect
 * immediately.
 */
void
ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
	      u_int width, u_int type, int paused)
{
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	u_int	oldwidth;
	int	active;
	int	update_needed;

	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
	update_needed = 0;
	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	if ((type & AHC_TRANS_USER) != 0)
		tinfo->user.width = width;

	if ((type & AHC_TRANS_GOAL) != 0)
		tinfo->goal.width = width;

	oldwidth = tinfo->curr.width;
	/* Only touch the hardware image when the current width changes. */
	if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) {
		u_int	scsirate;

		update_needed++;
		scsirate =  tinfo->scsirate;
		scsirate &= ~WIDEXFER;
		if (width == MSG_EXT_WDTR_BUS_16_BIT)
			scsirate |= WIDEXFER;

		tinfo->scsirate = scsirate;

		if (active)
			ahc_outb(ahc, SCSIRATE, scsirate);

		tinfo->curr.width = width;

		/* Notify the platform layer of the transfer change. */
		ahc_send_async(ahc, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
		if (bootverbose) {
			printf("%s: target %d using %dbit transfers\n",
			       ahc_name(ahc), devinfo->target,
			       8 * (0x01 << width));
		}
	}

	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
						tinfo, AHC_NEG_TO_GOAL);
	if (update_needed)
		ahc_update_pending_scbs(ahc);
}

/*
 * Update the current state of tagged queuing for a given target.
 */
void
ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
	     ahc_queue_alg alg)
{
	/* Apply the queuing algorithm at the platform layer, then notify. */
	ahc_platform_set_tags(ahc, devinfo, alg);
	ahc_send_async(ahc, devinfo->channel, devinfo->target,
		       devinfo->lun, AC_TRANSFER_NEG, &alg);
}

/*
 * When the transfer settings for a connection change, update any
 * in-transit SCBs to contain the new data so the hardware will
 * be set correctly during future (re)selections.
 */
static void
ahc_update_pending_scbs(struct ahc_softc *ahc)
{
	struct	scb *pending_scb;
	int	pending_scb_count;
	int	i;
	int	paused;
	u_int	saved_scbptr;

	/*
	 * Traverse the pending SCB list and ensure that all of the
	 * SCBs there have the proper settings.
	 */
	pending_scb_count = 0;
	LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
		struct ahc_devinfo devinfo;
		struct hardware_scb *pending_hscb;
		struct ahc_initiator_tinfo *tinfo;
		struct ahc_tmode_tstate *tstate;

		ahc_scb_devinfo(ahc, &devinfo, pending_scb);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		/* Refresh the host copy of the hardware SCB. */
		pending_hscb = pending_scb->hscb;
		pending_hscb->control &= ~ULTRAENB;
		if ((tstate->ultraenb & devinfo.target_mask) != 0)
			pending_hscb->control |= ULTRAENB;
		pending_hscb->scsirate = tinfo->scsirate;
		pending_hscb->scsioffset = tinfo->curr.offset;
		if ((tstate->auto_negotiate & devinfo.target_mask) == 0
		 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
			/* Negotiation no longer required for this target. */
			pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
			pending_hscb->control &= ~MK_MESSAGE;
		}
		/* Make the updated hscb visible to the DMA engine. */
		ahc_sync_scb(ahc, pending_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		pending_scb_count++;
	}

	if (pending_scb_count == 0)
		return;

	/* Pause the sequencer before touching SCB ram, if not already. */
	if (ahc_is_paused(ahc)) {
		paused = 1;
	} else {
		paused = 0;
		ahc_pause(ahc);
	}

	saved_scbptr = ahc_inb(ahc, SCBPTR);
	/* Ensure that the hscbs down on the card match the new information */
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		struct	hardware_scb *pending_hscb;
		u_int	control;
		u_int	scb_tag;

		ahc_outb(ahc, SCBPTR, i);
		scb_tag = ahc_inb(ahc, SCB_TAG);
		pending_scb = ahc_lookup_scb(ahc, scb_tag);
		if (pending_scb == NULL)
			continue;

		/* Preserve unrelated control bits; update only ours. */
		pending_hscb = pending_scb->hscb;
		control = ahc_inb(ahc, SCB_CONTROL);
		control &= ~(ULTRAENB|MK_MESSAGE);
		control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE);
		ahc_outb(ahc, SCB_CONTROL, control);
		ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate);
		ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset);
	}
	ahc_outb(ahc, SCBPTR, saved_scbptr);

	if (paused == 0)
		ahc_unpause(ahc);
}

/**************************** Pathing Information *****************************/
/*
 * Determine the devinfo (our id, target, lun, channel, role) describing
 * the current bus session directly from the chip's registers.
 */
static void
ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	u_int	saved_scsiid;
	role_t	role;
	int	our_id;

	if (ahc_inb(ahc, SSTAT0) & TARGET)
		role = ROLE_TARGET;
	else
		role = ROLE_INITIATOR;

	if (role == ROLE_TARGET
	 && (ahc->features & AHC_MULTI_TID) != 0
	 && (ahc_inb(ahc, SEQ_FLAGS)
	   & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) {
		/* We were selected, so pull our id from TARGIDIN */
		our_id = ahc_inb(ahc, TARGIDIN) & OID;
	} else if ((ahc->features & AHC_ULTRA2) != 0)
		our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
	else
		our_id = ahc_inb(ahc, SCSIID) & OID;

	saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
	ahc_compile_devinfo(devinfo,
			    our_id,
			    SCSIID_TARGET(ahc, saved_scsiid),
			    ahc_inb(ahc, SAVED_LUN),
			    SCSIID_CHANNEL(ahc, saved_scsiid),
			    role);
}

/*
 * Return the phase table entry matching 'phase'.  Unknown phases fall
 * through to the table's trailing default entry.
 */
struct ahc_phase_table_entry*
ahc_lookup_phase_entry(int phase)
{
	struct ahc_phase_table_entry *entry;
	struct ahc_phase_table_entry *last_entry;

	/*
	 * num_phases doesn't include the default entry which
	 * will be returned if the phase doesn't match.
	 */
	last_entry = &ahc_phase_table[num_phases];
	for (entry = ahc_phase_table; entry < last_entry; entry++) {
		if (phase == entry->phase)
			break;
	}
	return (entry);
}

/*
 * Fill in a devinfo struct from explicit parameters.  Channel 'B'
 * targets occupy target offsets 8-15.
 */
void
ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
		    u_int lun, char channel, role_t role)
{
	devinfo->our_scsiid = our_id;
	devinfo->target = target;
	devinfo->lun = lun;
	devinfo->target_offset = target;
	devinfo->channel = channel;
	devinfo->role = role;
	if (channel == 'B')
		devinfo->target_offset += 8;
	devinfo->target_mask = (0x01 << devinfo->target_offset);
}

/*
 * Print the "controller:channel:target:lun: " prefix for a devinfo.
 */
void
ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	printf("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel,
	       devinfo->target, devinfo->lun);
}

/*
 * Build a devinfo from the addressing information stored in an SCB.
 */
static void
ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		struct scb *scb)
{
	role_t	role;
	int	our_id;

	our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
	role = ROLE_INITIATOR;
	if ((scb->flags & SCB_TARGET_SCB) != 0)
		role = ROLE_TARGET;
	ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
			    SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role);
}


/************************ Message Phase Processing ****************************/
/*
 * Assert ATN on the bus.  On non-DT controllers the current input
 * signals are folded into the SCSISIGO write as well — presumably those
 * chips require the full signal state to be rewritten (TODO confirm
 * against the chip reference).
 */
static void
ahc_assert_atn(struct ahc_softc *ahc)
{
	u_int scsisigo;

	scsisigo = ATNO;
	if ((ahc->features & AHC_DT) == 0)
		scsisigo |= ahc_inb(ahc, SCSISIGI);
	ahc_outb(ahc, SCSISIGO, scsisigo);
}

/*
 * When an initiator transaction with the MK_MESSAGE flag either reconnects
 * or enters the initial message out phase, we are
 * interrupted.  Fill our outgoing message buffer with the appropriate
 * message and begin handling the message phase(s) manually.
 */
static void
ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
			   struct scb *scb)
{
	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahc->msgout_index = 0;
	ahc->msgout_len = 0;

	if ((scb->flags & SCB_DEVICE_RESET) == 0
	 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
		u_int identify_msg;

		/* Lead with IDENTIFY (plus disconnect privilege if enabled). */
		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
		if ((scb->hscb->control & DISCENB) != 0)
			identify_msg |= MSG_IDENTIFY_DISCFLAG;
		ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
		ahc->msgout_len++;

		/* Follow with the two-byte tag message when tagging. */
		if ((scb->hscb->control & TAG_ENB) != 0) {
			ahc->msgout_buf[ahc->msgout_index++] =
			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
			ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
			ahc->msgout_len += 2;
		}
	}

	if (scb->flags & SCB_DEVICE_RESET) {
		ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printf("Bus Device Reset Message Sent\n");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else if ((scb->flags & SCB_ABORT) != 0) {
		if ((scb->hscb->control & TAG_ENB) != 0)
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
		else
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printf("Abort%s Message Sent\n",
		       (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
		ahc_build_transfer_msg(ahc, devinfo);
	} else {
		/* MK_MESSAGE was set but we have nothing to say — fatal. */
		printf("ahc_intr: AWAITING_MSG for an SCB that "
		       "does not have a waiting message\n");
		printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
		       devinfo->target_mask);
		panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
		      "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
		      ahc_inb(ahc, MSG_OUT), scb->flags);
	}

	/*
	 * Clear the MK_MESSAGE flag from the SCB so we aren't
	 * asked to send this message again.
	 */
	ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
	scb->hscb->control &= ~MK_MESSAGE;
	ahc->msgout_index = 0;
	ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
}

/*
 * Build an appropriate transfer negotiation message for the
 * currently active target.
 */
static void
ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	/*
	 * We need to initiate transfer negotiations.
	 * If our current and goal settings are identical,
	 * we want to renegotiate due to a check condition.
	 */
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	struct	ahc_syncrate *rate;
	int	dowide;
	int	dosync;
	int	doppr;
	u_int	period;
	u_int	ppr_options;
	u_int	offset;

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	/*
	 * Filter our period based on the current connection.
	 * If we can't perform DT transfers on this segment (not in LVD
	 * mode for instance), then our decision to issue a PPR message
	 * may change.
	 */
	period = tinfo->goal.period;
	offset = tinfo->goal.offset;
	ppr_options = tinfo->goal.ppr_options;
	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		ppr_options = 0;
	rate = ahc_devlimited_syncrate(ahc, tinfo, &period,
				       &ppr_options, devinfo->role);
	dowide = tinfo->curr.width != tinfo->goal.width;
	dosync = tinfo->curr.offset != offset || tinfo->curr.period != period;
	/*
	 * Only use PPR if we have options that need it, even if the device
	 * claims to support it.  There might be an expander in the way
	 * that doesn't.
	 */
	doppr = ppr_options != 0;

	/* Nothing differs — renegotiate the goal settings anyway. */
	if (!dowide && !dosync && !doppr) {
		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
		dosync = tinfo->goal.offset != 0;
	}

	if (!dowide && !dosync && !doppr) {
		/*
		 * Force async with a WDTR message if we have a wide bus,
		 * or just issue an SDTR with a 0 offset.
		 */
		if ((ahc->features & AHC_WIDE) != 0)
			dowide = 1;
		else
			dosync = 1;

		if (bootverbose) {
			ahc_print_devinfo(ahc, devinfo);
			printf("Ensuring async\n");
		}
	}

	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		doppr = 0;

	/*
	 * Both the PPR message and SDTR message require the
	 * goal syncrate to be limited to what the target device
	 * is capable of handling (based on whether an LVD->SE
	 * expander is on the bus), so combine these two cases.
	 * Regardless, guarantee that if we are using WDTR and SDTR
	 * messages that WDTR comes first.
	 */
	if (doppr || (dosync && !dowide)) {

		offset = tinfo->goal.offset;
		ahc_validate_offset(ahc, tinfo, rate, &offset,
				    doppr ? tinfo->goal.width
					  : tinfo->curr.width,
				    devinfo->role);
		if (doppr) {
			ahc_construct_ppr(ahc, devinfo, period, offset,
					  tinfo->goal.width, ppr_options);
		} else {
			ahc_construct_sdtr(ahc, devinfo, period, offset);
		}
	} else {
		ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width);
	}
}

/*
 * Build a synchronous negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		   u_int period, u_int offset)
{
	/* A zero offset means async; advertise the async period. */
	if (offset == 0)
		period = AHC_ASYNC_XFER_PERIOD;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR;
	ahc->msgout_buf[ahc->msgout_index++] = period;
	ahc->msgout_buf[ahc->msgout_index++] = offset;
	ahc->msgout_len += 5;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       devinfo->lun, period, offset);
	}
}

/*
 * Build a wide negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		   u_int bus_width)
{
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR;
	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
	ahc->msgout_len += 4;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending WDTR %x\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       devinfo->lun, bus_width);
	}
}

/*
 * Build a parallel protocol request message in our message
 * buffer based on the input parameters.
 */
static void
ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		  u_int period, u_int offset, u_int bus_width,
		  u_int ppr_options)
{
	/* A zero offset means async; advertise the async period. */
	if (offset == 0)
		period = AHC_ASYNC_XFER_PERIOD;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR;
	ahc->msgout_buf[ahc->msgout_index++] = period;
	/* Byte 4 of the PPR message is reserved and sent as zero. */
	ahc->msgout_buf[ahc->msgout_index++] = 0;
	ahc->msgout_buf[ahc->msgout_index++] = offset;
	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
	ahc->msgout_buf[ahc->msgout_index++] = ppr_options;
	ahc->msgout_len += 8;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
		       "offset %x, ppr_options %x\n", ahc_name(ahc),
		       devinfo->channel, devinfo->target, devinfo->lun,
		       bus_width, period, offset, ppr_options);
	}
}

/*
 * Clear any active message state.
 */
static void
ahc_clear_msg_state(struct ahc_softc *ahc)
{
	ahc->msgout_len = 0;
	ahc->msgin_index = 0;
	ahc->msg_type = MSG_TYPE_NONE;
	if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) {
		/*
		 * The target didn't care to respond to our
		 * message request, so clear ATN.
		 */
		ahc_outb(ahc, CLRSINT1, CLRATNO);
	}
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
	ahc_outb(ahc, SEQ_FLAGS2,
		 ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
}

/*
 * Recover from a protocol violation reported by the sequencer:
 * classify what went wrong from SEQ_FLAGS/phase registers and either
 * abort the offending transaction or reset the bus.
 */
static void
ahc_handle_proto_violation(struct ahc_softc *ahc)
{
	struct	ahc_devinfo devinfo;
	struct	scb *scb;
	u_int	scbid;
	u_int	seq_flags;
	u_int	curphase;
	u_int	lastphase;
	int	found;

	ahc_fetch_devinfo(ahc, &devinfo);
	scbid = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scbid);
	seq_flags = ahc_inb(ahc, SEQ_FLAGS);
	curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
	lastphase = ahc_inb(ahc, LASTPHASE);
	if ((seq_flags & NOT_IDENTIFIED) != 0) {

		/*
		 * The reconnecting target either did not send an
		 * identify message, or did, but we didn't find an SCB
		 * to match.
		 */
		ahc_print_devinfo(ahc, &devinfo);
		printf("Target did not send an IDENTIFY message. "
		       "LASTPHASE = 0x%x.\n", lastphase);
		scb = NULL;
	} else if (scb == NULL) {
		/*
		 * We don't seem to have an SCB active for this
		 * transaction.  Print an error and reset the bus.
		 */
		ahc_print_devinfo(ahc, &devinfo);
		printf("No SCB found during protocol violation\n");
		goto proto_violation_reset;
	} else {
		ahc_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
		if ((seq_flags & NO_CDB_SENT) != 0) {
			ahc_print_path(ahc, scb);
			printf("No or incomplete CDB sent to device.\n");
		} else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) {
			/*
			 * The target never bothered to provide status to
			 * us prior to completing the command.  Since we don't
			 * know the disposition of this command, we must attempt
			 * to abort it.  Assert ATN and prepare to send an abort
			 * message.
			 */
			ahc_print_path(ahc, scb);
			printf("Completed command without status.\n");
		} else {
			ahc_print_path(ahc, scb);
			printf("Unknown protocol violation.\n");
			ahc_dump_card_state(ahc);
		}
	}
	if ((lastphase & ~P_DATAIN_DT) == 0
	 || lastphase == P_COMMAND) {
proto_violation_reset:
		/*
		 * Target either went directly to data/command
		 * phase or didn't respond to our ATN.
		 * The only safe thing to do is to blow
		 * it away with a bus reset.
		 */
		found = ahc_reset_channel(ahc, 'A', TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahc_name(ahc), 'A', found);
	} else {
		/*
		 * Leave the selection hardware off in case
		 * this abort attempt will affect yet to
		 * be sent commands.
		 */
		ahc_outb(ahc, SCSISEQ,
			 ahc_inb(ahc, SCSISEQ) & ~ENSELO);
		ahc_assert_atn(ahc);
		ahc_outb(ahc, MSG_OUT, HOST_MSG);
		if (scb == NULL) {
			/* No SCB to flag — queue the abort message directly. */
			ahc_print_devinfo(ahc, &devinfo);
			ahc->msgout_buf[0] = MSG_ABORT_TASK;
			ahc->msgout_len = 1;
			ahc->msgout_index = 0;
			ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		} else {
			ahc_print_path(ahc, scb);
			scb->flags |= SCB_ABORT;
		}
		printf("Protocol violation %s. Attempting to abort.\n",
		       ahc_lookup_phase_entry(curphase)->phasemsg);
	}
}

/*
 * Manual message loop handler.
 */
static void
ahc_handle_message_phase(struct ahc_softc *ahc)
{
	struct	ahc_devinfo devinfo;
	u_int	bus_phase;
	int	end_session;

	ahc_fetch_devinfo(ahc, &devinfo);
	end_session = FALSE;
	bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;

reswitch:
	switch (ahc->msg_type) {
	case MSG_TYPE_INITIATOR_MSGOUT:
	{
		int lastbyte;
		int phasemis;
		int msgdone;

		if (ahc->msgout_len == 0)
			panic("HOST_MSG_LOOP interrupt with no active message");

#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
			ahc_print_devinfo(ahc, &devinfo);
			printf("INITIATOR_MSG_OUT");
		}
#endif
		phasemis = bus_phase != P_MESGOUT;
		if (phasemis) {
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
				printf(" PHASEMIS %s\n",
				       ahc_lookup_phase_entry(bus_phase)
							     ->phasemsg);
			}
#endif
			if (bus_phase == P_MESGIN) {
				/*
				 * Change gears and see if
				 * this message is of interest to
				 * us or should be passed back to
				 * the sequencer.
				 */
				ahc_outb(ahc, CLRSINT1, CLRATNO);
				ahc->send_msg_perror = FALSE;
				ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN;
				ahc->msgin_index = 0;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		/* A parity error was flagged — reply with MESSAGE PARITY. */
		if (ahc->send_msg_perror) {
			ahc_outb(ahc, CLRSINT1, CLRATNO);
			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
				printf(" byte 0x%x\n", ahc->send_msg_perror);
#endif
			ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
			break;
		}

		msgdone = ahc->msgout_index == ahc->msgout_len;
		if (msgdone) {
			/*
			 * The target has requested a retry.
			 * Re-assert ATN, reset our message index to
			 * 0, and try again.
			 */
			ahc->msgout_index = 0;
			ahc_assert_atn(ahc);
		}

		lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
		if (lastbyte) {
			/* Last byte is signified by dropping ATN */
			ahc_outb(ahc, CLRSINT1, CLRATNO);
		}

		/*
		 * Clear our interrupt status and present
		 * the next byte on the bus.
		 */
		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
			printf(" byte 0x%x\n",
			       ahc->msgout_buf[ahc->msgout_index]);
#endif
		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
		break;
	}
	case MSG_TYPE_INITIATOR_MSGIN:
	{
		int phasemis;
		int message_done;

#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
			ahc_print_devinfo(ahc, &devinfo);
			printf("INITIATOR_MSG_IN");
		}
#endif
		phasemis = bus_phase != P_MESGIN;
		if (phasemis) {
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
				printf(" PHASEMIS %s\n",
				       ahc_lookup_phase_entry(bus_phase)
							     ->phasemsg);
			}
#endif
			ahc->msgin_index = 0;
			/*
			 * If we owe the target a parity-error report or
			 * have a fully unsent outgoing message, switch to
			 * message-out handling.
			 */
			if (bus_phase == P_MESGOUT
			 && (ahc->send_msg_perror == TRUE
			  || (ahc->msgout_len != 0
			   && ahc->msgout_index == 0))) {
				ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		/* Pull the byte in without acking it */
		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);
#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
			printf(" byte 0x%x\n",
			       ahc->msgin_buf[ahc->msgin_index]);
#endif

		message_done = ahc_parse_msg(ahc, &devinfo);

		if (message_done) {
			/*
			 * Clear our incoming message buffer in case there
			 * is another message following this one.
			 */
			ahc->msgin_index = 0;

			/*
			 * If this message elicited a response,
			 * assert ATN so the target takes us to the
			 * message out phase.
			 */
			if (ahc->msgout_len != 0) {
#ifdef AHC_DEBUG
				if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
					ahc_print_devinfo(ahc, &devinfo);
					printf("Asserting ATN for response\n");
				}
#endif
				ahc_assert_atn(ahc);
			}
		} else
			ahc->msgin_index++;

		if (message_done == MSGLOOP_TERMINATED) {
			end_session = TRUE;
		} else {
			/* Ack the byte */
			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
			/* Reading SCSIDATL releases ACK on the bus. */
			ahc_inb(ahc, SCSIDATL);
		}
		break;
	}
	case MSG_TYPE_TARGET_MSGIN:
	{
		int msgdone;
		int msgout_request;

		if (ahc->msgout_len == 0)
			panic("Target MSGIN with no active message");

		/*
		 * If we interrupted a mesgout session, the initiator
		 * will not know this until our first REQ.  So, we
		 * only honor mesgout requests after we've sent our
		 * first byte.
		 */
		if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
		 && ahc->msgout_index > 0)
			msgout_request = TRUE;
		else
			msgout_request = FALSE;

		if (msgout_request) {

			/*
			 * Change gears and see if
			 * this message is of interest to
			 * us or should be passed back to
			 * the sequencer.
			 */
			ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
			ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
			ahc->msgin_index = 0;
			/* Dummy read to REQ for first byte */
			ahc_inb(ahc, SCSIDATL);
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
			break;
		}

		msgdone = ahc->msgout_index == ahc->msgout_len;
		if (msgdone) {
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
			end_session = TRUE;
			break;
		}

		/*
		 * Present the next byte on the bus.
		 */
		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
		break;
	}
	case MSG_TYPE_TARGET_MSGOUT:
	{
		int lastbyte;
		int msgdone;

		/*
		 * The initiator signals that this is
		 * the last byte by dropping ATN.
		 */
		lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;

		/*
		 * Read the latched byte, but turn off SPIOEN first
		 * so that we don't inadvertently cause a REQ for the
		 * next byte.
		 */
		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
		msgdone = ahc_parse_msg(ahc, &devinfo);
		if (msgdone == MSGLOOP_TERMINATED) {
			/*
			 * The message is *really* done in that it caused
			 * us to go to bus free.  The sequencer has already
			 * been reset at this point, so pull the ejection
			 * handle.
			 */
			return;
		}

		ahc->msgin_index++;

		/*
		 * XXX Read spec about initiator dropping ATN too soon
		 * and use msgdone to detect it.
		 */
		if (msgdone == MSGLOOP_MSGCOMPLETE) {
			ahc->msgin_index = 0;

			/*
			 * If this message elicited a response, transition
			 * to the Message in phase and send it.
			 */
			if (ahc->msgout_len != 0) {
				ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
				ahc_outb(ahc, SXFRCTL0,
					 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
				ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
				ahc->msgin_index = 0;
				break;
			}
		}

		if (lastbyte)
			end_session = TRUE;
		else {
			/* Ask for the next byte. */
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
		}

		break;
	}
	default:
		panic("Unknown REQINIT message type");
	}

	/* Hand control back to the sequencer: exit or continue the loop. */
	if (end_session) {
		ahc_clear_msg_state(ahc);
		ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
	} else
		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
}

/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, return true only if the target saw the full
 * message.  If "full" is false, return true if the target saw at
 * least the first byte of the message.
 */
static int
ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
{
	int found;
	u_int index;

	found = FALSE;
	index = 0;

	/* Walk the outgoing buffer one message at a time. */
	while (index < ahc->msgout_len) {
		if (ahc->msgout_buf[index] == MSG_EXTENDED) {
			u_int end_index;

			/* Byte after the opcode is the extended msg length. */
			end_index = index + 1 + ahc->msgout_buf[index + 1];
			if (ahc->msgout_buf[index+2] == msgval
			 && type == AHCMSG_EXT) {

				if (full) {
					if (ahc->msgout_index > end_index)
						found = TRUE;
				} else if (ahc->msgout_index > index)
					found = TRUE;
			}
			index = end_index;
		} else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK
			&& ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {

			/* Skip tag type and tag id or residue param*/
			index += 2;
		} else {
			/* Single byte message */
			if (type == AHCMSG_1B
			 && ahc->msgout_buf[index] == msgval
			 && ahc->msgout_index > index)
				found = TRUE;
			index++;
		}

		if (found)
			break;
	}
	return (found);
}

/*
 * Wait for a complete incoming message, parse it, and respond accordingly.
2983 */ 2984 static int 2985 ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2986 { 2987 struct ahc_initiator_tinfo *tinfo; 2988 struct ahc_tmode_tstate *tstate; 2989 int reject; 2990 int done; 2991 int response; 2992 u_int targ_scsirate; 2993 2994 done = MSGLOOP_IN_PROG; 2995 response = FALSE; 2996 reject = FALSE; 2997 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 2998 devinfo->target, &tstate); 2999 targ_scsirate = tinfo->scsirate; 3000 3001 /* 3002 * Parse as much of the message as is available, 3003 * rejecting it if we don't support it. When 3004 * the entire message is available and has been 3005 * handled, return MSGLOOP_MSGCOMPLETE, indicating 3006 * that we have parsed an entire message. 3007 * 3008 * In the case of extended messages, we accept the length 3009 * byte outright and perform more checking once we know the 3010 * extended message type. 3011 */ 3012 switch (ahc->msgin_buf[0]) { 3013 case MSG_DISCONNECT: 3014 case MSG_SAVEDATAPOINTER: 3015 case MSG_CMDCOMPLETE: 3016 case MSG_RESTOREPOINTERS: 3017 case MSG_IGN_WIDE_RESIDUE: 3018 /* 3019 * End our message loop as these are messages 3020 * the sequencer handles on its own. 3021 */ 3022 done = MSGLOOP_TERMINATED; 3023 break; 3024 case MSG_MESSAGE_REJECT: 3025 response = ahc_handle_msg_reject(ahc, devinfo); 3026 /* FALLTHROUGH */ 3027 case MSG_NOOP: 3028 done = MSGLOOP_MSGCOMPLETE; 3029 break; 3030 case MSG_EXTENDED: 3031 { 3032 /* Wait for enough of the message to begin validation */ 3033 if (ahc->msgin_index < 2) 3034 break; 3035 switch (ahc->msgin_buf[2]) { 3036 case MSG_EXT_SDTR: 3037 { 3038 struct ahc_syncrate *syncrate; 3039 u_int period; 3040 u_int ppr_options; 3041 u_int offset; 3042 u_int saved_offset; 3043 3044 if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) { 3045 reject = TRUE; 3046 break; 3047 } 3048 3049 /* 3050 * Wait until we have both args before validating 3051 * and acting on this message. 
3052 * 3053 * Add one to MSG_EXT_SDTR_LEN to account for 3054 * the extended message preamble. 3055 */ 3056 if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1)) 3057 break; 3058 3059 period = ahc->msgin_buf[3]; 3060 ppr_options = 0; 3061 saved_offset = offset = ahc->msgin_buf[4]; 3062 syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, 3063 &ppr_options, 3064 devinfo->role); 3065 ahc_validate_offset(ahc, tinfo, syncrate, &offset, 3066 targ_scsirate & WIDEXFER, 3067 devinfo->role); 3068 if (bootverbose) { 3069 printf("(%s:%c:%d:%d): Received " 3070 "SDTR period %x, offset %x\n\t" 3071 "Filtered to period %x, offset %x\n", 3072 ahc_name(ahc), devinfo->channel, 3073 devinfo->target, devinfo->lun, 3074 ahc->msgin_buf[3], saved_offset, 3075 period, offset); 3076 } 3077 ahc_set_syncrate(ahc, devinfo, 3078 syncrate, period, 3079 offset, ppr_options, 3080 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3081 /*paused*/TRUE); 3082 3083 /* 3084 * See if we initiated Sync Negotiation 3085 * and didn't have to fall down to async 3086 * transfers. 
3087 */ 3088 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) { 3089 /* We started it */ 3090 if (saved_offset != offset) { 3091 /* Went too low - force async */ 3092 reject = TRUE; 3093 } 3094 } else { 3095 /* 3096 * Send our own SDTR in reply 3097 */ 3098 if (bootverbose 3099 && devinfo->role == ROLE_INITIATOR) { 3100 printf("(%s:%c:%d:%d): Target " 3101 "Initiated SDTR\n", 3102 ahc_name(ahc), devinfo->channel, 3103 devinfo->target, devinfo->lun); 3104 } 3105 ahc->msgout_index = 0; 3106 ahc->msgout_len = 0; 3107 ahc_construct_sdtr(ahc, devinfo, 3108 period, offset); 3109 ahc->msgout_index = 0; 3110 response = TRUE; 3111 } 3112 done = MSGLOOP_MSGCOMPLETE; 3113 break; 3114 } 3115 case MSG_EXT_WDTR: 3116 { 3117 u_int bus_width; 3118 u_int saved_width; 3119 u_int sending_reply; 3120 3121 sending_reply = FALSE; 3122 if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) { 3123 reject = TRUE; 3124 break; 3125 } 3126 3127 /* 3128 * Wait until we have our arg before validating 3129 * and acting on this message. 3130 * 3131 * Add one to MSG_EXT_WDTR_LEN to account for 3132 * the extended message preamble. 3133 */ 3134 if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1)) 3135 break; 3136 3137 bus_width = ahc->msgin_buf[3]; 3138 saved_width = bus_width; 3139 ahc_validate_width(ahc, tinfo, &bus_width, 3140 devinfo->role); 3141 if (bootverbose) { 3142 printf("(%s:%c:%d:%d): Received WDTR " 3143 "%x filtered to %x\n", 3144 ahc_name(ahc), devinfo->channel, 3145 devinfo->target, devinfo->lun, 3146 saved_width, bus_width); 3147 } 3148 3149 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) { 3150 /* 3151 * Don't send a WDTR back to the 3152 * target, since we asked first. 3153 * If the width went higher than our 3154 * request, reject it. 3155 */ 3156 if (saved_width > bus_width) { 3157 reject = TRUE; 3158 printf("(%s:%c:%d:%d): requested %dBit " 3159 "transfers. 
Rejecting...\n", 3160 ahc_name(ahc), devinfo->channel, 3161 devinfo->target, devinfo->lun, 3162 8 * (0x01 << bus_width)); 3163 bus_width = 0; 3164 } 3165 } else { 3166 /* 3167 * Send our own WDTR in reply 3168 */ 3169 if (bootverbose 3170 && devinfo->role == ROLE_INITIATOR) { 3171 printf("(%s:%c:%d:%d): Target " 3172 "Initiated WDTR\n", 3173 ahc_name(ahc), devinfo->channel, 3174 devinfo->target, devinfo->lun); 3175 } 3176 ahc->msgout_index = 0; 3177 ahc->msgout_len = 0; 3178 ahc_construct_wdtr(ahc, devinfo, bus_width); 3179 ahc->msgout_index = 0; 3180 response = TRUE; 3181 sending_reply = TRUE; 3182 } 3183 /* 3184 * After a wide message, we are async, but 3185 * some devices don't seem to honor this portion 3186 * of the spec. Force a renegotiation of the 3187 * sync component of our transfer agreement even 3188 * if our goal is async. By updating our width 3189 * after forcing the negotiation, we avoid 3190 * renegotiating for width. 3191 */ 3192 ahc_update_neg_request(ahc, devinfo, tstate, 3193 tinfo, AHC_NEG_ALWAYS); 3194 ahc_set_width(ahc, devinfo, bus_width, 3195 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3196 /*paused*/TRUE); 3197 if (sending_reply == FALSE && reject == FALSE) { 3198 3199 /* 3200 * We will always have an SDTR to send. 3201 */ 3202 ahc->msgout_index = 0; 3203 ahc->msgout_len = 0; 3204 ahc_build_transfer_msg(ahc, devinfo); 3205 ahc->msgout_index = 0; 3206 response = TRUE; 3207 } 3208 done = MSGLOOP_MSGCOMPLETE; 3209 break; 3210 } 3211 case MSG_EXT_PPR: 3212 { 3213 struct ahc_syncrate *syncrate; 3214 u_int period; 3215 u_int offset; 3216 u_int bus_width; 3217 u_int ppr_options; 3218 u_int saved_width; 3219 u_int saved_offset; 3220 u_int saved_ppr_options; 3221 3222 if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) { 3223 reject = TRUE; 3224 break; 3225 } 3226 3227 /* 3228 * Wait until we have all args before validating 3229 * and acting on this message. 3230 * 3231 * Add one to MSG_EXT_PPR_LEN to account for 3232 * the extended message preamble. 
3233 */ 3234 if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1)) 3235 break; 3236 3237 period = ahc->msgin_buf[3]; 3238 offset = ahc->msgin_buf[5]; 3239 bus_width = ahc->msgin_buf[6]; 3240 saved_width = bus_width; 3241 ppr_options = ahc->msgin_buf[7]; 3242 /* 3243 * According to the spec, a DT only 3244 * period factor with no DT option 3245 * set implies async. 3246 */ 3247 if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 3248 && period == 9) 3249 offset = 0; 3250 saved_ppr_options = ppr_options; 3251 saved_offset = offset; 3252 3253 /* 3254 * Mask out any options we don't support 3255 * on any controller. Transfer options are 3256 * only available if we are negotiating wide. 3257 */ 3258 ppr_options &= MSG_EXT_PPR_DT_REQ; 3259 if (bus_width == 0) 3260 ppr_options = 0; 3261 3262 ahc_validate_width(ahc, tinfo, &bus_width, 3263 devinfo->role); 3264 syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, 3265 &ppr_options, 3266 devinfo->role); 3267 ahc_validate_offset(ahc, tinfo, syncrate, 3268 &offset, bus_width, 3269 devinfo->role); 3270 3271 if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) { 3272 /* 3273 * If we are unable to do any of the 3274 * requested options (we went too low), 3275 * then we'll have to reject the message. 
3276 */ 3277 if (saved_width > bus_width 3278 || saved_offset != offset 3279 || saved_ppr_options != ppr_options) { 3280 reject = TRUE; 3281 period = 0; 3282 offset = 0; 3283 bus_width = 0; 3284 ppr_options = 0; 3285 syncrate = NULL; 3286 } 3287 } else { 3288 if (devinfo->role != ROLE_TARGET) 3289 printf("(%s:%c:%d:%d): Target " 3290 "Initiated PPR\n", 3291 ahc_name(ahc), devinfo->channel, 3292 devinfo->target, devinfo->lun); 3293 else 3294 printf("(%s:%c:%d:%d): Initiator " 3295 "Initiated PPR\n", 3296 ahc_name(ahc), devinfo->channel, 3297 devinfo->target, devinfo->lun); 3298 ahc->msgout_index = 0; 3299 ahc->msgout_len = 0; 3300 ahc_construct_ppr(ahc, devinfo, period, offset, 3301 bus_width, ppr_options); 3302 ahc->msgout_index = 0; 3303 response = TRUE; 3304 } 3305 if (bootverbose) { 3306 printf("(%s:%c:%d:%d): Received PPR width %x, " 3307 "period %x, offset %x,options %x\n" 3308 "\tFiltered to width %x, period %x, " 3309 "offset %x, options %x\n", 3310 ahc_name(ahc), devinfo->channel, 3311 devinfo->target, devinfo->lun, 3312 saved_width, ahc->msgin_buf[3], 3313 saved_offset, saved_ppr_options, 3314 bus_width, period, offset, ppr_options); 3315 } 3316 ahc_set_width(ahc, devinfo, bus_width, 3317 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3318 /*paused*/TRUE); 3319 ahc_set_syncrate(ahc, devinfo, 3320 syncrate, period, 3321 offset, ppr_options, 3322 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3323 /*paused*/TRUE); 3324 done = MSGLOOP_MSGCOMPLETE; 3325 break; 3326 } 3327 default: 3328 /* Unknown extended message. Reject it. 
			 */
			reject = TRUE;
			break;
		}
		break;
	}
#ifdef AHC_TARGET_MODE
	case MSG_BUS_DEV_RESET:
		ahc_handle_devreset(ahc, devinfo,
				    CAM_BDR_SENT,
				    "Bus Device Reset Received",
				    /*verbose_level*/0);
		ahc_restart(ahc);
		done = MSGLOOP_TERMINATED;
		break;
	case MSG_ABORT_TAG:
	case MSG_ABORT:
	case MSG_CLEAR_QUEUE:
	{
		int tag;

		/* Target mode messages */
		if (devinfo->role != ROLE_TARGET) {
			reject = TRUE;
			break;
		}
		/* MSG_ABORT_TAG aborts only the tagged transaction */
		tag = SCB_LIST_NULL;
		if (ahc->msgin_buf[0] == MSG_ABORT_TAG)
			tag = ahc_inb(ahc, INITIATOR_TAG);
		ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
			       devinfo->lun, tag, ROLE_TARGET,
			       CAM_REQ_ABORTED);

		tstate = ahc->enabled_targets[devinfo->our_scsiid];
		if (tstate != NULL) {
			struct ahc_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[devinfo->lun];
			if (lstate != NULL) {
				/* Notify the peripheral driver of the event */
				ahc_queue_lstate_event(ahc, lstate,
						       devinfo->our_scsiid,
						       ahc->msgin_buf[0],
						       /*arg*/tag);
				ahc_send_lstate_events(ahc, lstate);
			}
		}
		ahc_restart(ahc);
		done = MSGLOOP_TERMINATED;
		break;
	}
#endif
	case MSG_TERM_IO_PROC:
	default:
		reject = TRUE;
		break;
	}

	if (reject) {
		/*
		 * Setup to reject the message.
		 */
		ahc->msgout_index = 0;
		ahc->msgout_len = 1;
		ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
		done = MSGLOOP_MSGCOMPLETE;
		response = TRUE;
	}

	if (done != MSGLOOP_IN_PROG && !response)
		/* Clear the outgoing message buffer */
		ahc->msgout_len = 0;

	return (done);
}

/*
 * Process a message reject message.
 *
 * Returns non-zero if a response message has been queued
 * for transmission to the target.
 */
static int
ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	/*
	 * What we care about here is if we had an
	 * outstanding SDTR or WDTR message for this
	 * target.  If we did, this is a signal that
	 * the target is refusing negotiation.
	 */
	struct scb *scb;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;
	u_int scb_index;
	u_int last_msg;
	int response = 0;

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
				    devinfo->our_scsiid,
				    devinfo->target, &tstate);
	/* Might be necessary */
	last_msg = ahc_inb(ahc, LAST_MSG);

	if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
		/*
		 * Target does not support the PPR message.
		 * Attempt to negotiate SPI-2 style.
		 */
		if (bootverbose) {
			printf("(%s:%c:%d:%d): PPR Rejected. "
			       "Trying WDTR/SDTR\n",
			       ahc_name(ahc), devinfo->channel,
			       devinfo->target, devinfo->lun);
		}
		tinfo->goal.ppr_options = 0;
		tinfo->curr.transport_version = 2;
		tinfo->goal.transport_version = 2;
		ahc->msgout_index = 0;
		ahc->msgout_len = 0;
		ahc_build_transfer_msg(ahc, devinfo);
		ahc->msgout_index = 0;
		response = 1;
	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {

		/* note 8bit xfers */
		printf("(%s:%c:%d:%d): refuses WIDE negotiation. Using "
		       "8bit transfers\n", ahc_name(ahc),
		       devinfo->channel, devinfo->target, devinfo->lun);
		ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
			      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
			      /*paused*/TRUE);
		/*
		 * No need to clear the sync rate.  If the target
		 * did not accept the command, our syncrate is
		 * unaffected.  If the target started the negotiation,
		 * but rejected our response, we already cleared the
		 * sync rate before sending our WDTR.
		 */
		if (tinfo->goal.offset != tinfo->curr.offset) {

			/* Start the sync negotiation */
			ahc->msgout_index = 0;
			ahc->msgout_len = 0;
			ahc_build_transfer_msg(ahc, devinfo);
			ahc->msgout_index = 0;
			response = 1;
		}
	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
		/* note asynch xfers and clear flag */
		ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0,
				 /*offset*/0, /*ppr_options*/0,
				 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
				 /*paused*/TRUE);
		printf("(%s:%c:%d:%d): refuses synchronous negotiation. "
		       "Using asynchronous transfers\n",
		       ahc_name(ahc), devinfo->channel,
		       devinfo->target, devinfo->lun);
	} else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
		int tag_type;
		int mask;

		tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);

		if (tag_type == MSG_SIMPLE_TASK) {
			printf("(%s:%c:%d:%d): refuses tagged commands. "
			       "Performing non-tagged I/O\n", ahc_name(ahc),
			       devinfo->channel, devinfo->target, devinfo->lun);
			ahc_set_tags(ahc, devinfo, AHC_QUEUE_NONE);
			mask = ~0x23;
		} else {
			printf("(%s:%c:%d:%d): refuses %s tagged commands. "
			       "Performing simple queue tagged I/O only\n",
			       ahc_name(ahc), devinfo->channel, devinfo->target,
			       devinfo->lun, tag_type == MSG_ORDERED_TASK
			       ? "ordered" : "head of queue");
			ahc_set_tags(ahc, devinfo, AHC_QUEUE_BASIC);
			mask = ~0x03;
		}

		/*
		 * Resend the identify for this CCB as the target
		 * may believe that the selection is invalid otherwise.
		 */
		ahc_outb(ahc, SCB_CONTROL,
			 ahc_inb(ahc, SCB_CONTROL) & mask);
		scb->hscb->control &= mask;
		ahc_set_transaction_tag(scb, /*enabled*/FALSE,
					/*type*/MSG_SIMPLE_TASK);
		ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
		ahc_assert_atn(ahc);

		/*
		 * This transaction is now at the head of
		 * the untagged queue for this target.
		 */
		if ((ahc->flags & AHC_SCB_BTT) == 0) {
			struct scb_tailq *untagged_q;

			untagged_q =
			    &(ahc->untagged_queues[devinfo->target_offset]);
			TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe);
			scb->flags |= SCB_UNTAGGEDQ;
		}
		ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
			     scb->hscb->tag);

		/*
		 * Requeue all tagged commands for this target
		 * currently in our possession so they can be
		 * converted to untagged commands.
		 */
		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
				   SCB_GET_CHANNEL(ahc, scb),
				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
				   SEARCH_COMPLETE);
	} else {
		/*
		 * Otherwise, we ignore it.
		 */
		printf("%s:%c:%d: Message reject for %x -- ignored\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       last_msg);
	}
	return (response);
}

/*
 * Process an ignore wide residue message.
 */
static void
ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	u_int scb_index;
	struct scb *scb;

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	/*
	 * XXX Actually check data direction in the sequencer?
	 * Perhaps add datadir to some spare bits in the hscb?
	 */
	if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
	 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) {
		/*
		 * Ignore the message if we haven't
		 * seen an appropriate data phase yet.
		 */
	} else {
		/*
		 * If the residual occurred on the last
		 * transfer and the transfer request was
		 * expected to end on an odd count, do
		 * nothing.  Otherwise, subtract a byte
		 * and update the residual count accordingly.
		 */
		uint32_t sgptr;

		sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
		if ((sgptr & SG_LIST_NULL) != 0
		 && (ahc_inb(ahc, SCB_LUN) & SCB_XFERLEN_ODD) != 0) {
			/*
			 * If the residual occurred on the last
			 * transfer and the transfer request was
			 * expected to end on an odd count, do
			 * nothing.
			 */
		} else {
			struct ahc_dma_seg *sg;
			uint32_t data_cnt;
			uint32_t data_addr;
			uint32_t sglen;

			/* Pull in all of the sgptr */
			sgptr = ahc_inl(ahc, SCB_RESIDUAL_SGPTR);
			data_cnt = ahc_inl(ahc, SCB_RESIDUAL_DATACNT);

			if ((sgptr & SG_LIST_NULL) != 0) {
				/*
				 * The residual data count is not updated
				 * for the command run to completion case.
				 * Explicitly zero the count.
				 */
				data_cnt &= ~AHC_SG_LEN_MASK;
			}

			data_addr = ahc_inl(ahc, SHADDR);

			/* Ignore one byte of the transfer */
			data_cnt += 1;
			data_addr -= 1;
			sgptr &= SG_PTR_MASK;

			sg = ahc_sg_bus_to_virt(scb, sgptr);

			/*
			 * The residual sg ptr points to the next S/G
			 * to load so we must go back one.
			 */
			sg--;
			sglen = ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
			if (sg != scb->sg_list
			 && sglen < (data_cnt & AHC_SG_LEN_MASK)) {

				/* The extra byte belongs to the previous
				 * S/G element: back up one more entry. */
				sg--;
				sglen = ahc_le32toh(sg->len);
				/*
				 * Preserve High Address and SG_LIST bits
				 * while setting the count to 1.
				 */
				data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK));
				data_addr = ahc_le32toh(sg->addr)
					  + (sglen & AHC_SG_LEN_MASK) - 1;

				/*
				 * Increment sg so it points to the
				 * "next" sg.
				 */
				sg++;
				sgptr = ahc_sg_virt_to_bus(scb, sg);
			}
			ahc_outl(ahc, SCB_RESIDUAL_SGPTR, sgptr);
			ahc_outl(ahc, SCB_RESIDUAL_DATACNT, data_cnt);
			/*
			 * Toggle the "oddness" of the transfer length
			 * to handle this mid-transfer ignore wide
			 * residue.  This ensures that the oddness is
			 * correct for subsequent data transfers.
			 */
			ahc_outb(ahc, SCB_LUN,
				 ahc_inb(ahc, SCB_LUN) ^ SCB_XFERLEN_ODD);
		}
	}
}


/*
 * Reinitialize the data pointers for the active transfer
 * based on its current residual.
 */
static void
ahc_reinitialize_dataptrs(struct ahc_softc *ahc)
{
	struct scb *scb;
	struct ahc_dma_seg *sg;
	u_int scb_index;
	uint32_t sgptr;
	uint32_t resid;
	uint32_t dataptr;

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	/* Assemble the 32-bit residual S/G pointer a byte at a time */
	sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
	      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
	      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8)
	      | ahc_inb(ahc, SCB_RESIDUAL_SGPTR);

	sgptr &= SG_PTR_MASK;
	sg = ahc_sg_bus_to_virt(scb, sgptr);

	/* The residual sg_ptr always points to the next sg */
	sg--;

	/* Residual byte count is only 24 bits wide */
	resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16)
	      | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8)
	      | ahc_inb(ahc, SCB_RESIDUAL_DATACNT);

	dataptr = ahc_le32toh(sg->addr)
		+ (ahc_le32toh(sg->len) & AHC_SG_LEN_MASK)
		- resid;
	if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
		u_int dscommand1;

		/* Load the high address bits stored in the S/G len field */
		dscommand1 = ahc_inb(ahc, DSCOMMAND1);
		ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
		ahc_outb(ahc, HADDR,
			 (ahc_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS);
		ahc_outb(ahc, DSCOMMAND1, dscommand1);
	}
	ahc_outb(ahc, HADDR + 3, dataptr >> 24);
	ahc_outb(ahc, HADDR + 2, dataptr >> 16);
	ahc_outb(ahc, HADDR + 1, dataptr >> 8);
	ahc_outb(ahc, HADDR, dataptr);
	ahc_outb(ahc, HCNT + 2, resid >> 16);
	ahc_outb(ahc, HCNT + 1, resid >> 8);
	ahc_outb(ahc, HCNT, resid);
	if ((ahc->features & AHC_ULTRA2) == 0) {
		ahc_outb(ahc, STCNT + 2, resid >> 16);
		ahc_outb(ahc, STCNT + 1, resid >> 8);
		ahc_outb(ahc, STCNT, resid);
	}
}

/*
 * Handle the effects of issuing a bus
 * device reset message.
 *
 * Aborts all SCBs for the device, notifies any enabled target mode
 * luns, and resets the negotiated transfer agreement to async/narrow.
 */
static void
ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		    cam_status status, char *message, int verbose_level)
{
#ifdef AHC_TARGET_MODE
	struct ahc_tmode_tstate* tstate;
	u_int lun;
#endif
	int found;

	found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
			       status);

#ifdef AHC_TARGET_MODE
	/*
	 * Send an immediate notify ccb to all target mode peripheral
	 * drivers affected by this action.
	 */
	tstate = ahc->enabled_targets[devinfo->our_scsiid];
	if (tstate != NULL) {
		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
			struct ahc_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
					       MSG_BUS_DEV_RESET, /*arg*/0);
			ahc_send_lstate_events(ahc, lstate);
		}
	}
#endif

	/*
	 * Go back to async/narrow transfers and renegotiate.
	 */
	ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
		      AHC_TRANS_CUR, /*paused*/TRUE);
	ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL,
			 /*period*/0, /*offset*/0, /*ppr_options*/0,
			 AHC_TRANS_CUR, /*paused*/TRUE);

	ahc_send_async(ahc, devinfo->channel, devinfo->target,
		       CAM_LUN_WILDCARD, AC_SENT_BDR, NULL);

	if (message != NULL
	 && (verbose_level <= bootverbose))
		printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc),
		       message, devinfo->channel, devinfo->target, found);
}

#ifdef AHC_TARGET_MODE
/*
 * Setup the outgoing transfer negotiation message for
 * a target role MESSAGE IN phase.
 */
static void
ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		       struct scb *scb)
{

	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahc->msgout_index = 0;
	ahc->msgout_len = 0;

	if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0)
		ahc_build_transfer_msg(ahc, devinfo);
	else
		panic("ahc_intr: AWAITING target message with no message");

	ahc->msgout_index = 0;
	ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
}
#endif
/**************************** Initialization **********************************/
/*
 * Allocate a controller structure for a new device
 * and perform initial initialization.
 *
 * On failure, any OSM supplied name is freed and NULL is returned.
 */
struct ahc_softc *
ahc_alloc(void *platform_arg, char *name)
{
	struct ahc_softc *ahc;
	int	i;

#ifndef	__FreeBSD__
	ahc = malloc(sizeof(*ahc), M_DEVBUF, M_NOWAIT);
	if (!ahc) {
		printf("aic7xxx: cannot malloc softc!\n");
		free(name, M_DEVBUF);
		return NULL;
	}
#else
	/* On FreeBSD the softc is allocated by the bus framework */
	ahc = device_get_softc((device_t)platform_arg);
#endif
	memset(ahc, 0, sizeof(*ahc));
	ahc->seep_config = malloc(sizeof(*ahc->seep_config),
				  M_DEVBUF, M_NOWAIT);
	if (ahc->seep_config == NULL) {
#ifndef	__FreeBSD__
		free(ahc, M_DEVBUF);
#endif
		free(name, M_DEVBUF);
		return (NULL);
	}
	LIST_INIT(&ahc->pending_scbs);
	/* We don't know our unit number until the OSM sets it */
	ahc->name = name;
	ahc->unit = -1;
	ahc->description = NULL;
	ahc->channel = 'A';
	ahc->channel_b = 'B';
	ahc->chip = AHC_NONE;
	ahc->features = AHC_FENONE;
	ahc->bugs = AHC_BUGNONE;
	ahc->flags = AHC_FNONE;
	/*
	 * Default to all error reporting enabled with the
	 * sequencer operating at its fastest speed.
	 * The bus attach code may modify this.
	 */
	ahc->seqctl = FASTMODE;

	for (i = 0; i < AHC_NUM_TARGETS; i++)
		TAILQ_INIT(&ahc->untagged_queues[i]);
	if (ahc_platform_alloc(ahc, platform_arg) != 0) {
		ahc_free(ahc);
		ahc = NULL;
	}
	return (ahc);
}

/*
 * Perform bus independent softc initialization: capture the pause
 * and unpause values for HCNTRL and allocate the shared scb_data.
 * Returns 0 on success or ENOMEM.
 */
int
ahc_softc_init(struct ahc_softc *ahc)
{

	/* The IRQMS bit is only valid on VL and EISA chips */
	if ((ahc->chip & AHC_PCI) == 0)
		ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS;
	else
		ahc->unpause = 0;
	ahc->pause = ahc->unpause | PAUSE;
	/* XXX The shared scb data stuff should be deprecated */
	if (ahc->scb_data == NULL) {
		ahc->scb_data = malloc(sizeof(*ahc->scb_data),
				       M_DEVBUF, M_NOWAIT);
		if (ahc->scb_data == NULL)
			return (ENOMEM);
		memset(ahc->scb_data, 0, sizeof(*ahc->scb_data));
	}

	return (0);
}

/* Record the OSM assigned unit number for this controller. */
void
ahc_set_unit(struct ahc_softc *ahc, int unit)
{
	ahc->unit = unit;
}

/* Replace the controller name, freeing any previous name. */
void
ahc_set_name(struct ahc_softc *ahc, char *name)
{
	if (ahc->name != NULL)
		free(ahc->name, M_DEVBUF);
	ahc->name = name;
}

/*
 * Release all resources held by this softc.  The init_level
 * field records how far initialization progressed so teardown
 * falls through only the stages that were completed.
 */
void
ahc_free(struct ahc_softc *ahc)
{
	int i;

	switch (ahc->init_level) {
	default:
	case 5:
		ahc_shutdown(ahc);
		/* FALLTHROUGH */
	case 4:
		ahc_dmamap_unload(ahc, ahc->shared_data_dmat,
				  ahc->shared_data_dmamap);
		/* FALLTHROUGH */
	case 3:
		ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo,
				ahc->shared_data_dmamap);
		ahc_dmamap_destroy(ahc, ahc->shared_data_dmat,
				   ahc->shared_data_dmamap);
		/* FALLTHROUGH */
	case 2:
		ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
		/* FALLTHROUGH */
	case 1:
#ifndef __linux__
		ahc_dma_tag_destroy(ahc, ahc->buffer_dmat);
#endif
		break;
	case 0:
		break;
	}

#ifndef __linux__
	ahc_dma_tag_destroy(ahc, ahc->parent_dmat);
#endif
	ahc_platform_free(ahc);
	ahc_fini_scbdata(ahc);
	for (i = 0; i < AHC_NUM_TARGETS; i++) {
		struct ahc_tmode_tstate *tstate;

		tstate = ahc->enabled_targets[i];
		if (tstate != NULL) {
#ifdef AHC_TARGET_MODE
			int j;

			for (j = 0; j < AHC_NUM_LUNS; j++) {
				struct ahc_tmode_lstate *lstate;

				lstate = tstate->enabled_luns[j];
				if (lstate != NULL) {
					xpt_free_path(lstate->path);
					free(lstate, M_DEVBUF);
				}
			}
#endif
			free(tstate, M_DEVBUF);
		}
	}
#ifdef AHC_TARGET_MODE
	if (ahc->black_hole != NULL) {
		xpt_free_path(ahc->black_hole->path);
		free(ahc->black_hole, M_DEVBUF);
	}
#endif
	if (ahc->name != NULL)
		free(ahc->name, M_DEVBUF);
	if (ahc->seep_config != NULL)
		free(ahc->seep_config, M_DEVBUF);
#ifndef __FreeBSD__
	free(ahc, M_DEVBUF);
#endif
	return;
}

/*
 * Quiesce the chip: reset it and zero the registers that
 * control bus activity.
 */
void
ahc_shutdown(void *arg)
{
	struct ahc_softc *ahc;
	int i;

	ahc = (struct ahc_softc *)arg;

	/* This will reset most registers to 0, but not all */
	ahc_reset(ahc, /*reinit*/FALSE);
	ahc_outb(ahc, SCSISEQ, 0);
	ahc_outb(ahc, SXFRCTL0, 0);
	ahc_outb(ahc, DSPCISTATUS, 0);

	for (i = TARG_SCSIRATE; i < SCSICONF; i++)
		ahc_outb(ahc, i, 0);
}

/*
 * Reset the controller and record some information about it
 * that is only available just after a reset.  If "reinit" is
 * non-zero, this reset occurred after initial configuration
 * and the caller requests that the chip be fully reinitialized
 * to a runable state.  Chip interrupts are *not* enabled after
 * a reinitialization.  The caller must enable interrupts via
 * ahc_intr_enable().
 */
int
ahc_reset(struct ahc_softc *ahc, int reinit)
{
	u_int	sblkctl;
	u_int	sxfrctl1_a, sxfrctl1_b;
	int	error;
	int	wait;

	/*
	 * Preserve the value of the SXFRCTL1 register for all channels.
	 * It contains settings that affect termination and we don't want
	 * to disturb the integrity of the bus.
	 */
	ahc_pause(ahc);
	if ((ahc_inb(ahc, HCNTRL) & CHIPRST) != 0) {
		/*
		 * The chip has not been initialized since
		 * PCI/EISA/VLB bus reset.  Don't trust
		 * "left over BIOS data".
		 */
		ahc->flags |= AHC_NO_BIOS_INIT;
	}
	sxfrctl1_b = 0;
	if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) {
		u_int sblkctl;

		/*
		 * Save channel B's settings in case this chip
		 * is setup for TWIN channel operation.
		 */
		sblkctl = ahc_inb(ahc, SBLKCTL);
		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
		sxfrctl1_b = ahc_inb(ahc, SXFRCTL1);
		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
	}
	sxfrctl1_a = ahc_inb(ahc, SXFRCTL1);

	/* Initiate the chip reset */
	ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause);

	/*
	 * Ensure that the reset has finished.  We delay 1000us
	 * prior to reading the register to make sure the chip
	 * has sufficiently completed its reset to handle register
	 * accesses.
	 */
	wait = 1000;
	do {
		ahc_delay(1000);
	} while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK));

	if (wait == 0) {
		printf("%s: WARNING - Failed chip reset!  "
		       "Trying to initialize anyway.\n", ahc_name(ahc));
	}
	ahc_outb(ahc, HCNTRL, ahc->pause);

	/* Determine channel configuration */
	sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE);
	/* No Twin Channel PCI cards */
	if ((ahc->chip & AHC_PCI) != 0)
		sblkctl &= ~SELBUSB;
	switch (sblkctl) {
	case 0:
		/* Single Narrow Channel */
		break;
	case 2:
		/* Wide Channel */
		ahc->features |= AHC_WIDE;
		break;
	case 8:
		/* Twin Channel */
		ahc->features |= AHC_TWIN;
		break;
	default:
		printf(" Unsupported adapter type.  Ignoring\n");
		return(-1);
	}

	/*
	 * Reload sxfrctl1.
	 *
	 * We must always initialize STPWEN to 1 before we
	 * restore the saved values.  STPWEN is initialized
	 * to a tri-state condition which can only be cleared
	 * by turning it on.
	 */
	if ((ahc->features & AHC_TWIN) != 0) {
		u_int sblkctl;

		sblkctl = ahc_inb(ahc, SBLKCTL);
		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
		ahc_outb(ahc, SXFRCTL1, sxfrctl1_b);
		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
	}
	ahc_outb(ahc, SXFRCTL1, sxfrctl1_a);

	error = 0;
	if (reinit != 0)
		/*
		 * If a recovery action has forced a chip reset,
		 * re-initialize the chip to our liking.
		 */
		error = ahc->bus_chip_init(ahc);
#ifdef AHC_DUMP_SEQ
	else
		ahc_dumpseq(ahc);
#endif

	return (error);
}

/*
 * Determine the number of SCBs available on the controller
 * by probing SCB RAM until a write fails to read back.
 */
int
ahc_probe_scbs(struct ahc_softc *ahc) {
	int i;

	for (i = 0; i < AHC_SCB_MAX; i++) {

		ahc_outb(ahc, SCBPTR, i);
		ahc_outb(ahc, SCB_BASE, i);
		if (ahc_inb(ahc, SCB_BASE) != i)
			break;
		/* Re-check SCB 0 to catch address wrap-around */
		ahc_outb(ahc, SCBPTR, 0);
		if (ahc_inb(ahc, SCB_BASE) != 0)
			break;
	}
	return (i);
}

/*
 * Bus dmamap load callback: record the single mapped
 * segment's bus address for the caller.
 */
static void
ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	dma_addr_t *baddr;

	baddr = (dma_addr_t *)arg;
	*baddr = segs->ds_addr;
}

/*
 * Initialize the hardware SCB array and link all SCBs onto
 * the chip's free list.
 */
static void
ahc_build_free_scb_list(struct ahc_softc *ahc)
{
	int scbsize;
	int i;

	scbsize = 32;
	if ((ahc->flags & AHC_LSCBS_ENABLED) != 0)
		scbsize = 64;

	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		int j;

		ahc_outb(ahc, SCBPTR, i);

		/*
		 * Touch all SCB bytes to avoid parity errors
		 * should one of our debugging routines read
		 * an otherwise uninitialized byte.
		 */
		for (j = 0; j < scbsize; j++)
			ahc_outb(ahc, SCB_BASE+j, 0xFF);

		/* Clear the control byte. */
		ahc_outb(ahc, SCB_CONTROL, 0);

		/* Set the next pointer */
		if ((ahc->flags & AHC_PAGESCBS) != 0)
			ahc_outb(ahc, SCB_NEXT, i+1);
		else
			ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);

		/* Make the tag number, SCSIID, and lun invalid */
		ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
		ahc_outb(ahc, SCB_SCSIID, 0xFF);
		ahc_outb(ahc, SCB_LUN, 0xFF);
	}

	if ((ahc->flags & AHC_PAGESCBS) != 0) {
		/* SCB 0 heads the free list. */
		ahc_outb(ahc, FREE_SCBH, 0);
	} else {
		/* No free list. */
		ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
	}

	/* Make sure that the last SCB terminates the free list */
	ahc_outb(ahc, SCBPTR, i-1);
	ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
}

/*
 * Allocate and DMA map the SCB, sense buffer, and S/G resources
 * for this controller.  scb_data->init_level tracks progress so
 * ahc_fini_scbdata() can unwind a partial initialization.
 * Returns 0 on success, ENOMEM or ENXIO on failure.
 */
static int
ahc_init_scbdata(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;

	scb_data = ahc->scb_data;
	SLIST_INIT(&scb_data->free_scbs);
	SLIST_INIT(&scb_data->sg_maps);

	/* Allocate SCB resources */
	scb_data->scbarray =
	    (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC,
				 M_DEVBUF, M_NOWAIT);
	if (scb_data->scbarray == NULL)
		return (ENOMEM);
	memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC);

	/* Determine the number of hardware SCBs and initialize them */

	scb_data->maxhscbs = ahc_probe_scbs(ahc);
	if (ahc->scb_data->maxhscbs == 0) {
		printf("%s: No SCB space found\n", ahc_name(ahc));
		return (ENXIO);
	}

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 */

	/* DMA tag for our hardware scb structures */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocation for our hscbs */
	if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat,
			     (void **)&scb_data->hscbs,
			     BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them */
	ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap,
			scb_data->hscbs,
			AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
			ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0);

	scb_data->init_level++;

	/* DMA tag for our sense buffers */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sense_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocate them */
	if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat,
			     (void **)&scb_data->sense,
			     BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them */
	ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap,
			scb_data->sense,
			AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
			ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0);

	scb_data->init_level++;

	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/8,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sg_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Perform initial CCB allocation */
	memset(scb_data->hscbs, 0,
	       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb));
	ahc_alloc_scbs(ahc);

	if (scb_data->numscbs == 0) {
		printf("%s: ahc_init_scbdata - "
		       "Unable to allocate initial scbs\n",
		       ahc_name(ahc));
		goto error_exit;
	}

	/*
	 * Reserve the next queued SCB.
	 */
	ahc->next_queued_scb = ahc_get_scb(ahc);

	/*
	 * Note that we were successful.
	 */
	return (0);

error_exit:

	return (ENOMEM);
}

/*
 * Free the SCB resources acquired in ahc_init_scbdata().  The
 * init_level recorded during initialization selects the entry point
 * into this switch; every case deliberately falls through so that
 * each level also releases everything acquired at the levels below it.
 */
static void
ahc_fini_scbdata(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;

	scb_data = ahc->scb_data;
	if (scb_data == NULL)
		return;

	switch (scb_data->init_level) {
	default:
	case 7:
	{
		struct sg_map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
			ahc_dmamap_unload(ahc, scb_data->sg_dmat,
					  sg_map->sg_dmamap);
			ahc_dmamem_free(ahc, scb_data->sg_dmat,
					sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
	}
	/* FALLTHROUGH */
	case 6:
		ahc_dmamap_unload(ahc, scb_data->sense_dmat,
				  scb_data->sense_dmamap);
	/* FALLTHROUGH */
	case 5:
		ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
				scb_data->sense_dmamap);
		ahc_dmamap_destroy(ahc, scb_data->sense_dmat,
				   scb_data->sense_dmamap);
	/* FALLTHROUGH */
	case 4:
		ahc_dma_tag_destroy(ahc, scb_data->sense_dmat);
	/* FALLTHROUGH */
	case 3:
		ahc_dmamap_unload(ahc, scb_data->hscb_dmat,
				  scb_data->hscb_dmamap);
	/* FALLTHROUGH */
	case 2:
		ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
				scb_data->hscb_dmamap);
		ahc_dmamap_destroy(ahc, scb_data->hscb_dmat,
				   scb_data->hscb_dmamap);
	/* FALLTHROUGH */
	case 1:
		ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat);
		break;
	case 0:
		break;
	}
	if (scb_data->scbarray != NULL)
		free(scb_data->scbarray, M_DEVBUF);
}

/*
 * Allocate another page worth of S/G segment lists and carve it,
 * together with the preallocated hardware SCBs, into kernel SCBs
 * that are placed on the free list.  Called whenever the free list
 * runs low; returns silently on allocation failure since callers
 * treat an empty free list as "no more SCBs available".
 */
void
ahc_alloc_scbs(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;
	struct scb *next_scb;
	struct sg_map_node *sg_map;
	dma_addr_t physaddr;
	struct ahc_dma_seg *segs;
	int newcount;
	int i;

	scb_data = ahc->scb_data;
	if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC)
		/* Can't allocate any more */
		return;

	next_scb = &scb_data->scbarray[scb_data->numscbs];

	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

	if (sg_map == NULL)
		return;

	/* Allocate S/G space for the next batch of SCBS */
	if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat,
			     (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		free(sg_map, M_DEVBUF);
		return;
	}

	SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);

	ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap,
			sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb,
			&sg_map->sg_physaddr, /*flags*/0);

	segs = sg_map->sg_vaddr;
	physaddr = sg_map->sg_physaddr;

	/* How many SCBs can this one page of S/G lists service? */
	newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
	newcount = MIN(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs));
	for (i = 0; i < newcount; i++) {
		struct scb_platform_data *pdata;
#ifndef __linux__
		int error;
#endif
		pdata = (struct scb_platform_data *)malloc(sizeof(*pdata),
							   M_DEVBUF, M_NOWAIT);
		if (pdata == NULL)
			break;
		next_scb->platform_data = pdata;
		next_scb->sg_map = sg_map;
		next_scb->sg_list = segs;
		/*
		 * The sequencer always starts with the second entry.
		 * The first entry is embedded in the scb.
		 */
		next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
		next_scb->ahc_softc = ahc;
		next_scb->flags = SCB_FREE;
#ifndef __linux__
		error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
					  &next_scb->dmamap);
		if (error != 0)
			break;
#endif
		next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
		next_scb->hscb->tag = ahc->scb_data->numscbs;
		SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
				  next_scb, links.sle);
		segs += AHC_NSEG;
		physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
		next_scb++;
		ahc->scb_data->numscbs++;
	}
}

/*
 * Format a human readable description of the controller (chip type,
 * channel configuration, SCSI ids, and SCB counts) into the caller
 * supplied buffer.  The buffer must be large enough for the longest
 * possible description; no bounds checking is performed.
 */
void
ahc_controller_info(struct ahc_softc *ahc, char *buf)
{
	int len;

	len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
	buf += len;
	if ((ahc->features & AHC_TWIN) != 0)
		len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
			      "B SCSI Id=%d, primary %c, ",
			      ahc->our_id, ahc->our_id_b,
			      (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A');
	else {
		const char *speed;
		const char *type;

		speed = "";
		if ((ahc->features & AHC_ULTRA) != 0) {
			speed = "Ultra ";
		} else if ((ahc->features & AHC_DT) != 0) {
			speed = "Ultra160 ";
		} else if ((ahc->features & AHC_ULTRA2) != 0) {
			speed = "Ultra2 ";
		}
		if ((ahc->features & AHC_WIDE) != 0) {
			type = "Wide";
		} else {
			type = "Single";
		}
		len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ",
			      speed, type, ahc->channel, ahc->our_id);
	}
	buf += len;

	if ((ahc->flags & AHC_PAGESCBS) != 0)
		sprintf(buf, "%d/%d SCBs",
			ahc->scb_data->maxhscbs, AHC_MAX_QUEUE);
	else
		sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs);
}

int
ahc_chip_init(struct ahc_softc *ahc)
{
	int	 term;
	int	 error;
	u_int	 i;
	u_int	 scsi_conf;
	u_int	 scsiseq_template;
	uint32_t physaddr;

	ahc_outb(ahc, SEQ_FLAGS,
		 0);
	ahc_outb(ahc, SEQ_FLAGS2, 0);

	/* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/
	if (ahc->features & AHC_TWIN) {

		/*
		 * Setup Channel B first.
		 */
		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
		term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0;
		ahc_outb(ahc, SCSIID, ahc->our_id_b);
		scsi_conf = ahc_inb(ahc, SCSICONF + 1);
		ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
					|term|ahc->seltime_b|ENSTIMER|ACTNEGEN);
		if ((ahc->features & AHC_ULTRA2) != 0)
			ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
		ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
		ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);

		/* Select Channel A */
		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
	}
	term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0;
	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
	else
		ahc_outb(ahc, SCSIID, ahc->our_id);
	scsi_conf = ahc_inb(ahc, SCSICONF);
	ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
				|term|ahc->seltime
				|ENSTIMER|ACTNEGEN);
	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
	ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
	ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);

	/* There are no untagged SCBs active yet. */
	for (i = 0; i < 16; i++) {
		ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0));
		if ((ahc->flags & AHC_SCB_BTT) != 0) {
			int lun;

			/*
			 * The SCB based BTT allows an entry per
			 * target and lun pair.
			 */
			for (lun = 1; lun < AHC_NUM_LUNS; lun++)
				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun));
		}
	}

	/* All of our queues are empty */
	for (i = 0; i < 256; i++)
		ahc->qoutfifo[i] = SCB_LIST_NULL;
	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD);

	for (i = 0; i < 256; i++)
		ahc->qinfifo[i] = SCB_LIST_NULL;

	if ((ahc->features & AHC_MULTI_TID) != 0) {
		ahc_outb(ahc, TARGID, 0);
		ahc_outb(ahc, TARGID + 1, 0);
	}

	/*
	 * Tell the sequencer where it can find our arrays in memory.
	 */
	physaddr = ahc->scb_data->hscb_busaddr;
	ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF);
	ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF);
	ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF);
	ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF);

	physaddr = ahc->shared_data_busaddr;
	ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF);
	ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF);
	ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF);
	ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF);

	/*
	 * Initialize the group code to command length table.
	 * This overrides the values in TARG_SCSIRATE, so only
	 * setup the table after we have processed that information.
	 */
	ahc_outb(ahc, CMDSIZE_TABLE, 5);
	ahc_outb(ahc, CMDSIZE_TABLE + 1, 9);
	ahc_outb(ahc, CMDSIZE_TABLE + 2, 9);
	ahc_outb(ahc, CMDSIZE_TABLE + 3, 0);
	ahc_outb(ahc, CMDSIZE_TABLE + 4, 15);
	ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
	ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
	ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);

	if ((ahc->features & AHC_HS_MAILBOX) != 0)
		ahc_outb(ahc, HS_MAILBOX, 0);

	/* Tell the sequencer of our initial queue positions */
	if ((ahc->features & AHC_TARGETMODE) != 0) {
		ahc->tqinfifonext = 1;
		ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
		ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
	}
	ahc->qinfifonext = 0;
	ahc->qoutfifonext = 0;
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256);
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
		ahc_outb(ahc, SNSCB_QOFF, ahc->qinfifonext);
		ahc_outb(ahc, SDSCB_QOFF, 0);
	} else {
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
		ahc_outb(ahc, QINPOS, ahc->qinfifonext);
		ahc_outb(ahc, QOUTPOS, ahc->qoutfifonext);
	}

	/* We don't have any waiting selections */
	ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL);

	/* Our disconnection list is empty too */
	ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL);

	/* Message out buffer starts empty */
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);

	/*
	 * Setup the allowed SCSI Sequences based on operational mode.
	 * If we are a target, we'll enable select in operations once
	 * we've had a lun enabled.
	 */
	scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
	if ((ahc->flags & AHC_INITIATORROLE) != 0)
		scsiseq_template |= ENRSELI;
	ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template);

	/* Initialize our list of free SCBs. */
	ahc_build_free_scb_list(ahc);

	/*
	 * Tell the sequencer which SCB will be the next one it receives.
	 */
	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);

	/*
	 * Load the Sequencer program and Enable the adapter
	 * in "fast" mode.
	 */
	if (bootverbose)
		printf("%s: Downloading Sequencer Program...",
		       ahc_name(ahc));

	error = ahc_loadseq(ahc);
	if (error != 0)
		return (error);

	if ((ahc->features & AHC_ULTRA2) != 0) {
		int wait;

		/*
		 * Wait for up to 500ms for our transceivers
		 * to settle.  If the adapter does not have
		 * a cable attached, the transceivers may
		 * never settle, so don't complain if we
		 * fail here.
		 */
		for (wait = 5000;
		     (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
		     wait--)
			ahc_delay(100);
	}
	ahc_restart(ahc);
	return (0);
}

/*
 * Start the board, ready for normal operation
 */
int
ahc_init(struct ahc_softc *ahc)
{
	int	 max_targ;
	u_int	 i;
	u_int	 scsi_conf;
	u_int	 ultraenb;
	u_int	 discenable;
	u_int	 tagenable;
	size_t	 driver_data_size;

#ifdef AHC_DEBUG
	if ((ahc_debug & AHC_DEBUG_SEQUENCER) != 0)
		ahc->flags |= AHC_SEQUENCER_DEBUG;
#endif

#ifdef AHC_PRINT_SRAM
	printf("Scratch Ram:");
	for (i = 0x20; i < 0x5f; i++) {
		if (((i % 8) == 0) && (i != 0)) {
			printf ("\n ");
		}
		printf (" 0x%x", ahc_inb(ahc, i));
	}
	if ((ahc->features & AHC_MORE_SRAM) != 0) {
		for (i = 0x70; i < 0x7f; i++) {
			if (((i % 8) == 0) && (i != 0)) {
				printf ("\n ");
			}
			printf (" 0x%x", ahc_inb(ahc, i));
		}
	}
	printf ("\n");
	/*
	 * Reading uninitialized scratch ram may
	 * generate parity errors.
	 */
	ahc_outb(ahc, CLRINT, CLRPARERR);
	ahc_outb(ahc, CLRINT, CLRBRKADRINT);
#endif
	max_targ = 15;

	/*
	 * Assume we have a board at this stage and it has been reset.
	 */
	if ((ahc->flags & AHC_USEDEFAULTS) != 0)
		ahc->our_id = ahc->our_id_b = 7;

	/*
	 * Default to allowing initiator operations.
	 */
	ahc->flags |= AHC_INITIATORROLE;

	/*
	 * Only allow target mode features if this unit has them enabled.
	 */
	if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
		ahc->features &= ~AHC_TARGETMODE;

#ifndef __linux__
	/* DMA tag for mapping buffers into device visible space. */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/ahc->flags & AHC_39BIT_ADDRESSING
					? (dma_addr_t)0x7FFFFFFFFFULL
					: BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/(AHC_NSEG - 1) * PAGE_SIZE,
			       /*nsegments*/AHC_NSEG,
			       /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &ahc->buffer_dmat) != 0) {
		return (ENOMEM);
	}
#endif

	ahc->init_level++;

	/*
	 * DMA tag for our command fifos and other data in system memory
	 * the card's sequencer must be able to access.  For initiator
	 * roles, we need to allocate space for the qinfifo and qoutfifo.
	 * The qinfifo and qoutfifo are composed of 256 1 byte elements.
	 * When providing for the target mode role, we must additionally
	 * provide space for the incoming target command fifo and an extra
	 * byte to deal with a dma bug in some chip versions.
	 */
	driver_data_size = 2 * 256 * sizeof(uint8_t);
	if ((ahc->features & AHC_TARGETMODE) != 0)
		driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd)
				 + /*DMA WideOdd Bug Buffer*/1;
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       driver_data_size,
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &ahc->shared_data_dmat) != 0) {
		return (ENOMEM);
	}

	ahc->init_level++;

	/* Allocation of driver data */
	if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat,
			     (void **)&ahc->qoutfifo,
			     BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) {
		return (ENOMEM);
	}

	ahc->init_level++;

	/* And permanently map it in */
	ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
			ahc->qoutfifo, driver_data_size, ahc_dmamap_cb,
			&ahc->shared_data_busaddr, /*flags*/0);

	if ((ahc->features & AHC_TARGETMODE) != 0) {
		/* Target command fifo sits first in the shared area. */
		ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo;
		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS];
		/* The WideOdd bug buffer is the final byte of the region. */
		ahc->dma_bug_buf = ahc->shared_data_busaddr
				 + driver_data_size - 1;
		/* All target command blocks start out invalid. */
		for (i = 0; i < AHC_TMODE_CMDS; i++)
			ahc->targetcmds[i].cmd_valid = 0;
		ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD);
		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256];
	}
	ahc->qinfifo = &ahc->qoutfifo[256];

	ahc->init_level++;

	/* Allocate SCB data now that buffer_dmat is initialized */
	if (ahc->scb_data->maxhscbs == 0)
		if (ahc_init_scbdata(ahc) != 0)
			return (ENOMEM);

	/*
	 * Allocate a tstate to house information for our
	 * initiator presence on the bus as well as the user
	 * data for any target mode initiator.
4827 */ 4828 if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) { 4829 printf("%s: unable to allocate ahc_tmode_tstate. " 4830 "Failing attach\n", ahc_name(ahc)); 4831 return (ENOMEM); 4832 } 4833 4834 if ((ahc->features & AHC_TWIN) != 0) { 4835 if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) { 4836 printf("%s: unable to allocate ahc_tmode_tstate. " 4837 "Failing attach\n", ahc_name(ahc)); 4838 return (ENOMEM); 4839 } 4840 } 4841 4842 if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) { 4843 ahc->flags |= AHC_PAGESCBS; 4844 } else { 4845 ahc->flags &= ~AHC_PAGESCBS; 4846 } 4847 4848 #ifdef AHC_DEBUG 4849 if (ahc_debug & AHC_SHOW_MISC) { 4850 printf("%s: hardware scb %u bytes; kernel scb %u bytes; " 4851 "ahc_dma %u bytes\n", 4852 ahc_name(ahc), 4853 (u_int)sizeof(struct hardware_scb), 4854 (u_int)sizeof(struct scb), 4855 (u_int)sizeof(struct ahc_dma_seg)); 4856 } 4857 #endif /* AHC_DEBUG */ 4858 4859 /* 4860 * Look at the information that board initialization or 4861 * the board bios has left us. 4862 */ 4863 if (ahc->features & AHC_TWIN) { 4864 scsi_conf = ahc_inb(ahc, SCSICONF + 1); 4865 if ((scsi_conf & RESET_SCSI) != 0 4866 && (ahc->flags & AHC_INITIATORROLE) != 0) 4867 ahc->flags |= AHC_RESET_BUS_B; 4868 } 4869 4870 scsi_conf = ahc_inb(ahc, SCSICONF); 4871 if ((scsi_conf & RESET_SCSI) != 0 4872 && (ahc->flags & AHC_INITIATORROLE) != 0) 4873 ahc->flags |= AHC_RESET_BUS_A; 4874 4875 ultraenb = 0; 4876 tagenable = ALL_TARGETS_MASK; 4877 4878 /* Grab the disconnection disable table and invert it for our needs */ 4879 if ((ahc->flags & AHC_USEDEFAULTS) != 0) { 4880 printf("%s: Host Adapter Bios disabled. 
Using default SCSI " 4881 "device parameters\n", ahc_name(ahc)); 4882 ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B| 4883 AHC_TERM_ENB_A|AHC_TERM_ENB_B; 4884 discenable = ALL_TARGETS_MASK; 4885 if ((ahc->features & AHC_ULTRA) != 0) 4886 ultraenb = ALL_TARGETS_MASK; 4887 } else { 4888 discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8) 4889 | ahc_inb(ahc, DISC_DSB)); 4890 if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0) 4891 ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8) 4892 | ahc_inb(ahc, ULTRA_ENB); 4893 } 4894 4895 if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) 4896 max_targ = 7; 4897 4898 for (i = 0; i <= max_targ; i++) { 4899 struct ahc_initiator_tinfo *tinfo; 4900 struct ahc_tmode_tstate *tstate; 4901 u_int our_id; 4902 u_int target_id; 4903 char channel; 4904 4905 channel = 'A'; 4906 our_id = ahc->our_id; 4907 target_id = i; 4908 if (i > 7 && (ahc->features & AHC_TWIN) != 0) { 4909 channel = 'B'; 4910 our_id = ahc->our_id_b; 4911 target_id = i % 8; 4912 } 4913 tinfo = ahc_fetch_transinfo(ahc, channel, our_id, 4914 target_id, &tstate); 4915 /* Default to async narrow across the board */ 4916 memset(tinfo, 0, sizeof(*tinfo)); 4917 if (ahc->flags & AHC_USEDEFAULTS) { 4918 if ((ahc->features & AHC_WIDE) != 0) 4919 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4920 4921 /* 4922 * These will be truncated when we determine the 4923 * connection type we have with the target. 4924 */ 4925 tinfo->user.period = ahc_syncrates->period; 4926 tinfo->user.offset = MAX_OFFSET; 4927 } else { 4928 u_int scsirate; 4929 uint16_t mask; 4930 4931 /* Take the settings leftover in scratch RAM. */ 4932 scsirate = ahc_inb(ahc, TARG_SCSIRATE + i); 4933 mask = (0x01 << i); 4934 if ((ahc->features & AHC_ULTRA2) != 0) { 4935 u_int offset; 4936 u_int maxsync; 4937 4938 if ((scsirate & SOFS) == 0x0F) { 4939 /* 4940 * Haven't negotiated yet, 4941 * so the format is different. 4942 */ 4943 scsirate = (scsirate & SXFR) >> 4 4944 | (ultraenb & mask) 4945 ? 
0x08 : 0x0 4946 | (scsirate & WIDEXFER); 4947 offset = MAX_OFFSET_ULTRA2; 4948 } else 4949 offset = ahc_inb(ahc, TARG_OFFSET + i); 4950 if ((scsirate & ~WIDEXFER) == 0 && offset != 0) 4951 /* Set to the lowest sync rate, 5MHz */ 4952 scsirate |= 0x1c; 4953 maxsync = AHC_SYNCRATE_ULTRA2; 4954 if ((ahc->features & AHC_DT) != 0) 4955 maxsync = AHC_SYNCRATE_DT; 4956 tinfo->user.period = 4957 ahc_find_period(ahc, scsirate, maxsync); 4958 if (offset == 0) 4959 tinfo->user.period = 0; 4960 else 4961 tinfo->user.offset = MAX_OFFSET; 4962 if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/ 4963 && (ahc->features & AHC_DT) != 0) 4964 tinfo->user.ppr_options = 4965 MSG_EXT_PPR_DT_REQ; 4966 } else if ((scsirate & SOFS) != 0) { 4967 if ((scsirate & SXFR) == 0x40 4968 && (ultraenb & mask) != 0) { 4969 /* Treat 10MHz as a non-ultra speed */ 4970 scsirate &= ~SXFR; 4971 ultraenb &= ~mask; 4972 } 4973 tinfo->user.period = 4974 ahc_find_period(ahc, scsirate, 4975 (ultraenb & mask) 4976 ? AHC_SYNCRATE_ULTRA 4977 : AHC_SYNCRATE_FAST); 4978 if (tinfo->user.period != 0) 4979 tinfo->user.offset = MAX_OFFSET; 4980 } 4981 if (tinfo->user.period == 0) 4982 tinfo->user.offset = 0; 4983 if ((scsirate & WIDEXFER) != 0 4984 && (ahc->features & AHC_WIDE) != 0) 4985 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4986 tinfo->user.protocol_version = 4; 4987 if ((ahc->features & AHC_DT) != 0) 4988 tinfo->user.transport_version = 3; 4989 else 4990 tinfo->user.transport_version = 2; 4991 tinfo->goal.protocol_version = 2; 4992 tinfo->goal.transport_version = 2; 4993 tinfo->curr.protocol_version = 2; 4994 tinfo->curr.transport_version = 2; 4995 } 4996 tstate->ultraenb = 0; 4997 } 4998 ahc->user_discenable = discenable; 4999 ahc->user_tagenable = tagenable; 5000 5001 return (ahc->bus_chip_init(ahc)); 5002 } 5003 5004 void 5005 ahc_intr_enable(struct ahc_softc *ahc, int enable) 5006 { 5007 u_int hcntrl; 5008 5009 hcntrl = ahc_inb(ahc, HCNTRL); 5010 hcntrl &= ~INTEN; 5011 ahc->pause &= ~INTEN; 5012 ahc->unpause &= 
			~INTEN;
	if (enable) {
		hcntrl |= INTEN;
		ahc->pause |= INTEN;
		ahc->unpause |= INTEN;
	}
	ahc_outb(ahc, HCNTRL, hcntrl);
}

/*
 * Ensure that the card is paused in a location
 * outside of all critical sections and that all
 * pending work is completed prior to returning.
 * This routine should only be called from outside
 * an interrupt context.
 */
void
ahc_pause_and_flushwork(struct ahc_softc *ahc)
{
	int intstat;
	int maxloops;
	int paused;

	maxloops = 1000;
	/* Service all interrupts, not just those we'd normally defer. */
	ahc->flags |= AHC_ALL_INTERRUPTS;
	paused = FALSE;
	do {
		if (paused)
			ahc_unpause(ahc);
		ahc_intr(ahc);
		ahc_pause(ahc);
		paused = TRUE;
		/* Disable outgoing selections while we flush. */
		ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO);
		ahc_clear_critical_section(ahc);
		intstat = ahc_inb(ahc, INTSTAT);
	} while (--maxloops
	      && (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0)
	      && ((intstat & INT_PEND) != 0
	       || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)) != 0));
	if (maxloops == 0) {
		printf("Infinite interrupt loop, INTSTAT = %x",
		       ahc_inb(ahc, INTSTAT));
	}
	ahc_platform_flushwork(ahc);
	ahc->flags &= ~AHC_ALL_INTERRUPTS;
}

/*
 * Prepare the controller for a power-down.  Returns EBUSY (leaving the
 * chip unpaused) if transactions are still outstanding.
 */
int
ahc_suspend(struct ahc_softc *ahc)
{

	ahc_pause_and_flushwork(ahc);

	if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
		ahc_unpause(ahc);
		return (EBUSY);
	}

#ifdef AHC_TARGET_MODE
	/*
	 * XXX What about ATIOs that have not yet been serviced?
	 * Perhaps we should just refuse to be suspended if we
	 * are acting in a target role.
	 */
	if (ahc->pending_device != NULL) {
		ahc_unpause(ahc);
		return (EBUSY);
	}
#endif
	ahc_shutdown(ahc);
	return (0);
}

/* Bring the controller back up after a suspend. */
int
ahc_resume(struct ahc_softc *ahc)
{

	ahc_reset(ahc, /*reinit*/TRUE);
	ahc_intr_enable(ahc, TRUE);
	ahc_restart(ahc);
	return (0);
}

/************************** Busy Target Table *********************************/
/*
 * Return the untagged transaction id for a given target/channel lun.
 * Optionally, clear the entry.
 */
u_int
ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
{
	u_int scbid;
	u_int target_offset;

	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
		scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
		ahc_outb(ahc, SCBPTR, saved_scbptr);
	} else {
		target_offset = TCL_TARGET_OFFSET(tcl);
		scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
	}

	return (scbid);
}

/* Clear the busy-target-table entry for tcl (no untagged I/O active). */
void
ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
{
	u_int target_offset;

	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
		ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL);
		ahc_outb(ahc, SCBPTR, saved_scbptr);
	} else {
		target_offset = TCL_TARGET_OFFSET(tcl);
		ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL);
	}
}

/* Record scbid as the active untagged transaction for tcl. */
void
ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
{
	u_int target_offset;

	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
		ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
		ahc_outb(ahc, SCBPTR, saved_scbptr);
	} else {
		target_offset = TCL_TARGET_OFFSET(tcl);
		ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid);
	}
}

/************************** SCB and SCB queue management **********************/
/*
 * Return non-zero if scb matches the given target/channel/lun/tag
 * criteria.  Wildcards (ALL_CHANNELS, CAM_TARGET_WILDCARD,
 * CAM_LUN_WILDCARD, SCB_LIST_NULL) match anything.
 */
int
ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
	      char channel, int lun, u_int tag, role_t role)
{
	int targ = SCB_GET_TARGET(ahc, scb);
	char chan = SCB_GET_CHANNEL(ahc, scb);
	int slun = SCB_GET_LUN(scb);
	int match;

	match = ((chan == channel) || (channel == ALL_CHANNELS));
	if (match != 0)
		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
	if (match != 0)
		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
	if (match != 0) {
#ifdef AHC_TARGET_MODE
		int group;

		group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
		if (role == ROLE_INITIATOR) {
			match = (group != XPT_FC_GROUP_TMODE)
			     && ((tag == scb->hscb->tag)
			      || (tag == SCB_LIST_NULL));
		} else if (role == ROLE_TARGET) {
			match = (group == XPT_FC_GROUP_TMODE)
			     && ((tag == scb->io_ctx->csio.tag_id)
			      || (tag == SCB_LIST_NULL));
		}
#else /* !AHC_TARGET_MODE */
		match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
#endif /* AHC_TARGET_MODE */
	}

	return match;
}

/*
 * Requeue (with CAM_REQUEUE_REQ) all transactions queued for the
 * device addressed by scb and freeze its platform device queue.
 */
void
ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
{
	int target;
	char channel;
	int lun;

	target = SCB_GET_TARGET(ahc, scb);
	lun = SCB_GET_LUN(scb);
	channel = SCB_GET_CHANNEL(ahc, scb);

	ahc_search_qinfifo(ahc, target, channel, lun,
			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	ahc_platform_freeze_devq(ahc, scb);
}

/*
 * Append scb to the tail of the qinfifo and notify the sequencer of
 * the new queue position.
 */
void
ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb)
{
	struct scb *prev_scb;

	prev_scb = NULL;
	if (ahc_qinfifo_count(ahc) != 0) {
		u_int prev_tag;
		uint8_t prev_pos;

		prev_pos = ahc->qinfifonext - 1;
		prev_tag = ahc->qinfifo[prev_pos];
		prev_scb = ahc_lookup_scb(ahc, prev_tag);
	}
	ahc_qinfifo_requeue(ahc, prev_scb, scb);
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
	}
}

/*
 * Link scb into the qinfifo after prev_scb (or at the head when
 * prev_scb is NULL) and fix up the hardware SCB "next" pointers so the
 * sequencer's dma chain stays consistent.
 */
static void
ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
		    struct scb *scb)
{
	if (prev_scb == NULL) {
		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
	} else {
		prev_scb->hscb->next = scb->hscb->tag;
		ahc_sync_scb(ahc, prev_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
	scb->hscb->next = ahc->next_queued_scb->hscb->tag;
	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}

/*
 * Number of SCBs the driver has queued that the sequencer has not yet
 * consumed (modular uint8_t arithmetic handles wrap of the 256 entry
 * fifo).
 */
static int
ahc_qinfifo_count(struct ahc_softc *ahc)
{
	uint8_t qinpos;
	uint8_t diff;

	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		qinpos = ahc_inb(ahc, SNSCB_QOFF);
		/* SNSCB_QOFF is read-to-latch; write it back unchanged. */
		ahc_outb(ahc, SNSCB_QOFF, qinpos);
	} else
		qinpos = ahc_inb(ahc, QINPOS);
	diff = ahc->qinfifonext - qinpos;
	return (diff);
}

/*
 * Scan the qinfifo, the waiting-for-selection list, and the untagged
 * queues for SCBs matching the given criteria and perform "action"
 * (complete with "status", remove, or just count) on each match.
 * Returns the number of matching SCBs found.
 */
int
ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
		   int lun, u_int tag, role_t role, uint32_t status,
		   ahc_search_action action)
{
	struct scb *scb;
	struct scb *prev_scb;
	uint8_t qinstart;
	uint8_t qinpos;
	uint8_t qintail;
	uint8_t next;
	uint8_t prev;
	uint8_t curscbptr;
	int found;
	int have_qregs;

	qintail = ahc->qinfifonext;
	have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0;
	if (have_qregs) {
		qinstart = ahc_inb(ahc, SNSCB_QOFF);
		ahc_outb(ahc, SNSCB_QOFF, qinstart);
	} else
		qinstart = ahc_inb(ahc, QINPOS);
	qinpos = qinstart;
	found = 0;
	prev_scb = NULL;

	if (action == SEARCH_COMPLETE) {
		/*
		 * Don't attempt to run any queued untagged transactions
		 * until we are done with the abort process.
		 */
		ahc_freeze_untagged_queues(ahc);
	}

	/*
	 * Start with an empty queue.  Entries that are not chosen
	 * for removal will be re-added to the queue as we go.
	 */
	ahc->qinfifonext = qinpos;
	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);

	while (qinpos != qintail) {
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
		if (scb == NULL) {
			printf("qinpos = %d, SCB index = %d\n",
			       qinpos, ahc->qinfifo[qinpos]);
			panic("Loop 1\n");
		}

		if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) {
			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;
				cam_status cstat;

				ostat = ahc_get_transaction_status(scb);
				if (ostat == CAM_REQ_INPROG)
					ahc_set_transaction_status(scb, status);
				cstat = ahc_get_transaction_status(scb);
				if (cstat != CAM_REQ_CMP)
					ahc_freeze_scb(scb);
				if ((scb->flags & SCB_ACTIVE) == 0)
					printf("Inactive SCB in qinfifo\n");
				ahc_done(ahc, scb);

				/* FALLTHROUGH */
			}
			case SEARCH_REMOVE:
				/* Simply not requeued == removed. */
				break;
			case SEARCH_COUNT:
				ahc_qinfifo_requeue(ahc, prev_scb, scb);
				prev_scb = scb;
				break;
			}
		} else {
			ahc_qinfifo_requeue(ahc, prev_scb, scb);
			prev_scb = scb;
		}
		qinpos++;
	}

	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
	}

	if (action != SEARCH_COUNT
	 && (found != 0)
	 && (qinstart != ahc->qinfifonext)) {
		/*
		 * The sequencer may be in the process of dmaing
		 * down the SCB at the beginning of the queue.
		 * This could be problematic if either the first,
		 * or the second SCB is removed from the queue
		 * (the first SCB includes a pointer to the "next"
		 * SCB to dma).  If we have removed any entries, swap
		 * the first element in the queue with the next HSCB
		 * so the sequencer will notice that NEXT_QUEUED_SCB
		 * has changed during its dma attempt and will retry
		 * the DMA.
		 */
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]);

		if (scb == NULL) {
			printf("found = %d, qinstart = %d, qinfifionext = %d\n",
				found, qinstart, ahc->qinfifonext);
			panic("First/Second Qinfifo fixup\n");
		}
		/*
		 * ahc_swap_with_next_hscb forces our next pointer to
		 * point to the reserved SCB for future commands.  Save
		 * and restore our original next pointer to maintain
		 * queue integrity.
		 */
		next = scb->hscb->next;
		ahc->scb_data->scbindex[scb->hscb->tag] = NULL;
		ahc_swap_with_next_hscb(ahc, scb);
		scb->hscb->next = next;
		ahc->qinfifo[qinstart] = scb->hscb->tag;

		/* Tell the card about the new head of the qinfifo. */
		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);

		/* Fixup the tail "next" pointer. */
		qintail = ahc->qinfifonext - 1;
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]);
		scb->hscb->next = ahc->next_queued_scb->hscb->tag;
	}

	/*
	 * Search waiting for selection list.
	 */
	curscbptr = ahc_inb(ahc, SCBPTR);
	next = ahc_inb(ahc, WAITING_SCBH);  /* Start at head of list. */
	prev = SCB_LIST_NULL;

	while (next != SCB_LIST_NULL) {
		uint8_t scb_index;

		ahc_outb(ahc, SCBPTR, next);
		scb_index = ahc_inb(ahc, SCB_TAG);
		if (scb_index >= ahc->scb_data->numscbs) {
			printf("Waiting List inconsistency. "
			       "SCB index == %d, yet numscbs == %d.",
			       scb_index, ahc->scb_data->numscbs);
			ahc_dump_card_state(ahc);
			panic("for safety");
		}
		scb = ahc_lookup_scb(ahc, scb_index);
		if (scb == NULL) {
			printf("scb_index = %d, next = %d\n",
				scb_index, next);
			panic("Waiting List traversal\n");
		}
		if (ahc_match_scb(ahc, scb, target, channel,
				  lun, SCB_LIST_NULL, role)) {
			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;
				cam_status cstat;

				ostat = ahc_get_transaction_status(scb);
				if (ostat == CAM_REQ_INPROG)
					ahc_set_transaction_status(scb,
								   status);
				cstat = ahc_get_transaction_status(scb);
				if (cstat != CAM_REQ_CMP)
					ahc_freeze_scb(scb);
				if ((scb->flags & SCB_ACTIVE) == 0)
					printf("Inactive SCB in Waiting List\n");
				ahc_done(ahc, scb);
				/* FALLTHROUGH */
			}
			case SEARCH_REMOVE:
				next = ahc_rem_wscb(ahc, next, prev);
				break;
			case SEARCH_COUNT:
				prev = next;
				next = ahc_inb(ahc, SCB_NEXT);
				break;
			}
		} else {

			prev = next;
			next = ahc_inb(ahc, SCB_NEXT);
		}
	}
	/* Restore the sequencer's SCB pointer. */
	ahc_outb(ahc, SCBPTR, curscbptr);

	found += ahc_search_untagged_queues(ahc, /*ahc_io_ctx_t*/NULL, target,
					    channel, lun, status, action);

	if (action == SEARCH_COMPLETE)
		ahc_release_untagged_queues(ahc);
	return (found);
}

/*
 * Scan the driver's untagged transaction queues for SCBs matching the
 * given criteria (optionally restricted to a single I/O context) and
 * perform "action" on each match.  Returns the number found.
 */
int
ahc_search_untagged_queues(struct ahc_softc *ahc, ahc_io_ctx_t ctx,
			   int target, char channel, int lun, uint32_t status,
			   ahc_search_action action)
{
	struct scb *scb;
	int maxtarget;
	int found;
	int i;

	if (action == SEARCH_COMPLETE) {
		/*
		 * Don't attempt to run any queued untagged transactions
		 * until we are done with the abort process.
5488 */ 5489 ahc_freeze_untagged_queues(ahc); 5490 } 5491 5492 found = 0; 5493 i = 0; 5494 if ((ahc->flags & AHC_SCB_BTT) == 0) { 5495 5496 maxtarget = 16; 5497 if (target != CAM_TARGET_WILDCARD) { 5498 5499 i = target; 5500 if (channel == 'B') 5501 i += 8; 5502 maxtarget = i + 1; 5503 } 5504 } else { 5505 maxtarget = 0; 5506 } 5507 5508 for (; i < maxtarget; i++) { 5509 struct scb_tailq *untagged_q; 5510 struct scb *next_scb; 5511 5512 untagged_q = &(ahc->untagged_queues[i]); 5513 next_scb = TAILQ_FIRST(untagged_q); 5514 while (next_scb != NULL) { 5515 5516 scb = next_scb; 5517 next_scb = TAILQ_NEXT(scb, links.tqe); 5518 5519 /* 5520 * The head of the list may be the currently 5521 * active untagged command for a device. 5522 * We're only searching for commands that 5523 * have not been started. A transaction 5524 * marked active but still in the qinfifo 5525 * is removed by the qinfifo scanning code 5526 * above. 5527 */ 5528 if ((scb->flags & SCB_ACTIVE) != 0) 5529 continue; 5530 5531 if (ahc_match_scb(ahc, scb, target, channel, lun, 5532 SCB_LIST_NULL, ROLE_INITIATOR) == 0 5533 || (ctx != NULL && ctx != scb->io_ctx)) 5534 continue; 5535 5536 /* 5537 * We found an scb that needs to be acted on. 
5538 */ 5539 found++; 5540 switch (action) { 5541 case SEARCH_COMPLETE: 5542 { 5543 cam_status ostat; 5544 cam_status cstat; 5545 5546 ostat = ahc_get_transaction_status(scb); 5547 if (ostat == CAM_REQ_INPROG) 5548 ahc_set_transaction_status(scb, status); 5549 cstat = ahc_get_transaction_status(scb); 5550 if (cstat != CAM_REQ_CMP) 5551 ahc_freeze_scb(scb); 5552 if ((scb->flags & SCB_ACTIVE) == 0) 5553 printf("Inactive SCB in untaggedQ\n"); 5554 ahc_done(ahc, scb); 5555 break; 5556 } 5557 case SEARCH_REMOVE: 5558 scb->flags &= ~SCB_UNTAGGEDQ; 5559 TAILQ_REMOVE(untagged_q, scb, links.tqe); 5560 break; 5561 case SEARCH_COUNT: 5562 break; 5563 } 5564 } 5565 } 5566 5567 if (action == SEARCH_COMPLETE) 5568 ahc_release_untagged_queues(ahc); 5569 return (found); 5570 } 5571 5572 int 5573 ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel, 5574 int lun, u_int tag, int stop_on_first, int remove, 5575 int save_state) 5576 { 5577 struct scb *scbp; 5578 u_int next; 5579 u_int prev; 5580 u_int count; 5581 u_int active_scb; 5582 5583 count = 0; 5584 next = ahc_inb(ahc, DISCONNECTED_SCBH); 5585 prev = SCB_LIST_NULL; 5586 5587 if (save_state) { 5588 /* restore this when we're done */ 5589 active_scb = ahc_inb(ahc, SCBPTR); 5590 } else 5591 /* Silence compiler */ 5592 active_scb = SCB_LIST_NULL; 5593 5594 while (next != SCB_LIST_NULL) { 5595 u_int scb_index; 5596 5597 ahc_outb(ahc, SCBPTR, next); 5598 scb_index = ahc_inb(ahc, SCB_TAG); 5599 if (scb_index >= ahc->scb_data->numscbs) { 5600 printf("Disconnected List inconsistency. " 5601 "SCB index == %d, yet numscbs == %d.", 5602 scb_index, ahc->scb_data->numscbs); 5603 ahc_dump_card_state(ahc); 5604 panic("for safety"); 5605 } 5606 5607 if (next == prev) { 5608 panic("Disconnected List Loop. 
"
			      "cur SCBPTR == %x, prev SCBPTR == %x.",
			      next, prev);
		}
		scbp = ahc_lookup_scb(ahc, scb_index);
		if (ahc_match_scb(ahc, scbp, target, channel, lun,
				  tag, ROLE_INITIATOR)) {
			count++;
			if (remove) {
				next =
				    ahc_rem_scb_from_disc_list(ahc, prev, next);
			} else {
				prev = next;
				next = ahc_inb(ahc, SCB_NEXT);
			}
			if (stop_on_first)
				break;
		} else {
			prev = next;
			next = ahc_inb(ahc, SCB_NEXT);
		}
	}
	if (save_state)
		ahc_outb(ahc, SCBPTR, active_scb);
	return (count);
}

/*
 * Remove an SCB from the on chip list of disconnected transactions.
 * This is empty/unused if we are not performing SCB paging.
 * Returns the tag of the SCB that followed the removed entry so the
 * caller's traversal can continue.
 */
static u_int
ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
{
	u_int next;

	/* Select the victim and record its successor before unlinking. */
	ahc_outb(ahc, SCBPTR, scbptr);
	next = ahc_inb(ahc, SCB_NEXT);

	/* Clear the control byte so the sequencer sees it as inactive. */
	ahc_outb(ahc, SCB_CONTROL, 0);

	ahc_add_curscb_to_free_list(ahc);

	if (prev != SCB_LIST_NULL) {
		/* Splice our predecessor to our successor. */
		ahc_outb(ahc, SCBPTR, prev);
		ahc_outb(ahc, SCB_NEXT, next);
	} else
		/* We were the list head; advance it. */
		ahc_outb(ahc, DISCONNECTED_SCBH, next);

	return (next);
}

/*
 * Add the SCB as selected by SCBPTR onto the on chip list of
 * free hardware SCBs.  This list is empty/unused if we are not
 * performing SCB paging.
 */
static void
ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
{
	/*
	 * Invalidate the tag so that our abort
	 * routines don't think it's active.
	 */
	ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);

	if ((ahc->flags & AHC_PAGESCBS) != 0) {
		/* Push the current SCB onto the head of the free list. */
		ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
		ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
	}
}

/*
 * Manipulate the waiting for selection list and return the
 * scb that follows the one that we remove.
 */
static u_int
ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
{
	u_int curscb, next;

	/*
	 * Select the SCB we want to abort and
	 * pull the next pointer out of it.
	 */
	curscb = ahc_inb(ahc, SCBPTR);
	ahc_outb(ahc, SCBPTR, scbpos);
	next = ahc_inb(ahc, SCB_NEXT);

	/* Clear the necessary fields */
	ahc_outb(ahc, SCB_CONTROL, 0);

	ahc_add_curscb_to_free_list(ahc);

	/* update the waiting list */
	if (prev == SCB_LIST_NULL) {
		/* First in the list */
		ahc_outb(ahc, WAITING_SCBH, next);

		/*
		 * Ensure we aren't attempting to perform
		 * selection for this entry.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else {
		/*
		 * Select the scb that pointed to us
		 * and update its next pointer.
		 */
		ahc_outb(ahc, SCBPTR, prev);
		ahc_outb(ahc, SCB_NEXT, next);
	}

	/*
	 * Point us back at the original scb position.
	 */
	ahc_outb(ahc, SCBPTR, curscb);
	return next;
}

/******************************** Error Handling ******************************/
/*
 * Abort all SCBs that match the given description (target/channel/lun/tag),
 * setting their status to the passed in status if the status has not already
 * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
 * is paused before it is called.
 */
int
ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
	       int lun, u_int tag, role_t role, uint32_t status)
{
	struct	scb *scbp;
	struct	scb *scbp_next;
	u_int	active_scb;
	int	i, j;
	int	maxtarget;
	int	minlun;
	int	maxlun;

	int	found;

	/*
	 * Don't attempt to run any queued untagged transactions
	 * until we are done with the abort process. 
	 */
	ahc_freeze_untagged_queues(ahc);

	/* restore this when we're done */
	active_scb = ahc_inb(ahc, SCBPTR);

	/* First flush any matches still sitting in the input queue. */
	found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	/*
	 * Clean out the busy target table for any untagged commands.
	 */
	i = 0;
	maxtarget = 16;
	if (target != CAM_TARGET_WILDCARD) {
		i = target;
		if (channel == 'B')
			i += 8;
		maxtarget = i + 1;
	}

	if (lun == CAM_LUN_WILDCARD) {

		/*
		 * Unless we are using an SCB based
		 * busy targets table, there is only
		 * one table entry for all luns of
		 * a target.
		 */
		minlun = 0;
		maxlun = 1;
		if ((ahc->flags & AHC_SCB_BTT) != 0)
			maxlun = AHC_NUM_LUNS;
	} else {
		minlun = lun;
		maxlun = lun + 1;
	}

	if (role != ROLE_TARGET) {
		for (;i < maxtarget; i++) {
			for (j = minlun;j < maxlun; j++) {
				u_int scbid;
				u_int tcl;

				tcl = BUILD_TCL(i << 4, j);
				scbid = ahc_index_busy_tcl(ahc, tcl);
				scbp = ahc_lookup_scb(ahc, scbid);
				if (scbp == NULL
				 || ahc_match_scb(ahc, scbp, target, channel,
						  lun, tag, role) == 0)
					continue;
				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j));
			}
		}

		/*
		 * Go through the disconnected list and remove any entries we
		 * have queued for completion, 0'ing their control byte too.
		 * We save the active SCB and restore it ourselves, so there
		 * is no reason for this search to restore it too.
		 */
		ahc_search_disc_list(ahc, target, channel, lun, tag,
				     /*stop_on_first*/FALSE, /*remove*/TRUE,
				     /*save_state*/FALSE);
	}

	/*
	 * Go through the hardware SCB array looking for commands that
	 * were active but not on any list.  In some cases, these remnants
	 * might not still have mappings in the scbindex array (e.g. unexpected
	 * bus free with the same scb queued for an abort).  Don't hold this
	 * against them.
	 */
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		u_int scbid;

		ahc_outb(ahc, SCBPTR, i);
		scbid = ahc_inb(ahc, SCB_TAG);
		scbp = ahc_lookup_scb(ahc, scbid);
		if ((scbp == NULL && scbid != SCB_LIST_NULL)
		 || (scbp != NULL
		  && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)))
			ahc_add_curscb_to_free_list(ahc);
	}

	/*
	 * Go through the pending CCB list and look for
	 * commands for this target that are still active.
	 * These are other tagged commands that were
	 * disconnected when the reset occurred.
	 */
	scbp_next = LIST_FIRST(&ahc->pending_scbs);
	while (scbp_next != NULL) {
		scbp = scbp_next;
		/* ahc_done() unlinks scbp, so advance before acting. */
		scbp_next = LIST_NEXT(scbp, pending_links);
		if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
			cam_status ostat;

			ostat = ahc_get_transaction_status(scbp);
			if (ostat == CAM_REQ_INPROG)
				ahc_set_transaction_status(scbp, status);
			if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP)
				ahc_freeze_scb(scbp);
			if ((scbp->flags & SCB_ACTIVE) == 0)
				printf("Inactive SCB on pending list\n");
			ahc_done(ahc, scbp);
			found++;
		}
	}
	ahc_outb(ahc, SCBPTR, active_scb);
	ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
	ahc_release_untagged_queues(ahc);
	return found;
}

/*
 * Assert SCSIRSTO on the currently selected bus for the required bus
 * reset duration, then deassert it and clear the resulting interrupt
 * status.  Reset interrupts are masked for the duration so we do not
 * take an interrupt for our own reset.
 */
static void
ahc_reset_current_bus(struct ahc_softc *ahc)
{
	uint8_t scsiseq;

	/* Mask reset interrupts while we generate the reset ourselves. */
	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
	scsiseq = ahc_inb(ahc, SCSISEQ);
	ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
	ahc_flush_device_writes(ahc);
	/* Hold the reset line for the minimum required period. */
	ahc_delay(AHC_BUSRESET_DELAY);
	/* Turn off the bus reset */
	ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);

	ahc_clear_intstat(ahc);

	/* Re-enable reset interrupts */
	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
}

/*
 * Reset the named channel: optionally generate the bus reset, clean up
 * all transaction state for devices on that bus, notify the transport
 * layer, and drop all transfer negotiations back to async/narrow.
 * Returns the number of SCBs aborted as a result of the reset.
 */
int
ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
{
	struct	ahc_devinfo devinfo;
	u_int	initiator, target, max_scsiid;
	u_int	sblkctl;
	u_int	scsiseq;
	u_int	simode1;
	int	found;
	int	restart_needed;
	char	cur_channel;

	ahc->pending_device = NULL;

	ahc_compile_devinfo(&devinfo,
			    CAM_TARGET_WILDCARD,
			    CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD,
			    channel, ROLE_UNKNOWN);
	ahc_pause(ahc);

	/* Make sure the sequencer is in a safe location. */
	ahc_clear_critical_section(ahc);

	/*
	 * Run our command complete fifos to ensure that we perform
	 * completion processing on any commands that 'completed'
	 * before the reset occurred.
	 */
	ahc_run_qoutfifo(ahc);
#ifdef AHC_TARGET_MODE
	/*
	 * XXX - In Twin mode, the tqinfifo may have commands
	 *	 for an unaffected channel in it.  However, if
	 *	 we have run out of ATIO resources to drain that
	 *	 queue, we may not get them all out here.  Further,
	 *	 the blocked transactions for the reset channel
	 *	 should just be killed off, irrespecitve of whether
	 *	 we are blocked on ATIO resources.  Write a routine
	 *	 to compact the tqinfifo appropriately.
	 */
	if ((ahc->flags & AHC_TARGETROLE) != 0) {
		ahc_run_tqinfifo(ahc, /*paused*/TRUE);
	}
#endif

	/*
	 * Reset the bus if we are initiating this reset
	 */
	sblkctl = ahc_inb(ahc, SBLKCTL);
	cur_channel = 'A';
	if ((ahc->features & AHC_TWIN) != 0
	 && ((sblkctl & SELBUSB) != 0))
		cur_channel = 'B';
	scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
	if (cur_channel != channel) {
		/* Case 1: Command for another bus is active
		 * Stealthily reset the other bus without
		 * upsetting the current bus.
		 */
		/* Temporarily bank-switch to the other channel. */
		ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
#ifdef AHC_TARGET_MODE
		/*
		 * Bus resets clear ENSELI, so we cannot
		 * defer re-enabling bus reset interrupts
		 * if we are in target mode.
		 */
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			simode1 |= ENSCSIRST;
#endif
		ahc_outb(ahc, SIMODE1, simode1);
		if (initiate_reset)
			ahc_reset_current_bus(ahc);
		ahc_clear_intstat(ahc);
		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
		/* Return to the original channel. */
		ahc_outb(ahc, SBLKCTL, sblkctl);
		restart_needed = FALSE;
	} else {
		/* Case 2: A command from this bus is active or we're idle */
		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
#ifdef AHC_TARGET_MODE
		/*
		 * Bus resets clear ENSELI, so we cannot
		 * defer re-enabling bus reset interrupts
		 * if we are in target mode.
		 */
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			simode1 |= ENSCSIRST;
#endif
		ahc_outb(ahc, SIMODE1, simode1);
		if (initiate_reset)
			ahc_reset_current_bus(ahc);
		ahc_clear_intstat(ahc);
		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
		restart_needed = TRUE;
	}

	/*
	 * Clean up all the state information for the
	 * pending transactions on this bus.
	 */
	found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);

	max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;

#ifdef AHC_TARGET_MODE
	/*
	 * Send an immediate notify ccb to all target more peripheral
	 * drivers affected by this action.
	 */
	for (target = 0; target <= max_scsiid; target++) {
		struct ahc_tmode_tstate* tstate;
		u_int lun;

		tstate = ahc->enabled_targets[target];
		if (tstate == NULL)
			continue;
		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
			struct ahc_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
					       EVENT_TYPE_BUS_RESET, /*arg*/0);
			ahc_send_lstate_events(ahc, lstate);
		}
	}
#endif
	/* Notify the XPT that a bus reset occurred */
	ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD,
		       CAM_LUN_WILDCARD, AC_BUS_RESET, NULL);

	/*
	 * Revert to async/narrow transfers until we renegotiate.
	 */
	for (target = 0; target <= max_scsiid; target++) {

		if (ahc->enabled_targets[target] == NULL)
			continue;
		for (initiator = 0; initiator <= max_scsiid; initiator++) {
			struct ahc_devinfo devinfo;

			ahc_compile_devinfo(&devinfo, target, initiator,
					    CAM_LUN_WILDCARD,
					    channel, ROLE_UNKNOWN);
			ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
				      AHC_TRANS_CUR, /*paused*/TRUE);
			ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
					 /*period*/0, /*offset*/0,
					 /*ppr_options*/0, AHC_TRANS_CUR,
					 /*paused*/TRUE);
		}
	}

	if (restart_needed)
		ahc_restart(ahc);
	else
		ahc_unpause(ahc);
	return found;
}


/***************************** Residual Processing ****************************/
/*
 * Calculate the residual for a just completed SCB.
 */
void
ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct status_pkt *spkt;
	uint32_t sgptr;
	uint32_t resid_sgptr;
	uint32_t resid;

	/*
	 * 5 cases.
	 * 1) No residual.
	 *    SG_RESID_VALID clear in sgptr. 
	 * 2) Transferless command
	 * 3) Never performed any transfers.
	 *    sgptr has SG_FULL_RESID set.
	 * 4) No residual but target did not
	 *    save data pointers after the
	 *    last transfer, so sgptr was
	 *    never updated.
	 * 5) We have a partial residual.
	 *    Use residual_sgptr to determine
	 *    where we are.
	 */

	hscb = scb->hscb;
	sgptr = ahc_le32toh(hscb->sgptr);
	if ((sgptr & SG_RESID_VALID) == 0)
		/* Case 1 */
		return;
	sgptr &= ~SG_RESID_VALID;

	if ((sgptr & SG_LIST_NULL) != 0)
		/* Case 2 */
		return;

	spkt = &hscb->shared_data.status;
	resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr);
	if ((sgptr & SG_FULL_RESID) != 0) {
		/* Case 3 */
		resid = ahc_get_transfer_length(scb);
	} else if ((resid_sgptr & SG_LIST_NULL) != 0) {
		/* Case 4 */
		return;
	} else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
		panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
	} else {
		struct ahc_dma_seg *sg;

		/*
		 * Remainder of the SG where the transfer
		 * stopped.
		 */
		resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
		sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);

		/* The residual sg_ptr always points to the next sg */
		sg--;

		/*
		 * Add up the contents of all residual
		 * SG segments that are after the SG where
		 * the transfer stopped.
		 */
		while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) {
			sg++;
			resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
		}
	}
	/* Sense transfers account their residual separately. */
	if ((scb->flags & SCB_SENSE) == 0)
		ahc_set_residual(scb, resid);
	else
		ahc_set_sense_residual(scb, resid);

#ifdef AHC_DEBUG
	if ((ahc_debug & AHC_SHOW_MISC) != 0) {
		ahc_print_path(ahc, scb);
		printf("Handled %sResidual of %d bytes\n",
		       (scb->flags & SCB_SENSE) ? "Sense " : "", resid);
	}
#endif
}

/******************************* Target Mode **********************************/
#ifdef AHC_TARGET_MODE
/*
 * Add a target mode event to this lun's queue
 */
static void
ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate,
		       u_int initiator_id, u_int event_type, u_int event_arg)
{
	struct ahc_tmode_event *event;
	int pending;

	xpt_freeze_devq(lstate->path, /*count*/1);
	/* Number of events currently queued in the circular buffer. */
	if (lstate->event_w_idx >= lstate->event_r_idx)
		pending = lstate->event_w_idx - lstate->event_r_idx;
	else
		pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
			- (lstate->event_r_idx - lstate->event_w_idx);

	if (event_type == EVENT_TYPE_BUS_RESET
	 || event_type == MSG_BUS_DEV_RESET) {
		/*
		 * Any earlier events are irrelevant, so reset our buffer.
		 * This has the effect of allowing us to deal with reset
		 * floods (an external device holding down the reset line)
		 * without losing the event that is really interesting. 
		 */
		lstate->event_r_idx = 0;
		lstate->event_w_idx = 0;
		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
	}

	if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
		/* Buffer full: drop the oldest event to make room. */
		xpt_print_path(lstate->path);
		printf("immediate event %x:%x lost\n",
		       lstate->event_buffer[lstate->event_r_idx].event_type,
		       lstate->event_buffer[lstate->event_r_idx].event_arg);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
	}

	event = &lstate->event_buffer[lstate->event_w_idx];
	event->initiator_id = initiator_id;
	event->event_type = event_type;
	event->event_arg = event_arg;
	lstate->event_w_idx++;
	if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
		lstate->event_w_idx = 0;
}

/*
 * Send any target mode events queued up waiting
 * for immediate notify resources.
 */
void
ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate)
{
	struct ccb_hdr *ccbh;
	struct ccb_immed_notify *inot;

	/* Drain while we have both a queued event and a notify CCB. */
	while (lstate->event_r_idx != lstate->event_w_idx
	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
		struct ahc_tmode_event *event;

		event = &lstate->event_buffer[lstate->event_r_idx];
		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
		inot = (struct ccb_immed_notify *)ccbh;
		switch (event->event_type) {
		case EVENT_TYPE_BUS_RESET:
			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
			break;
		default:
			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
			inot->message_args[0] = event->event_type;
			inot->message_args[1] = event->event_arg;
			break;
		}
		inot->initiator_id = event->initiator_id;
		inot->sense_len = 0;
		xpt_done((union ccb *)inot);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
	}
}
#endif

/******************** Sequencer Program Patching/Download *********************/

#ifdef AHC_DUMP_SEQ
/*
 * Read back and print the sequencer's instruction RAM, one 32-bit
 * instruction per line.  Debug aid only.
 */
void
ahc_dumpseq(struct ahc_softc* ahc)
{
	int i;

	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);
	for (i = 0; i < ahc->instruction_ram_size; i++) {
		uint8_t ins_bytes[4];

		ahc_insb(ahc, SEQRAM, ins_bytes, 4);
		printf("0x%08x\n", ins_bytes[0] << 24
				 | ins_bytes[1] << 16
				 | ins_bytes[2] << 8
				 | ins_bytes[3]);
	}
}
#endif

/*
 * Download the (patched) sequencer program into instruction RAM,
 * recording where each firmware critical section landed after
 * patching.  Returns 0 on success or ENOMEM if the patched program
 * does not fit in this chip's instruction memory.
 */
static int
ahc_loadseq(struct ahc_softc *ahc)
{
	struct	cs cs_table[num_critical_sections];
	u_int	begin_set[num_critical_sections];
	u_int	end_set[num_critical_sections];
	struct	patch *cur_patch;
	u_int	cs_count;
	u_int	cur_cs;
	u_int	i;
	u_int	skip_addr;
	u_int	sg_prefetch_cnt;
	int	downloaded;
	uint8_t	download_consts[7];

	/*
	 * Start out with 0 critical sections
	 * that apply to this firmware load. 
	 */
	cs_count = 0;
	cur_cs = 0;
	memset(begin_set, 0, sizeof(begin_set));
	memset(end_set, 0, sizeof(end_set));

	/* Setup downloadable constant table */
	download_consts[QOUTFIFO_OFFSET] = 0;
	if (ahc->targetcmds != NULL)
		download_consts[QOUTFIFO_OFFSET] += 32;
	download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
	download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
	download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
	sg_prefetch_cnt = ahc->pci_cachesize;
	/* Prefetch at least two SG segments worth of data. */
	if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
		sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);

	cur_patch = patches;
	downloaded = 0;
	skip_addr = 0;
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);

	for (i = 0; i < sizeof(seqprog)/4; i++) {
		if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
			/*
			 * Don't download this instruction as it
			 * is in a patch that was removed.
			 */
			continue;
		}

		if (downloaded == ahc->instruction_ram_size) {
			/*
			 * We're about to exceed the instruction
			 * storage capacity for this chip.  Fail
			 * the load.
			 */
			printf("\n%s: Program too large for instruction memory "
			       "size of %d!\n", ahc_name(ahc),
			       ahc->instruction_ram_size);
			return (ENOMEM);
		}

		/*
		 * Move through the CS table until we find a CS
		 * that might apply to this instruction.
		 */
		for (; cur_cs < num_critical_sections; cur_cs++) {
			if (critical_sections[cur_cs].end <= i) {
				if (begin_set[cs_count] == TRUE
				 && end_set[cs_count] == FALSE) {
					/* Close out the current section. */
					cs_table[cs_count].end = downloaded;
					end_set[cs_count] = TRUE;
					cs_count++;
				}
				continue;
			}
			if (critical_sections[cur_cs].begin <= i
			 && begin_set[cs_count] == FALSE) {
				/*
				 * Record the section's start in terms of
				 * post-patch (downloaded) addresses.
				 */
				cs_table[cs_count].begin = downloaded;
				begin_set[cs_count] = TRUE;
			}
			break;
		}
		ahc_download_instr(ahc, i, download_consts);
		downloaded++;
	}

	ahc->num_critical_sections = cs_count;
	if (cs_count != 0) {

		cs_count *= sizeof(struct cs);
		ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
		if (ahc->critical_sections == NULL)
			panic("ahc_loadseq: Could not malloc");
		memcpy(ahc->critical_sections, cs_table, cs_count);
	}
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);

	if (bootverbose) {
		printf(" %d instructions downloaded\n", downloaded);
		printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
		       ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags);
	}
	return (0);
}

/*
 * Evaluate any firmware patches that begin at start_instr, updating
 * *start_patch and *skip_addr as patches are accepted or rejected.
 * Returns 1 if the instruction should be downloaded, 0 if it belongs
 * to a rejected patch and must be skipped.
 */
static int
ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
		u_int start_instr, u_int *skip_addr)
{
	struct	patch *cur_patch;
	struct	patch *last_patch;
	u_int	num_patches;

	num_patches = sizeof(patches)/sizeof(struct patch);
	last_patch = &patches[num_patches];
	cur_patch = *start_patch;

	while (cur_patch < last_patch && start_instr == cur_patch->begin) {

		if (cur_patch->patch_func(ahc) == 0) {

			/* Start rejecting code */
			*skip_addr = start_instr + cur_patch->skip_instr;
			cur_patch += cur_patch->skip_patch;
		} else {
			/* Accepted this patch.  Advance to the next
			 * one and wait for our intruction pointer to
			 * hit this point.
			 */
			cur_patch++;
		}
	}

	*start_patch = cur_patch;
	if (start_instr < *skip_addr)
		/* Still skipping */
		return (0);

	return (1);
}

/*
 * Download a single sequencer instruction, relocating jump targets to
 * account for patched-out code, substituting downloadable constants,
 * and applying the instruction format required by this chip family.
 */
static void
ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
{
	union	ins_formats instr;
	struct	ins_format1 *fmt1_ins;
	struct	ins_format3 *fmt3_ins;
	u_int	opcode;

	/*
	 * The firmware is always compiled into a little endian format.
	 */
	instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);

	fmt1_ins = &instr.format1;
	fmt3_ins = NULL;

	/* Pull the opcode */
	opcode = instr.format1.opcode;
	switch (opcode) {
	case AIC_OP_JMP:
	case AIC_OP_JC:
	case AIC_OP_JNC:
	case AIC_OP_CALL:
	case AIC_OP_JNE:
	case AIC_OP_JNZ:
	case AIC_OP_JE:
	case AIC_OP_JZ:
	{
		struct patch *cur_patch;
		int address_offset;
		u_int address;
		u_int skip_addr;
		u_int i;

		fmt3_ins = &instr.format3;
		address_offset = 0;
		address = fmt3_ins->address;
		cur_patch = patches;
		skip_addr = 0;

		/*
		 * Count how many instructions before the jump target
		 * were removed by rejected patches so the target can
		 * be relocated.
		 */
		for (i = 0; i < address;) {

			ahc_check_patch(ahc, &cur_patch, i, &skip_addr);

			if (skip_addr > i) {
				int end_addr;

				end_addr = MIN(address, skip_addr);
				address_offset += end_addr - i;
				i = skip_addr;
			} else {
				i++;
			}
		}
		address -= address_offset;
		fmt3_ins->address = address;
		/* FALLTHROUGH */
	}
	case AIC_OP_OR:
	case AIC_OP_AND:
	case AIC_OP_XOR:
	case AIC_OP_ADD:
	case AIC_OP_ADC:
	case AIC_OP_BMOV:
		/*
		 * The parity bit doubles as the "immediate is a
		 * downloadable constant index" flag in the source image.
		 */
		if (fmt1_ins->parity != 0) {
			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
		}
		fmt1_ins->parity = 0;
		if ((ahc->features & AHC_CMD_CHAN) == 0
		 && opcode == AIC_OP_BMOV) {
			/*
			 * Block move was added at the same time
			 * as the command channel. 
Verify that 6471 * this is only a move of a single element 6472 * and convert the BMOV to a MOV 6473 * (AND with an immediate of FF). 6474 */ 6475 if (fmt1_ins->immediate != 1) 6476 panic("%s: BMOV not supported\n", 6477 ahc_name(ahc)); 6478 fmt1_ins->opcode = AIC_OP_AND; 6479 fmt1_ins->immediate = 0xff; 6480 } 6481 /* FALLTHROUGH */ 6482 case AIC_OP_ROL: 6483 if ((ahc->features & AHC_ULTRA2) != 0) { 6484 int i, count; 6485 6486 /* Calculate odd parity for the instruction */ 6487 for (i = 0, count = 0; i < 31; i++) { 6488 uint32_t mask; 6489 6490 mask = 0x01 << i; 6491 if ((instr.integer & mask) != 0) 6492 count++; 6493 } 6494 if ((count & 0x01) == 0) 6495 instr.format1.parity = 1; 6496 } else { 6497 /* Compress the instruction for older sequencers */ 6498 if (fmt3_ins != NULL) { 6499 instr.integer = 6500 fmt3_ins->immediate 6501 | (fmt3_ins->source << 8) 6502 | (fmt3_ins->address << 16) 6503 | (fmt3_ins->opcode << 25); 6504 } else { 6505 instr.integer = 6506 fmt1_ins->immediate 6507 | (fmt1_ins->source << 8) 6508 | (fmt1_ins->destination << 16) 6509 | (fmt1_ins->ret << 24) 6510 | (fmt1_ins->opcode << 25); 6511 } 6512 } 6513 /* The sequencer is a little endian cpu */ 6514 instr.integer = ahc_htole32(instr.integer); 6515 ahc_outsb(ahc, SEQRAM, instr.bytes, 4); 6516 break; 6517 default: 6518 panic("Unknown opcode encountered in seq program"); 6519 break; 6520 } 6521 } 6522 6523 int 6524 ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries, 6525 const char *name, u_int address, u_int value, 6526 u_int *cur_column, u_int wrap_point) 6527 { 6528 int printed; 6529 u_int printed_mask; 6530 6531 if (cur_column != NULL && *cur_column >= wrap_point) { 6532 printf("\n"); 6533 *cur_column = 0; 6534 } 6535 printed = printf("%s[0x%x]", name, value); 6536 if (table == NULL) { 6537 printed += printf(" "); 6538 *cur_column += printed; 6539 return (printed); 6540 } 6541 printed_mask = 0; 6542 while (printed_mask != 0xFF) { 6543 int entry; 6544 6545 for (entry = 0; 
entry < num_entries; entry++) { 6546 if (((value & table[entry].mask) 6547 != table[entry].value) 6548 || ((printed_mask & table[entry].mask) 6549 == table[entry].mask)) 6550 continue; 6551 6552 printed += printf("%s%s", 6553 printed_mask == 0 ? ":(" : "|", 6554 table[entry].name); 6555 printed_mask |= table[entry].mask; 6556 6557 break; 6558 } 6559 if (entry >= num_entries) 6560 break; 6561 } 6562 if (printed_mask != 0) 6563 printed += printf(") "); 6564 else 6565 printed += printf(" "); 6566 if (cur_column != NULL) 6567 *cur_column += printed; 6568 return (printed); 6569 } 6570 6571 void 6572 ahc_dump_card_state(struct ahc_softc *ahc) 6573 { 6574 struct scb *scb; 6575 struct scb_tailq *untagged_q; 6576 u_int cur_col; 6577 int paused; 6578 int target; 6579 int maxtarget; 6580 int i; 6581 uint8_t last_phase; 6582 uint8_t qinpos; 6583 uint8_t qintail; 6584 uint8_t qoutpos; 6585 uint8_t scb_index; 6586 uint8_t saved_scbptr; 6587 6588 if (ahc_is_paused(ahc)) { 6589 paused = 1; 6590 } else { 6591 paused = 0; 6592 ahc_pause(ahc); 6593 } 6594 6595 saved_scbptr = ahc_inb(ahc, SCBPTR); 6596 last_phase = ahc_inb(ahc, LASTPHASE); 6597 printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n" 6598 "%s: Dumping Card State %s, at SEQADDR 0x%x\n", 6599 ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg, 6600 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); 6601 if (paused) 6602 printf("Card was paused\n"); 6603 printf("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n", 6604 ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX), 6605 ahc_inb(ahc, ARG_2)); 6606 printf("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT), 6607 ahc_inb(ahc, SCBPTR)); 6608 cur_col = 0; 6609 if ((ahc->features & AHC_DT) != 0) 6610 ahc_scsiphase_print(ahc_inb(ahc, SCSIPHASE), &cur_col, 50); 6611 ahc_scsisigi_print(ahc_inb(ahc, SCSISIGI), &cur_col, 50); 6612 ahc_error_print(ahc_inb(ahc, ERROR), &cur_col, 50); 6613 ahc_scsibusl_print(ahc_inb(ahc, SCSIBUSL), 
&cur_col, 50); 6614 ahc_lastphase_print(ahc_inb(ahc, LASTPHASE), &cur_col, 50); 6615 ahc_scsiseq_print(ahc_inb(ahc, SCSISEQ), &cur_col, 50); 6616 ahc_sblkctl_print(ahc_inb(ahc, SBLKCTL), &cur_col, 50); 6617 ahc_scsirate_print(ahc_inb(ahc, SCSIRATE), &cur_col, 50); 6618 ahc_seqctl_print(ahc_inb(ahc, SEQCTL), &cur_col, 50); 6619 ahc_seq_flags_print(ahc_inb(ahc, SEQ_FLAGS), &cur_col, 50); 6620 ahc_sstat0_print(ahc_inb(ahc, SSTAT0), &cur_col, 50); 6621 ahc_sstat1_print(ahc_inb(ahc, SSTAT1), &cur_col, 50); 6622 ahc_sstat2_print(ahc_inb(ahc, SSTAT2), &cur_col, 50); 6623 ahc_sstat3_print(ahc_inb(ahc, SSTAT3), &cur_col, 50); 6624 ahc_simode0_print(ahc_inb(ahc, SIMODE0), &cur_col, 50); 6625 ahc_simode1_print(ahc_inb(ahc, SIMODE1), &cur_col, 50); 6626 ahc_sxfrctl0_print(ahc_inb(ahc, SXFRCTL0), &cur_col, 50); 6627 ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50); 6628 ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50); 6629 if (cur_col != 0) 6630 printf("\n"); 6631 printf("STACK:"); 6632 for (i = 0; i < STACK_SIZE; i++) 6633 printf(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8)); 6634 printf("\nSCB count = %d\n", ahc->scb_data->numscbs); 6635 printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag); 6636 printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB)); 6637 /* QINFIFO */ 6638 printf("QINFIFO entries: "); 6639 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 6640 qinpos = ahc_inb(ahc, SNSCB_QOFF); 6641 ahc_outb(ahc, SNSCB_QOFF, qinpos); 6642 } else 6643 qinpos = ahc_inb(ahc, QINPOS); 6644 qintail = ahc->qinfifonext; 6645 while (qinpos != qintail) { 6646 printf("%d ", ahc->qinfifo[qinpos]); 6647 qinpos++; 6648 } 6649 printf("\n"); 6650 6651 printf("Waiting Queue entries: "); 6652 scb_index = ahc_inb(ahc, WAITING_SCBH); 6653 i = 0; 6654 while (scb_index != SCB_LIST_NULL && i++ < 256) { 6655 ahc_outb(ahc, SCBPTR, scb_index); 6656 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); 6657 scb_index = ahc_inb(ahc, SCB_NEXT); 6658 } 6659 
printf("\n"); 6660 6661 printf("Disconnected Queue entries: "); 6662 scb_index = ahc_inb(ahc, DISCONNECTED_SCBH); 6663 i = 0; 6664 while (scb_index != SCB_LIST_NULL && i++ < 256) { 6665 ahc_outb(ahc, SCBPTR, scb_index); 6666 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); 6667 scb_index = ahc_inb(ahc, SCB_NEXT); 6668 } 6669 printf("\n"); 6670 6671 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); 6672 printf("QOUTFIFO entries: "); 6673 qoutpos = ahc->qoutfifonext; 6674 i = 0; 6675 while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) { 6676 printf("%d ", ahc->qoutfifo[qoutpos]); 6677 qoutpos++; 6678 } 6679 printf("\n"); 6680 6681 printf("Sequencer Free SCB List: "); 6682 scb_index = ahc_inb(ahc, FREE_SCBH); 6683 i = 0; 6684 while (scb_index != SCB_LIST_NULL && i++ < 256) { 6685 ahc_outb(ahc, SCBPTR, scb_index); 6686 printf("%d ", scb_index); 6687 scb_index = ahc_inb(ahc, SCB_NEXT); 6688 } 6689 printf("\n"); 6690 6691 printf("Sequencer SCB Info: "); 6692 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 6693 ahc_outb(ahc, SCBPTR, i); 6694 cur_col = printf("\n%3d ", i); 6695 6696 ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60); 6697 ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60); 6698 ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60); 6699 ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); 6700 } 6701 printf("\n"); 6702 6703 printf("Pending list: "); 6704 i = 0; 6705 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { 6706 if (i++ > 256) 6707 break; 6708 cur_col = printf("\n%3d ", scb->hscb->tag); 6709 ahc_scb_control_print(scb->hscb->control, &cur_col, 60); 6710 ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60); 6711 ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60); 6712 if ((ahc->flags & AHC_PAGESCBS) == 0) { 6713 ahc_outb(ahc, SCBPTR, scb->hscb->tag); 6714 printf("("); 6715 ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), 6716 &cur_col, 60); 6717 ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); 6718 
printf(")"); 6719 } 6720 } 6721 printf("\n"); 6722 6723 printf("Kernel Free SCB list: "); 6724 i = 0; 6725 SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) { 6726 if (i++ > 256) 6727 break; 6728 printf("%d ", scb->hscb->tag); 6729 } 6730 printf("\n"); 6731 6732 maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7; 6733 for (target = 0; target <= maxtarget; target++) { 6734 untagged_q = &ahc->untagged_queues[target]; 6735 if (TAILQ_FIRST(untagged_q) == NULL) 6736 continue; 6737 printf("Untagged Q(%d): ", target); 6738 i = 0; 6739 TAILQ_FOREACH(scb, untagged_q, links.tqe) { 6740 if (i++ > 256) 6741 break; 6742 printf("%d ", scb->hscb->tag); 6743 } 6744 printf("\n"); 6745 } 6746 6747 ahc_platform_dump_card_state(ahc); 6748 printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); 6749 ahc_outb(ahc, SCBPTR, saved_scbptr); 6750 if (paused == 0) 6751 ahc_unpause(ahc); 6752 } 6753 6754 /************************* Target Mode ****************************************/ 6755 #ifdef AHC_TARGET_MODE 6756 cam_status 6757 ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb, 6758 struct ahc_tmode_tstate **tstate, 6759 struct ahc_tmode_lstate **lstate, 6760 int notfound_failure) 6761 { 6762 6763 if ((ahc->features & AHC_TARGETMODE) == 0) 6764 return (CAM_REQ_INVALID); 6765 6766 /* 6767 * Handle the 'black hole' device that sucks up 6768 * requests to unattached luns on enabled targets. 6769 */ 6770 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD 6771 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 6772 *tstate = NULL; 6773 *lstate = ahc->black_hole; 6774 } else { 6775 u_int max_id; 6776 6777 max_id = (ahc->features & AHC_WIDE) ? 
15 : 7; 6778 if (ccb->ccb_h.target_id > max_id) 6779 return (CAM_TID_INVALID); 6780 6781 if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS) 6782 return (CAM_LUN_INVALID); 6783 6784 *tstate = ahc->enabled_targets[ccb->ccb_h.target_id]; 6785 *lstate = NULL; 6786 if (*tstate != NULL) 6787 *lstate = 6788 (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; 6789 } 6790 6791 if (notfound_failure != 0 && *lstate == NULL) 6792 return (CAM_PATH_INVALID); 6793 6794 return (CAM_REQ_CMP); 6795 } 6796 6797 void 6798 ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) 6799 { 6800 struct ahc_tmode_tstate *tstate; 6801 struct ahc_tmode_lstate *lstate; 6802 struct ccb_en_lun *cel; 6803 cam_status status; 6804 u_long s; 6805 u_int target; 6806 u_int lun; 6807 u_int target_mask; 6808 u_int our_id; 6809 int error; 6810 char channel; 6811 6812 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate, 6813 /*notfound_failure*/FALSE); 6814 6815 if (status != CAM_REQ_CMP) { 6816 ccb->ccb_h.status = status; 6817 return; 6818 } 6819 6820 if (cam_sim_bus(sim) == 0) 6821 our_id = ahc->our_id; 6822 else 6823 our_id = ahc->our_id_b; 6824 6825 if (ccb->ccb_h.target_id != our_id) { 6826 /* 6827 * our_id represents our initiator ID, or 6828 * the ID of the first target to have an 6829 * enabled lun in target mode. There are 6830 * two cases that may preclude enabling a 6831 * target id other than our_id. 6832 * 6833 * o our_id is for an active initiator role. 6834 * Since the hardware does not support 6835 * reselections to the initiator role at 6836 * anything other than our_id, and our_id 6837 * is used by the hardware to indicate the 6838 * ID to use for both select-out and 6839 * reselect-out operations, the only target 6840 * ID we can support in this mode is our_id. 6841 * 6842 * o The MULTARGID feature is not available and 6843 * a previous target mode ID has been enabled. 
6844 */ 6845 if ((ahc->features & AHC_MULTIROLE) != 0) { 6846 6847 if ((ahc->features & AHC_MULTI_TID) != 0 6848 && (ahc->flags & AHC_INITIATORROLE) != 0) { 6849 /* 6850 * Only allow additional targets if 6851 * the initiator role is disabled. 6852 * The hardware cannot handle a re-select-in 6853 * on the initiator id during a re-select-out 6854 * on a different target id. 6855 */ 6856 status = CAM_TID_INVALID; 6857 } else if ((ahc->flags & AHC_INITIATORROLE) != 0 6858 || ahc->enabled_luns > 0) { 6859 /* 6860 * Only allow our target id to change 6861 * if the initiator role is not configured 6862 * and there are no enabled luns which 6863 * are attached to the currently registered 6864 * scsi id. 6865 */ 6866 status = CAM_TID_INVALID; 6867 } 6868 } else if ((ahc->features & AHC_MULTI_TID) == 0 6869 && ahc->enabled_luns > 0) { 6870 6871 status = CAM_TID_INVALID; 6872 } 6873 } 6874 6875 if (status != CAM_REQ_CMP) { 6876 ccb->ccb_h.status = status; 6877 return; 6878 } 6879 6880 /* 6881 * We now have an id that is valid. 6882 * If we aren't in target mode, switch modes. 6883 */ 6884 if ((ahc->flags & AHC_TARGETROLE) == 0 6885 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 6886 u_long s; 6887 ahc_flag saved_flags; 6888 6889 printf("Configuring Target Mode\n"); 6890 ahc_lock(ahc, &s); 6891 if (LIST_FIRST(&ahc->pending_scbs) != NULL) { 6892 ccb->ccb_h.status = CAM_BUSY; 6893 ahc_unlock(ahc, &s); 6894 return; 6895 } 6896 saved_flags = ahc->flags; 6897 ahc->flags |= AHC_TARGETROLE; 6898 if ((ahc->features & AHC_MULTIROLE) == 0) 6899 ahc->flags &= ~AHC_INITIATORROLE; 6900 ahc_pause(ahc); 6901 error = ahc_loadseq(ahc); 6902 if (error != 0) { 6903 /* 6904 * Restore original configuration and notify 6905 * the caller that we cannot support target mode. 6906 * Since the adapter started out in this 6907 * configuration, the firmware load will succeed, 6908 * so there is no point in checking ahc_loadseq's 6909 * return value. 
6910 */ 6911 ahc->flags = saved_flags; 6912 (void)ahc_loadseq(ahc); 6913 ahc_restart(ahc); 6914 ahc_unlock(ahc, &s); 6915 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 6916 return; 6917 } 6918 ahc_restart(ahc); 6919 ahc_unlock(ahc, &s); 6920 } 6921 cel = &ccb->cel; 6922 target = ccb->ccb_h.target_id; 6923 lun = ccb->ccb_h.target_lun; 6924 channel = SIM_CHANNEL(ahc, sim); 6925 target_mask = 0x01 << target; 6926 if (channel == 'B') 6927 target_mask <<= 8; 6928 6929 if (cel->enable != 0) { 6930 u_int scsiseq; 6931 6932 /* Are we already enabled?? */ 6933 if (lstate != NULL) { 6934 xpt_print_path(ccb->ccb_h.path); 6935 printf("Lun already enabled\n"); 6936 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 6937 return; 6938 } 6939 6940 if (cel->grp6_len != 0 6941 || cel->grp7_len != 0) { 6942 /* 6943 * Don't (yet?) support vendor 6944 * specific commands. 6945 */ 6946 ccb->ccb_h.status = CAM_REQ_INVALID; 6947 printf("Non-zero Group Codes\n"); 6948 return; 6949 } 6950 6951 /* 6952 * Seems to be okay. 6953 * Setup our data structures. 
6954 */ 6955 if (target != CAM_TARGET_WILDCARD && tstate == NULL) { 6956 tstate = ahc_alloc_tstate(ahc, target, channel); 6957 if (tstate == NULL) { 6958 xpt_print_path(ccb->ccb_h.path); 6959 printf("Couldn't allocate tstate\n"); 6960 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 6961 return; 6962 } 6963 } 6964 lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT); 6965 if (lstate == NULL) { 6966 xpt_print_path(ccb->ccb_h.path); 6967 printf("Couldn't allocate lstate\n"); 6968 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 6969 return; 6970 } 6971 memset(lstate, 0, sizeof(*lstate)); 6972 status = xpt_create_path(&lstate->path, /*periph*/NULL, 6973 xpt_path_path_id(ccb->ccb_h.path), 6974 xpt_path_target_id(ccb->ccb_h.path), 6975 xpt_path_lun_id(ccb->ccb_h.path)); 6976 if (status != CAM_REQ_CMP) { 6977 free(lstate, M_DEVBUF); 6978 xpt_print_path(ccb->ccb_h.path); 6979 printf("Couldn't allocate path\n"); 6980 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 6981 return; 6982 } 6983 SLIST_INIT(&lstate->accept_tios); 6984 SLIST_INIT(&lstate->immed_notifies); 6985 ahc_lock(ahc, &s); 6986 ahc_pause(ahc); 6987 if (target != CAM_TARGET_WILDCARD) { 6988 tstate->enabled_luns[lun] = lstate; 6989 ahc->enabled_luns++; 6990 6991 if ((ahc->features & AHC_MULTI_TID) != 0) { 6992 u_int targid_mask; 6993 6994 targid_mask = ahc_inb(ahc, TARGID) 6995 | (ahc_inb(ahc, TARGID + 1) << 8); 6996 6997 targid_mask |= target_mask; 6998 ahc_outb(ahc, TARGID, targid_mask); 6999 ahc_outb(ahc, TARGID+1, (targid_mask >> 8)); 7000 7001 ahc_update_scsiid(ahc, targid_mask); 7002 } else { 7003 u_int our_id; 7004 char channel; 7005 7006 channel = SIM_CHANNEL(ahc, sim); 7007 our_id = SIM_SCSI_ID(ahc, sim); 7008 7009 /* 7010 * This can only happen if selections 7011 * are not enabled 7012 */ 7013 if (target != our_id) { 7014 u_int sblkctl; 7015 char cur_channel; 7016 int swap; 7017 7018 sblkctl = ahc_inb(ahc, SBLKCTL); 7019 cur_channel = (sblkctl & SELBUSB) 7020 ? 
'B' : 'A'; 7021 if ((ahc->features & AHC_TWIN) == 0) 7022 cur_channel = 'A'; 7023 swap = cur_channel != channel; 7024 if (channel == 'A') 7025 ahc->our_id = target; 7026 else 7027 ahc->our_id_b = target; 7028 7029 if (swap) 7030 ahc_outb(ahc, SBLKCTL, 7031 sblkctl ^ SELBUSB); 7032 7033 ahc_outb(ahc, SCSIID, target); 7034 7035 if (swap) 7036 ahc_outb(ahc, SBLKCTL, sblkctl); 7037 } 7038 } 7039 } else 7040 ahc->black_hole = lstate; 7041 /* Allow select-in operations */ 7042 if (ahc->black_hole != NULL && ahc->enabled_luns > 0) { 7043 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); 7044 scsiseq |= ENSELI; 7045 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); 7046 scsiseq = ahc_inb(ahc, SCSISEQ); 7047 scsiseq |= ENSELI; 7048 ahc_outb(ahc, SCSISEQ, scsiseq); 7049 } 7050 ahc_unpause(ahc); 7051 ahc_unlock(ahc, &s); 7052 ccb->ccb_h.status = CAM_REQ_CMP; 7053 xpt_print_path(ccb->ccb_h.path); 7054 printf("Lun now enabled for target mode\n"); 7055 } else { 7056 struct scb *scb; 7057 int i, empty; 7058 7059 if (lstate == NULL) { 7060 ccb->ccb_h.status = CAM_LUN_INVALID; 7061 return; 7062 } 7063 7064 ahc_lock(ahc, &s); 7065 7066 ccb->ccb_h.status = CAM_REQ_CMP; 7067 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { 7068 struct ccb_hdr *ccbh; 7069 7070 ccbh = &scb->io_ctx->ccb_h; 7071 if (ccbh->func_code == XPT_CONT_TARGET_IO 7072 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ 7073 printf("CTIO pending\n"); 7074 ccb->ccb_h.status = CAM_REQ_INVALID; 7075 ahc_unlock(ahc, &s); 7076 return; 7077 } 7078 } 7079 7080 if (SLIST_FIRST(&lstate->accept_tios) != NULL) { 7081 printf("ATIOs pending\n"); 7082 ccb->ccb_h.status = CAM_REQ_INVALID; 7083 } 7084 7085 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { 7086 printf("INOTs pending\n"); 7087 ccb->ccb_h.status = CAM_REQ_INVALID; 7088 } 7089 7090 if (ccb->ccb_h.status != CAM_REQ_CMP) { 7091 ahc_unlock(ahc, &s); 7092 return; 7093 } 7094 7095 xpt_print_path(ccb->ccb_h.path); 7096 printf("Target mode disabled\n"); 7097 xpt_free_path(lstate->path); 
7098 free(lstate, M_DEVBUF); 7099 7100 ahc_pause(ahc); 7101 /* Can we clean up the target too? */ 7102 if (target != CAM_TARGET_WILDCARD) { 7103 tstate->enabled_luns[lun] = NULL; 7104 ahc->enabled_luns--; 7105 for (empty = 1, i = 0; i < 8; i++) 7106 if (tstate->enabled_luns[i] != NULL) { 7107 empty = 0; 7108 break; 7109 } 7110 7111 if (empty) { 7112 ahc_free_tstate(ahc, target, channel, 7113 /*force*/FALSE); 7114 if (ahc->features & AHC_MULTI_TID) { 7115 u_int targid_mask; 7116 7117 targid_mask = ahc_inb(ahc, TARGID) 7118 | (ahc_inb(ahc, TARGID + 1) 7119 << 8); 7120 7121 targid_mask &= ~target_mask; 7122 ahc_outb(ahc, TARGID, targid_mask); 7123 ahc_outb(ahc, TARGID+1, 7124 (targid_mask >> 8)); 7125 ahc_update_scsiid(ahc, targid_mask); 7126 } 7127 } 7128 } else { 7129 7130 ahc->black_hole = NULL; 7131 7132 /* 7133 * We can't allow selections without 7134 * our black hole device. 7135 */ 7136 empty = TRUE; 7137 } 7138 if (ahc->enabled_luns == 0) { 7139 /* Disallow select-in */ 7140 u_int scsiseq; 7141 7142 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); 7143 scsiseq &= ~ENSELI; 7144 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); 7145 scsiseq = ahc_inb(ahc, SCSISEQ); 7146 scsiseq &= ~ENSELI; 7147 ahc_outb(ahc, SCSISEQ, scsiseq); 7148 7149 if ((ahc->features & AHC_MULTIROLE) == 0) { 7150 printf("Configuring Initiator Mode\n"); 7151 ahc->flags &= ~AHC_TARGETROLE; 7152 ahc->flags |= AHC_INITIATORROLE; 7153 /* 7154 * Returning to a configuration that 7155 * fit previously will always succeed. 7156 */ 7157 (void)ahc_loadseq(ahc); 7158 ahc_restart(ahc); 7159 /* 7160 * Unpaused. The extra unpause 7161 * that follows is harmless. 
7162 */ 7163 } 7164 } 7165 ahc_unpause(ahc); 7166 ahc_unlock(ahc, &s); 7167 } 7168 } 7169 7170 static void 7171 ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask) 7172 { 7173 u_int scsiid_mask; 7174 u_int scsiid; 7175 7176 if ((ahc->features & AHC_MULTI_TID) == 0) 7177 panic("ahc_update_scsiid called on non-multitid unit\n"); 7178 7179 /* 7180 * Since we will rely on the TARGID mask 7181 * for selection enables, ensure that OID 7182 * in SCSIID is not set to some other ID 7183 * that we don't want to allow selections on. 7184 */ 7185 if ((ahc->features & AHC_ULTRA2) != 0) 7186 scsiid = ahc_inb(ahc, SCSIID_ULTRA2); 7187 else 7188 scsiid = ahc_inb(ahc, SCSIID); 7189 scsiid_mask = 0x1 << (scsiid & OID); 7190 if ((targid_mask & scsiid_mask) == 0) { 7191 u_int our_id; 7192 7193 /* ffs counts from 1 */ 7194 our_id = ffs(targid_mask); 7195 if (our_id == 0) 7196 our_id = ahc->our_id; 7197 else 7198 our_id--; 7199 scsiid &= TID; 7200 scsiid |= our_id; 7201 } 7202 if ((ahc->features & AHC_ULTRA2) != 0) 7203 ahc_outb(ahc, SCSIID_ULTRA2, scsiid); 7204 else 7205 ahc_outb(ahc, SCSIID, scsiid); 7206 } 7207 7208 void 7209 ahc_run_tqinfifo(struct ahc_softc *ahc, int paused) 7210 { 7211 struct target_cmd *cmd; 7212 7213 /* 7214 * If the card supports auto-access pause, 7215 * we can access the card directly regardless 7216 * of whether it is paused or not. 7217 */ 7218 if ((ahc->features & AHC_AUTOPAUSE) != 0) 7219 paused = TRUE; 7220 7221 ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD); 7222 while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) { 7223 7224 /* 7225 * Only advance through the queue if we 7226 * have the resources to process the command. 
7227 */ 7228 if (ahc_handle_target_cmd(ahc, cmd) != 0) 7229 break; 7230 7231 cmd->cmd_valid = 0; 7232 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, 7233 ahc->shared_data_dmamap, 7234 ahc_targetcmd_offset(ahc, ahc->tqinfifonext), 7235 sizeof(struct target_cmd), 7236 BUS_DMASYNC_PREREAD); 7237 ahc->tqinfifonext++; 7238 7239 /* 7240 * Lazily update our position in the target mode incoming 7241 * command queue as seen by the sequencer. 7242 */ 7243 if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { 7244 if ((ahc->features & AHC_HS_MAILBOX) != 0) { 7245 u_int hs_mailbox; 7246 7247 hs_mailbox = ahc_inb(ahc, HS_MAILBOX); 7248 hs_mailbox &= ~HOST_TQINPOS; 7249 hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS; 7250 ahc_outb(ahc, HS_MAILBOX, hs_mailbox); 7251 } else { 7252 if (!paused) 7253 ahc_pause(ahc); 7254 ahc_outb(ahc, KERNEL_TQINPOS, 7255 ahc->tqinfifonext & HOST_TQINPOS); 7256 if (!paused) 7257 ahc_unpause(ahc); 7258 } 7259 } 7260 } 7261 } 7262 7263 static int 7264 ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd) 7265 { 7266 struct ahc_tmode_tstate *tstate; 7267 struct ahc_tmode_lstate *lstate; 7268 struct ccb_accept_tio *atio; 7269 uint8_t *byte; 7270 int initiator; 7271 int target; 7272 int lun; 7273 7274 initiator = SCSIID_TARGET(ahc, cmd->scsiid); 7275 target = SCSIID_OUR_ID(cmd->scsiid); 7276 lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); 7277 7278 byte = cmd->bytes; 7279 tstate = ahc->enabled_targets[target]; 7280 lstate = NULL; 7281 if (tstate != NULL) 7282 lstate = tstate->enabled_luns[lun]; 7283 7284 /* 7285 * Commands for disabled luns go to the black hole driver. 7286 */ 7287 if (lstate == NULL) 7288 lstate = ahc->black_hole; 7289 7290 atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); 7291 if (atio == NULL) { 7292 ahc->flags |= AHC_TQINFIFO_BLOCKED; 7293 /* 7294 * Wait for more ATIOs from the peripheral driver for this lun. 
7295 */ 7296 if (bootverbose) 7297 printf("%s: ATIOs exhausted\n", ahc_name(ahc)); 7298 return (1); 7299 } else 7300 ahc->flags &= ~AHC_TQINFIFO_BLOCKED; 7301 #if 0 7302 printf("Incoming command from %d for %d:%d%s\n", 7303 initiator, target, lun, 7304 lstate == ahc->black_hole ? "(Black Holed)" : ""); 7305 #endif 7306 SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); 7307 7308 if (lstate == ahc->black_hole) { 7309 /* Fill in the wildcards */ 7310 atio->ccb_h.target_id = target; 7311 atio->ccb_h.target_lun = lun; 7312 } 7313 7314 /* 7315 * Package it up and send it off to 7316 * whomever has this lun enabled. 7317 */ 7318 atio->sense_len = 0; 7319 atio->init_id = initiator; 7320 if (byte[0] != 0xFF) { 7321 /* Tag was included */ 7322 atio->tag_action = *byte++; 7323 atio->tag_id = *byte++; 7324 atio->ccb_h.flags = CAM_TAG_ACTION_VALID; 7325 } else { 7326 atio->ccb_h.flags = 0; 7327 } 7328 byte++; 7329 7330 /* Okay. Now determine the cdb size based on the command code */ 7331 switch (*byte >> CMD_GROUP_CODE_SHIFT) { 7332 case 0: 7333 atio->cdb_len = 6; 7334 break; 7335 case 1: 7336 case 2: 7337 atio->cdb_len = 10; 7338 break; 7339 case 4: 7340 atio->cdb_len = 16; 7341 break; 7342 case 5: 7343 atio->cdb_len = 12; 7344 break; 7345 case 3: 7346 default: 7347 /* Only copy the opcode. */ 7348 atio->cdb_len = 1; 7349 printf("Reserved or VU command code type encountered\n"); 7350 break; 7351 } 7352 7353 memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); 7354 7355 atio->ccb_h.status |= CAM_CDB_RECVD; 7356 7357 if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { 7358 /* 7359 * We weren't allowed to disconnect. 7360 * We're hanging on the bus until a 7361 * continue target I/O comes in response 7362 * to this accept tio. 
7363 */ 7364 #if 0 7365 printf("Received Immediate Command %d:%d:%d - %p\n", 7366 initiator, target, lun, ahc->pending_device); 7367 #endif 7368 ahc->pending_device = lstate; 7369 ahc_freeze_ccb((union ccb *)atio); 7370 atio->ccb_h.flags |= CAM_DIS_DISCONNECT; 7371 } 7372 xpt_done((union ccb*)atio); 7373 return (0); 7374 } 7375 7376 #endif 7377