1 /* 2 * Generic driver for the aic7xxx based adaptec SCSI controllers 3 * Product specific probe and attach routines can be found in: 4 * i386/eisa/ahc_eisa.c 27/284X and aic7770 motherboard controllers 5 * pci/ahc_pci.c 3985, 3980, 3940, 2940, aic7895, aic7890, 6 * aic7880, aic7870, aic7860, and aic7850 controllers 7 * 8 * Copyright (c) 1994, 1995, 1996, 1997, 1998, 1999, 2000 Justin T. Gibbs. 9 * All rights reserved. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions, and the following disclaimer, 16 * without modification. 17 * 2. The name of the author may not be used to endorse or promote products 18 * derived from this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * the GNU Public License ("GPL"). 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 27 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 * 35 * $FreeBSD$ 36 */ 37 /* 38 * A few notes on features of the driver. 
39 * 40 * SCB paging takes advantage of the fact that devices stay disconnected 41 * from the bus a relatively long time and that while they're disconnected, 42 * having the SCBs for these transactions down on the host adapter is of 43 * little use. Instead of leaving this idle SCB down on the card we copy 44 * it back up into kernel memory and reuse the SCB slot on the card to 45 * schedule another transaction. This can be a real payoff when doing random 46 * I/O to tagged queueing devices since there are more transactions active at 47 * once for the device to sort for optimal seek reduction. The algorithm goes 48 * like this... 49 * 50 * The sequencer maintains two lists of its hardware SCBs. The first is the 51 * singly linked free list which tracks all SCBs that are not currently in 52 * use. The second is the doubly linked disconnected list which holds the 53 * SCBs of transactions that are in the disconnected state sorted most 54 * recently disconnected first. When the kernel queues a transaction to 55 * the card, a hardware SCB to "house" this transaction is retrieved from 56 * either of these two lists. If the SCB came from the disconnected list, 57 * a check is made to see if any data transfer or SCB linking (more on linking 58 * in a bit) information has been changed since it was copied from the host 59 * and if so, DMAs the SCB back up before it can be used. Once a hardware 60 * SCB has been obtained, the SCB is DMAed from the host. Before any work 61 * can begin on this SCB, the sequencer must ensure that either the SCB is 62 * for a tagged transaction or the target is not already working on another 63 * non-tagged transaction. If a conflict arises in the non-tagged case, the 64 * sequencer finds the SCB for the active transactions and sets the SCB_LINKED 65 * field in that SCB to this next SCB to execute. 
To facilitate finding 66 * active non-tagged SCBs, the last four bytes of up to the first four hardware 67 * SCBs serve as a storage area for the currently active SCB ID for each 68 * target. 69 * 70 * When a device reconnects, a search is made of the hardware SCBs to find 71 * the SCB for this transaction. If the search fails, a hardware SCB is 72 * pulled from either the free or disconnected SCB list and the proper 73 * SCB is DMAed from the host. If the MK_MESSAGE control bit is set 74 * in the control byte of the SCB while it was disconnected, the sequencer 75 * will assert ATN and attempt to issue a message to the host. 76 * 77 * When a command completes, a check for non-zero status and residuals is 78 * made. If either of these conditions exists, the SCB is DMAed back up to 79 * the host so that it can interpret this information. Additionally, in the 80 * case of bad status, the sequencer generates a special interrupt and pauses 81 * itself. This allows the host to setup a request sense command if it 82 * chooses for this target synchronously with the error so that sense 83 * information isn't lost. 
 *
 */

#include <opt_aic7xxx.h>

#include <pci.h>
#include <stddef.h>	/* For offsetof */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <sys/proc.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#if NPCI > 0
#include <machine/bus_memio.h>
#endif
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/aic7xxx/aic7xxx.h>
#include <dev/aic7xxx/sequencer.h>

#include <aic7xxx_reg.h>
#include <aic7xxx_seq.h>

#include <sys/kernel.h>

#ifndef AHC_TMODE_ENABLE
#define AHC_TMODE_ENABLE 0
#endif

/* NOTE(review): both arguments are evaluated twice; avoid side effects. */
#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#define ALL_CHANNELS '\0'
#define ALL_TARGETS_MASK 0xFFFF
#define INITIATOR_WILDCARD	(~0)

/*
 * Accessors keyed off which SIM (bus A or B of a twin-channel
 * controller) a request arrived on.
 */
#define SIM_IS_SCSIBUS_B(ahc, sim)	\
	((sim) == ahc->sim_b)
#define SIM_CHANNEL(ahc, sim)	\
	(((sim) == ahc->sim_b) ? 'B' : 'A')
#define SIM_SCSI_ID(ahc, sim)	\
	(((sim) == ahc->sim_b) ? ahc->our_id_b : ahc->our_id)
#define SIM_PATH(ahc, sim)	\
	(((sim) == ahc->sim_b) ? ahc->path_b : ahc->path)

/*
 * Decode fields of an SCB's packed tcl (target/channel/lun) byte.
 * The SELBUSB bit selects bus B; TID and LID mask the target id and lun.
 */
#define SCB_IS_SCSIBUS_B(scb)	\
	(((scb)->hscb->tcl & SELBUSB) != 0)
#define SCB_TARGET(scb)	\
	(((scb)->hscb->tcl & TID) >> 4)
#define SCB_CHANNEL(scb)	\
	(SCB_IS_SCSIBUS_B(scb) ? 'B' : 'A')
#define SCB_LUN(scb)	\
	((scb)->hscb->tcl & LID)
/* Bus B targets occupy offsets 8-15 in per-target arrays/bitmasks. */
#define SCB_TARGET_OFFSET(scb)	\
	(SCB_TARGET(scb) + (SCB_IS_SCSIBUS_B(scb) ? 8 : 0))
#define SCB_TARGET_MASK(scb)	\
	(0x01 << (SCB_TARGET_OFFSET(scb)))
#define TCL_CHANNEL(ahc, tcl)	\
	((((ahc)->features & AHC_TWIN) && ((tcl) & SELBUSB)) ? 'B' : 'A')
#define TCL_SCSI_ID(ahc, tcl)	\
	(TCL_CHANNEL((ahc), (tcl)) == 'B' ? (ahc)->our_id_b : (ahc)->our_id)
#define TCL_TARGET(tcl) (((tcl) & TID) >> TCL_TARGET_SHIFT)
#define TCL_LUN(tcl) ((tcl) & LID)

/* CCB private pointers: stash the scb and softc on each ccb. */
#define ccb_scb_ptr spriv_ptr0
#define ccb_ahc_ptr spriv_ptr1

/* Printable chip names, indexed by the ahc_chip enumeration. */
char *ahc_chip_names[] =
{
	"NONE",
	"aic7770",
	"aic7850",
	"aic7855",
	"aic7859",
	"aic7860",
	"aic7870",
	"aic7880",
	"aic7890/91",
	"aic7892",
	"aic7895",
	"aic7896/97",
	"aic7899"
};

typedef enum {
	ROLE_UNKNOWN,
	ROLE_INITIATOR,
	ROLE_TARGET
} role_t;

/*
 * Identity of the device/connection a piece of work refers to:
 * our scsiid, the remote target/lun, channel, and current role.
 */
struct ahc_devinfo {
	int	  our_scsiid;
	int	  target_offset;
	u_int16_t target_mask;
	u_int8_t  target;
	u_int8_t  lun;
	char	  channel;
	role_t	  role;		/*
				 * Only guaranteed to be correct if not
				 * in the busfree state.
				 */
};

/* What to do with each matching entry while walking a queue. */
typedef enum {
	SEARCH_COMPLETE,
	SEARCH_COUNT,
	SEARCH_REMOVE
} ahc_search_action;

#ifdef AHC_DEBUG
static int     ahc_debug = AHC_DEBUG;
#endif

#if NPCI > 0
void ahc_pci_intr(struct ahc_softc *ahc);
#endif

static int	ahcinitscbdata(struct ahc_softc *ahc);
static void	ahcfiniscbdata(struct ahc_softc *ahc);

static bus_dmamap_callback_t	ahcdmamapcb;

#if UNUSED
static void	ahc_dump_targcmd(struct target_cmd *cmd);
#endif
static void	ahc_shutdown(void *arg, int howto);
static cam_status
		ahc_find_tmode_devs(struct ahc_softc *ahc,
				    struct cam_sim *sim, union ccb *ccb,
				    struct tmode_tstate **tstate,
				    struct tmode_lstate **lstate,
				    int notfound_failure);
static void	ahc_action(struct cam_sim *sim, union ccb *ccb);
static void	ahc_async(void *callback_arg, u_int32_t code,
			  struct cam_path *path, void *arg);
static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments, int error);
static void	ahc_poll(struct cam_sim *sim);
static void	ahc_setup_data(struct ahc_softc *ahc,
			       struct ccb_scsiio *csio, struct scb *scb);
static void	ahc_freeze_devq(struct ahc_softc *ahc, struct cam_path *path);
static void	ahcallocscbs(struct ahc_softc *ahc);
#if UNUSED
static void	ahc_scb_devinfo(struct ahc_softc *ahc,
				struct ahc_devinfo *devinfo,
				struct scb *scb);
#endif
static void	ahc_fetch_devinfo(struct ahc_softc *ahc,
				  struct ahc_devinfo *devinfo);
static void	ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id,
				    u_int target, u_int lun, char channel,
				    role_t role);
static u_int	ahc_abort_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev);
static void	ahc_done(struct ahc_softc *ahc, struct scb *scbp);
static struct tmode_tstate *
		ahc_alloc_tstate(struct ahc_softc *ahc,
				 u_int scsi_id, char channel);
static void	ahc_free_tstate(struct ahc_softc *ahc,
				u_int scsi_id, char channel, int force);
static void	ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim,
				  union ccb *ccb);
static void	ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask);
static int	ahc_handle_target_cmd(struct ahc_softc *ahc,
				      struct target_cmd *cmd);
static void	ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat);
static void	ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat);
static void	ahc_build_transfer_msg(struct ahc_softc *ahc,
				       struct ahc_devinfo *devinfo);
static void	ahc_setup_initiator_msgout(struct ahc_softc *ahc,
					   struct ahc_devinfo *devinfo,
					   struct scb *scb);
static void	ahc_setup_target_msgin(struct ahc_softc *ahc,
				       struct ahc_devinfo *devinfo);
static int	ahc_handle_msg_reject(struct ahc_softc *ahc,
				      struct ahc_devinfo *devinfo);
static void	ahc_clear_msg_state(struct ahc_softc *ahc);
static void	ahc_handle_message_phase(struct ahc_softc *ahc,
					 struct cam_path *path);
static int	ahc_sent_msg(struct ahc_softc *ahc, u_int msgtype, int full);

typedef enum {
	MSGLOOP_IN_PROG,
	MSGLOOP_MSGCOMPLETE,
	MSGLOOP_TERMINATED
} msg_loop_stat;

static int	ahc_parse_msg(struct ahc_softc *ahc, struct cam_path *path,
			      struct ahc_devinfo *devinfo);
static void	ahc_handle_ign_wide_residue(struct ahc_softc *ahc,
					    struct ahc_devinfo *devinfo);
static void	ahc_handle_devreset(struct ahc_softc *ahc,
				    struct ahc_devinfo *devinfo,
				    cam_status status, ac_code acode,
				    char *message,
				    int verbose_level);
#ifdef AHC_DUMP_SEQ
static void	ahc_dumpseq(struct ahc_softc *ahc);
#endif
static void	ahc_loadseq(struct ahc_softc *ahc);
static int	ahc_check_patch(struct ahc_softc *ahc,
				struct patch **start_patch,
				int start_instr, int *skip_addr);
static void	ahc_download_instr(struct ahc_softc *ahc,
				   int instrptr, u_int8_t *dconsts);
static int	ahc_match_scb(struct scb *scb, int target, char channel,
			      int lun, u_int tag, role_t role);
#ifdef AHC_DEBUG
static void	ahc_print_scb(struct scb *scb);
#endif
static int	ahc_search_qinfifo(struct ahc_softc *ahc, int target,
				   char channel, int lun, u_int tag,
				   role_t role, u_int32_t status,
				   ahc_search_action action);
static void	ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim,
			      union ccb *ccb);
static int	ahc_reset_channel(struct ahc_softc *ahc, char channel,
				  int initiate_reset);
static int	ahc_abort_scbs(struct ahc_softc *ahc, int target,
			       char channel, int lun, u_int tag, role_t role,
			       u_int32_t status);
static int	ahc_search_disc_list(struct ahc_softc *ahc, int target,
				     char channel, int lun, u_int tag,
				     int stop_on_first, int remove,
				     int save_state);
static u_int	ahc_rem_scb_from_disc_list(struct ahc_softc *ahc,
					   u_int prev, u_int scbptr);
static void	ahc_add_curscb_to_free_list(struct ahc_softc *ahc);
static void	ahc_clear_intstat(struct ahc_softc *ahc);
static void	ahc_reset_current_bus(struct ahc_softc *ahc);
static struct ahc_syncrate *
		ahc_devlimited_syncrate(struct ahc_softc *ahc, u_int *period);
static struct ahc_syncrate *
		ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
				  u_int maxsync);
static u_int	ahc_find_period(struct ahc_softc *ahc, u_int scsirate,
				u_int maxsync);
static void	ahc_validate_offset(struct ahc_softc *ahc,
				    struct ahc_syncrate *syncrate,
				    u_int *offset, int wide);
static void	ahc_update_target_msg_request(struct ahc_softc *ahc,
					      struct ahc_devinfo *devinfo,
					      struct ahc_initiator_tinfo *tinfo,
					      int force, int paused);
static int	ahc_create_path(struct ahc_softc *ahc,
				struct ahc_devinfo *devinfo,
				struct cam_path **path);
static void	ahc_set_syncrate(struct ahc_softc *ahc,
				 struct ahc_devinfo *devinfo,
				 struct cam_path *path,
				 struct ahc_syncrate *syncrate,
				 u_int period, u_int offset, u_int type,
				 int paused);
static void	ahc_set_width(struct ahc_softc *ahc,
			      struct ahc_devinfo *devinfo,
			      struct cam_path *path, u_int width, u_int type,
			      int paused);
static void	ahc_set_tags(struct ahc_softc *ahc,
			     struct ahc_devinfo *devinfo,
			     int enable);
static void	ahc_construct_sdtr(struct ahc_softc *ahc,
				   u_int period, u_int offset);

static void	ahc_construct_wdtr(struct ahc_softc *ahc, u_int bus_width);

static void	ahc_calc_residual(struct scb *scb);

static void	ahc_update_pending_syncrates(struct ahc_softc *ahc);

static void	ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);

static timeout_t
		ahc_timeout;

static void	ahc_queue_lstate_event(struct ahc_softc *ahc,
				       struct tmode_lstate *lstate,
				       u_int initiator_id, u_int event_type,
				       u_int event_arg);
static void	ahc_send_lstate_events(struct ahc_softc *ahc,
				       struct tmode_lstate *lstate);
static __inline int  sequencer_paused(struct ahc_softc *ahc);
static __inline void pause_sequencer(struct ahc_softc *ahc);
static __inline void unpause_sequencer(struct ahc_softc *ahc);
static void	restart_sequencer(struct ahc_softc *ahc);
static __inline u_int ahc_index_busy_tcl(struct ahc_softc *ahc,
					 u_int tcl, int unbusy);

static __inline void	   ahc_busy_tcl(struct ahc_softc *ahc, struct scb *scb);

static __inline void	   ahc_freeze_ccb(union ccb* ccb);
static __inline cam_status ahc_ccb_status(union ccb* ccb);
static __inline void	   ahcsetccbstatus(union ccb* ccb,
					   cam_status status);
static void	ahc_run_tqinfifo(struct ahc_softc *ahc, int paused);
static void	ahc_run_qoutfifo(struct ahc_softc *ahc);

static __inline struct ahc_initiator_tinfo *
		ahc_fetch_transinfo(struct ahc_softc *ahc,
				    char channel,
				    u_int our_id, u_int target,
				    struct tmode_tstate **tstate);
static void	ahcfreescb(struct ahc_softc *ahc, struct scb *scb);
static
__inline struct scb *ahcgetscb(struct ahc_softc *ahc);

/*
 * Translate a hardware SCB index into the bus address of that SCB's
 * host-memory backing store (hscbs are contiguous, so this is simply
 * base + index * element size).
 */
static __inline u_int32_t
ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
{
	return (ahc->scb_data->hscb_busaddr
	      + (sizeof(struct hardware_scb) * index));
}

#define AHC_BUSRESET_DELAY	25	/* Reset delay in us */

/* Return non-zero once the sequencer has acknowledged a pause request. */
static __inline int
sequencer_paused(struct ahc_softc *ahc)
{
	return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
}

/* Request a sequencer pause and spin until it takes effect. */
static __inline void
pause_sequencer(struct ahc_softc *ahc)
{
	ahc_outb(ahc, HCNTRL, ahc->pause);

	/*
	 * Since the sequencer can disable pausing in a critical section, we
	 * must loop until it actually stops.
	 */
	while (sequencer_paused(ahc) == 0)
		;
}

/*
 * Let the sequencer run again, but only if no host interrupt sources
 * are still pending; otherwise the interrupt handler still owns the chip.
 */
static __inline void
unpause_sequencer(struct ahc_softc *ahc)
{
	if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
		ahc_outb(ahc, HCNTRL, ahc->unpause);
}

/*
 * Restart the sequencer program from address zero
 */
static void
restart_sequencer(struct ahc_softc *ahc)
{
	u_int i;

	pause_sequencer(ahc);

	/*
	 * Everytime we restart the sequencer, there
	 * is the possiblitity that we have restarted
	 * within a three instruction window where an
	 * SCB has been marked free but has not made it
	 * onto the free list.  Since SCSI events(bus reset,
	 * unexpected bus free) will always freeze the
	 * sequencer, we cannot close this window.  To
	 * avoid losing an SCB, we reconsitute the free
	 * list every time we restart the sequencer.
	 */
	ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {

		/* Any hardware SCB with a NULL tag is not in use. */
		ahc_outb(ahc, SCBPTR, i);
		if (ahc_inb(ahc, SCB_TAG) == SCB_LIST_NULL)
			ahc_add_curscb_to_free_list(ahc);
	}
	ahc_outb(ahc, SEQCTL, FASTMODE|SEQRESET);
	unpause_sequencer(ahc);
}

/*
 * Return the tag of the untagged transaction currently active on the
 * given tcl (SCB_LIST_NULL if none).  When 'unbusy' is non-zero, also
 * clear the entry so the tcl is marked idle.
 */
static __inline u_int
ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl, int unbusy)
{
	u_int scbid;

	scbid = ahc->untagged_scbs[tcl];
	if (unbusy)
		ahc->untagged_scbs[tcl] = SCB_LIST_NULL;

	return (scbid);
}

/* Record this SCB as the active untagged transaction for its tcl. */
static __inline void
ahc_busy_tcl(struct ahc_softc *ahc, struct scb *scb)
{
	ahc->untagged_scbs[scb->hscb->tcl] = scb->hscb->tag;
}

/*
 * Freeze the device queue for this ccb's device, but only once per ccb;
 * CAM_DEV_QFRZN records that we already hold a freeze count for it.
 */
static __inline void
ahc_freeze_ccb(union ccb* ccb)
{
	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
	}
}

/* Extract just the CAM status code from a ccb's status word. */
static __inline cam_status
ahc_ccb_status(union ccb* ccb)
{
	return (ccb->ccb_h.status & CAM_STATUS_MASK);
}

/* Replace the CAM status code, preserving the flag bits. */
static __inline void
ahcsetccbstatus(union ccb* ccb, cam_status status)
{
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= status;
}

/*
 * Look up the negotiated transfer parameters for a given
 * channel/our_id/remote_id triple, also returning the owning tstate.
 */
static __inline struct ahc_initiator_tinfo *
ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
		    u_int remote_id, struct tmode_tstate **tstate)
{
	/*
	 * Transfer data structures are stored from the perspective
	 * of the target role.  Since the parameters for a connection
	 * in the initiator role to a given target are the same as
	 * when the roles are reversed, we pretend we are the target.
516 */ 517 if (channel == 'B') 518 our_id += 8; 519 *tstate = ahc->enabled_targets[our_id]; 520 return (&(*tstate)->transinfo[remote_id]); 521 } 522 523 static void 524 ahc_run_tqinfifo(struct ahc_softc *ahc, int paused) 525 { 526 struct target_cmd *cmd; 527 528 while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) { 529 530 /* 531 * Only advance through the queue if we 532 * had the resources to process the command. 533 */ 534 if (ahc_handle_target_cmd(ahc, cmd) != 0) 535 break; 536 537 ahc->tqinfifonext++; 538 cmd->cmd_valid = 0; 539 540 /* 541 * Lazily update our position in the target mode incomming 542 * command queue as seen by the sequencer. 543 */ 544 if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { 545 if ((ahc->features & AHC_HS_MAILBOX) != 0) { 546 u_int hs_mailbox; 547 548 hs_mailbox = ahc_inb(ahc, HS_MAILBOX); 549 hs_mailbox &= ~HOST_TQINPOS; 550 hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS; 551 ahc_outb(ahc, HS_MAILBOX, hs_mailbox); 552 } else { 553 if (!paused) 554 pause_sequencer(ahc); 555 ahc_outb(ahc, KERNEL_TQINPOS, 556 ahc->tqinfifonext & HOST_TQINPOS); 557 if (!paused) 558 unpause_sequencer(ahc); 559 } 560 } 561 } 562 } 563 564 static void 565 ahc_run_qoutfifo(struct ahc_softc *ahc) 566 { 567 struct scb *scb; 568 u_int scb_index; 569 570 while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) { 571 scb_index = ahc->qoutfifo[ahc->qoutfifonext]; 572 ahc->qoutfifo[ahc->qoutfifonext++] = SCB_LIST_NULL; 573 574 scb = &ahc->scb_data->scbarray[scb_index]; 575 if (scb_index >= ahc->scb_data->numscbs 576 || (scb->flags & SCB_ACTIVE) == 0) { 577 printf("%s: WARNING no command for scb %d " 578 "(cmdcmplt)\nQOUTPOS = %d\n", 579 ahc_name(ahc), scb_index, 580 ahc->qoutfifonext - 1); 581 continue; 582 } 583 584 /* 585 * Save off the residual 586 * if there is one. 
587 */ 588 if (scb->hscb->residual_SG_count != 0) 589 ahc_calc_residual(scb); 590 else 591 scb->ccb->csio.resid = 0; 592 ahc_done(ahc, scb); 593 } 594 } 595 596 597 /* 598 * An scb (and hence an scb entry on the board) is put onto the 599 * free list. 600 */ 601 static void 602 ahcfreescb(struct ahc_softc *ahc, struct scb *scb) 603 { 604 struct hardware_scb *hscb; 605 int opri; 606 607 hscb = scb->hscb; 608 609 opri = splcam(); 610 611 if ((ahc->flags & AHC_RESOURCE_SHORTAGE) != 0 612 && (scb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { 613 scb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 614 ahc->flags &= ~AHC_RESOURCE_SHORTAGE; 615 } 616 617 /* Clean up for the next user */ 618 scb->flags = SCB_FREE; 619 hscb->control = 0; 620 hscb->status = 0; 621 622 SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links); 623 splx(opri); 624 } 625 626 /* 627 * Get a free scb, either one already assigned to a hardware slot 628 * on the adapter or one that will require an SCB to be paged out before 629 * use. If there are none, see if we can allocate a new SCB. Otherwise 630 * either return an error or sleep. 
 */
static __inline struct scb *
ahcgetscb(struct ahc_softc *ahc)
{
	struct scb *scbp;
	int opri;

	opri = splcam();
	if ((scbp = SLIST_FIRST(&ahc->scb_data->free_scbs))) {
		SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links);
	} else {
		/* Free list empty; try to grow the pool, then retry once. */
		ahcallocscbs(ahc);
		scbp = SLIST_FIRST(&ahc->scb_data->free_scbs);
		if (scbp != NULL)
			SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links);
	}

	splx(opri);

	return (scbp);
}

/*
 * Format this controller's name ("ahc<unit>").
 * NOTE(review): the result points at static storage, so it is
 * overwritten by the next call and is not reentrant.
 */
char *
ahc_name(struct ahc_softc *ahc)
{
	static char name[10];

	snprintf(name, sizeof(name), "ahc%d", ahc->unit);
	return (name);
}

#ifdef AHC_DEBUG
/* Dump the interesting fields of an SCB to the console. */
static void
ahc_print_scb(struct scb *scb)
{
	struct hardware_scb *hscb = scb->hscb;

	printf("scb:%p control:0x%x tcl:0x%x cmdlen:%d cmdpointer:0x%x\n",
	       scb,
	       hscb->control,
	       hscb->tcl,
	       hscb->cmdlen,
	       hscb->cmdpointer);
	printf(" datlen:%d data:0x%x segs:0x%x segp:0x%x\n",
	       hscb->datalen,
	       hscb->data,
	       hscb->SG_count,
	       hscb->SG_pointer);
	printf(" sg_addr:%x sg_len:%d\n",
	       scb->sg_list[0].addr,
	       scb->sg_list[0].len);
	printf(" cdb:%x %x %x %x %x %x %x %x %x %x %x %x\n",
	       hscb->cmdstore[0], hscb->cmdstore[1], hscb->cmdstore[2],
	       hscb->cmdstore[3], hscb->cmdstore[4], hscb->cmdstore[5],
	       hscb->cmdstore[6], hscb->cmdstore[7], hscb->cmdstore[8],
	       hscb->cmdstore[9], hscb->cmdstore[10], hscb->cmdstore[11]);
}
#endif

/* Map BRKADRINT error status bits to human readable messages. */
static struct {
	u_int8_t errno;
	char *errmesg;
} hard_error[] = {
	{ ILLHADDR,  "Illegal Host Access" },
	{ ILLSADDR,  "Illegal Sequencer Address referrenced" },
	{ ILLOPCODE, "Illegal Opcode in sequencer program" },
	{ SQPARERR,  "Sequencer Parity Error" },
	{ DPARERR,   "Data-path Parity Error" },
	{ MPARERR,   "Scratch or SCB Memory Parity Error" },
	{ PCIERRSTAT, "PCI Error detected" },
	{ CIOPARERR, "CIOBUS Parity Error" },
};
static const int num_errors =
	sizeof(hard_error)/sizeof(hard_error[0]);

/*
 * Map SCSI bus phases to printable names and to the message we should
 * send in response to a parity error seen in that phase.
 */
static struct {
	u_int8_t phase;
	u_int8_t mesg_out; /* Message response to parity errors */
	char *phasemsg;
} phase_table[] = {
	{ P_DATAOUT, MSG_NOOP,              "in Data-out phase" },
	{ P_DATAIN,  MSG_INITIATOR_DET_ERR, "in Data-in phase" },
	{ P_COMMAND, MSG_NOOP,              "in Command phase" },
	{ P_MESGOUT, MSG_NOOP,              "in Message-out phase" },
	{ P_STATUS,  MSG_INITIATOR_DET_ERR, "in Status phase" },
	{ P_MESGIN,  MSG_PARITY_ERROR,      "in Message-in phase" },
	{ P_BUSFREE, MSG_NOOP,              "while idle" },
	{ 0,         MSG_NOOP,              "in unknown phase" }
};
/* The "unknown phase" catch-all entry is excluded from searches. */
static const int num_phases = (sizeof(phase_table)/sizeof(phase_table[0])) - 1;

/*
 * Valid SCSIRATE values.  (p. 3-17)
 * Provides a mapping of tranfer periods in ns to the proper value to
 * stick in the scsiscfr reg to use that transfer rate.
 */
#define AHC_SYNCRATE_DT 0
#define AHC_SYNCRATE_ULTRA2 1
#define AHC_SYNCRATE_ULTRA 3
#define AHC_SYNCRATE_FAST 6
static struct ahc_syncrate ahc_syncrates[] = {
	/* ultra2  fast/ultra  period  rate */
	{ 0x42,  0x000,  9,  "80.0" },
	{ 0x03,  0x000,  10, "40.0" },
	{ 0x04,  0x000,  11, "33.0" },
	{ 0x05,  0x100,  12, "20.0" },
	{ 0x06,  0x110,  15, "16.0" },
	{ 0x07,  0x120,  18, "13.4" },
	{ 0x08,  0x000,  25, "10.0" },
	{ 0x19,  0x010,  31, "8.0"  },
	{ 0x1a,  0x020,  37, "6.67" },
	{ 0x1b,  0x030,  43, "5.7"  },
	{ 0x1c,  0x040,  50, "5.0"  },
	{ 0x00,  0x050,  56, "4.4"  },
	{ 0x00,  0x060,  62, "4.0"  },
	{ 0x00,  0x070,  68, "3.6"  },
	{ 0x00,  0x000,  0,  NULL   }	/* list terminator */
};

/*
 * Allocate a controller structure for a new device and initialize it.
 */
struct ahc_softc *
ahc_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id,
	  bus_dma_tag_t parent_dmat, ahc_chip chip, ahc_feature features,
	  ahc_flag flags, struct scb_data *scb_data)
{
	/*
	 * find unit and check we have that many defined
	 */
	struct ahc_softc *ahc;
	size_t alloc_size;

	/*
	 * Allocate a storage area for us
	 */
	if (scb_data == NULL)
		/*
		 * We are not sharing SCB space with another controller
		 * so allocate our own SCB data space.
		 */
		alloc_size = sizeof(struct full_ahc_softc);
	else
		alloc_size = sizeof(struct ahc_softc);
	ahc = malloc(alloc_size, M_DEVBUF, M_NOWAIT);
	if (!ahc) {
		device_printf(dev, "cannot malloc softc!\n");
		return NULL;
	}
	bzero(ahc, alloc_size);
	LIST_INIT(&ahc->pending_ccbs);
	ahc->device = dev;
	ahc->unit = device_get_unit(dev);
	ahc->regs_res_type = regs_type;
	ahc->regs_res_id = regs_id;
	ahc->regs = regs;
	ahc->tag = rman_get_bustag(regs);
	ahc->bsh = rman_get_bushandle(regs);
	ahc->parent_dmat = parent_dmat;
	ahc->chip = chip;
	ahc->features = features;
	ahc->flags = flags;
	if (scb_data == NULL) {
		/* Point at the SCB storage embedded in the full softc. */
		struct full_ahc_softc* full_softc = (struct full_ahc_softc*)ahc;
		ahc->scb_data = &full_softc->scb_data_storage;
	} else
		ahc->scb_data = scb_data;

	/* Capture the chip's unpause value, preserving interrupt mode. */
	ahc->unpause = (ahc_inb(ahc, HCNTRL) & IRQMS) | INTEN;
	/* The IRQMS bit is only valid on VL and EISA chips */
	if ((ahc->chip & AHC_PCI) != 0)
		ahc->unpause &= ~IRQMS;
	ahc->pause = ahc->unpause | PAUSE;
	return (ahc);
}

/*
 * Tear down a controller instance, releasing resources in reverse
 * order of ahc->init_level (the switch intentionally falls through).
 */
void
ahc_free(ahc)
	struct ahc_softc *ahc;
{
	ahcfiniscbdata(ahc);
	switch (ahc->init_level) {
	case 3:
		bus_dmamap_unload(ahc->shared_data_dmat,
				  ahc->shared_data_dmamap);
		/* FALLTHROUGH */
	case 2:
		bus_dmamem_free(ahc->shared_data_dmat, ahc->qoutfifo,
				ahc->shared_data_dmamap);
		bus_dmamap_destroy(ahc->shared_data_dmat,
				   ahc->shared_data_dmamap);
		/* FALLTHROUGH */
	case 1:
		bus_dma_tag_destroy(ahc->buffer_dmat);
		break;
	}

	if (ahc->regs != NULL)
		bus_release_resource(ahc->device, ahc->regs_res_type,
				     ahc->regs_res_id, ahc->regs);
	if (ahc->irq != NULL)
		bus_release_resource(ahc->device, ahc->irq_res_type,
				     0, ahc->irq);

	free(ahc, M_DEVBUF);
	return;
}

/*
 * Allocate and initialize all SCB bookkeeping: the software scb array,
 * the on-chip hardware SCB free list, and the DMA tags/maps for hscbs,
 * sense buffers, and S/G lists.  Returns 0 on success, ENOMEM on any
 * failure (ahcfiniscbdata, driven by init_level, releases partial state).
 */
static int
ahcinitscbdata(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;
	int i;

	scb_data = ahc->scb_data;
	SLIST_INIT(&scb_data->free_scbs);
	SLIST_INIT(&scb_data->sg_maps);

	/* Allocate SCB resources */
	scb_data->scbarray =
	    (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX,
				 M_DEVBUF, M_NOWAIT);
	if (scb_data->scbarray == NULL)
		return (ENOMEM);
	bzero(scb_data->scbarray, sizeof(struct scb) * AHC_SCB_MAX);

	/* Determine the number of hardware SCBs and initialize them */

	scb_data->maxhscbs = ahc_probe_scbs(ahc);
	/* SCB 0 heads the free list */
	ahc_outb(ahc, FREE_SCBH, 0);
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		ahc_outb(ahc, SCBPTR, i);

		/* Clear the control byte. */
		ahc_outb(ahc, SCB_CONTROL, 0);

		/* Set the next pointer */
		ahc_outb(ahc, SCB_NEXT, i+1);

		/* Make the tag number invalid */
		ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
	}

	/* Make sure that the last SCB terminates the free list */
	ahc_outb(ahc, SCBPTR, i-1);
	ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);

	/* Ensure we clear the 0 SCB's control byte. */
	ahc_outb(ahc, SCBPTR, 0);
	ahc_outb(ahc, SCB_CONTROL, 0);

	scb_data->maxhscbs = i;

	if (ahc->scb_data->maxhscbs == 0)
		panic("%s: No SCB space found", ahc_name(ahc));

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessable memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 */

	/* DMA tag for our hardware scb structures */
	if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX * sizeof(struct hardware_scb),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocation for our ccbs */
	if (bus_dmamem_alloc(scb_data->hscb_dmat, (void **)&scb_data->hscbs,
			     BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them */
	bus_dmamap_load(scb_data->hscb_dmat, scb_data->hscb_dmamap,
			scb_data->hscbs,
			AHC_SCB_MAX * sizeof(struct hardware_scb),
			ahcdmamapcb, &scb_data->hscb_busaddr, /*flags*/0);

	scb_data->init_level++;

	/* DMA tag for our sense buffers */
	if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX * sizeof(struct scsi_sense_data),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sense_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocate them */
	if (bus_dmamem_alloc(scb_data->sense_dmat, (void **)&scb_data->sense,
			     BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them */
	bus_dmamap_load(scb_data->sense_dmat, scb_data->sense_dmamap,
			scb_data->sense,
			AHC_SCB_MAX * sizeof(struct scsi_sense_data),
			ahcdmamapcb, &scb_data->sense_busaddr, /*flags*/0);

	scb_data->init_level++;

	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sg_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Perform initial CCB allocation */
	bzero(scb_data->hscbs, AHC_SCB_MAX * sizeof(struct hardware_scb));
	ahcallocscbs(ahc);

	if (scb_data->numscbs == 0) {
		printf("%s: ahc_init_scb_data - "
		       "Unable to allocate initial scbs\n",
		       ahc_name(ahc));
		goto error_exit;
	}

	/*
	 * Note that we were successfull
	 */
	return 0;

error_exit:

	return ENOMEM;
}

/*
 * Release everything acquired by ahcinitscbdata, in reverse order of
 * scb_data->init_level (the switch intentionally falls through).
 */
static void
ahcfiniscbdata(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;

	scb_data = ahc->scb_data;

	switch (scb_data->init_level) {
	default:
	case 7:
	{
		struct sg_map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
			bus_dmamap_unload(scb_data->sg_dmat,
					  sg_map->sg_dmamap);
			bus_dmamem_free(scb_data->sg_dmat, sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		bus_dma_tag_destroy(scb_data->sg_dmat);
	}
	/* FALLTHROUGH */
	case 6:
		bus_dmamap_unload(scb_data->sense_dmat,
				  scb_data->sense_dmamap);
		/* FALLTHROUGH */
	case 5:
		bus_dmamem_free(scb_data->sense_dmat, scb_data->sense,
				scb_data->sense_dmamap);
		bus_dmamap_destroy(scb_data->sense_dmat,
				   scb_data->sense_dmamap);
		/* FALLTHROUGH */
	case 4:
		bus_dma_tag_destroy(scb_data->sense_dmat);
		/* FALLTHROUGH */
	case 3:
		bus_dmamap_unload(scb_data->hscb_dmat, scb_data->hscb_dmamap);
		/* FALLTHROUGH */
	case 2:
		bus_dmamem_free(scb_data->hscb_dmat, scb_data->hscbs,
				scb_data->hscb_dmamap);
1027 bus_dmamap_destroy(scb_data->hscb_dmat, scb_data->hscb_dmamap); 1028 case 1: 1029 bus_dma_tag_destroy(scb_data->hscb_dmat); 1030 break; 1031 } 1032 if (scb_data->scbarray != NULL) 1033 free(scb_data->scbarray, M_DEVBUF); 1034 } 1035 1036 static void 1037 ahcdmamapcb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1038 { 1039 bus_addr_t *baddr; 1040 1041 baddr = (bus_addr_t *)arg; 1042 *baddr = segs->ds_addr; 1043 } 1044 1045 int 1046 ahc_reset(struct ahc_softc *ahc) 1047 { 1048 u_int sblkctl; 1049 int wait; 1050 1051 #ifdef AHC_DUMP_SEQ 1052 if (ahc->init_level == 0) 1053 ahc_dumpseq(ahc); 1054 #endif 1055 ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause); 1056 /* 1057 * Ensure that the reset has finished 1058 */ 1059 wait = 1000; 1060 do { 1061 DELAY(1000); 1062 } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK)); 1063 1064 if (wait == 0) { 1065 printf("%s: WARNING - Failed chip reset! " 1066 "Trying to initialize anyway.\n", ahc_name(ahc)); 1067 } 1068 ahc_outb(ahc, HCNTRL, ahc->pause); 1069 1070 /* Determine channel configuration */ 1071 sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE); 1072 /* No Twin Channel PCI cards */ 1073 if ((ahc->chip & AHC_PCI) != 0) 1074 sblkctl &= ~SELBUSB; 1075 switch (sblkctl) { 1076 case 0: 1077 /* Single Narrow Channel */ 1078 break; 1079 case 2: 1080 /* Wide Channel */ 1081 ahc->features |= AHC_WIDE; 1082 break; 1083 case 8: 1084 /* Twin Channel */ 1085 ahc->features |= AHC_TWIN; 1086 break; 1087 default: 1088 printf(" Unsupported adapter type. Ignoring\n"); 1089 return(-1); 1090 } 1091 1092 return (0); 1093 } 1094 1095 /* 1096 * Called when we have an active connection to a target on the bus, 1097 * this function finds the nearest syncrate to the input period limited 1098 * by the capabilities of the bus connectivity of the target. 
 */
static struct ahc_syncrate *
ahc_devlimited_syncrate(struct ahc_softc *ahc, u_int *period) {
	u_int	maxsync;

	/*
	 * Pick the fastest sync category the current bus mode allows:
	 * Ultra2 only when the 40MHz clock is enabled and the expansion
	 * bus is inactive; otherwise fall back to Ultra or Fast.
	 */
	if ((ahc->features & AHC_ULTRA2) != 0) {
		if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
		 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
			maxsync = AHC_SYNCRATE_ULTRA2;
		} else {
			maxsync = AHC_SYNCRATE_ULTRA;
		}
	} else if ((ahc->features & AHC_ULTRA) != 0) {
		maxsync = AHC_SYNCRATE_ULTRA;
	} else {
		maxsync = AHC_SYNCRATE_FAST;
	}
	return (ahc_find_syncrate(ahc, period, maxsync));
}

/*
 * Look up the valid period to SCSIRATE conversion in our table.
 * Return the period and offset that should be sent to the target
 * if this was the beginning of an SDTR.
 * *period is updated in place; a NULL return means "use async".
 */
static struct ahc_syncrate *
ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, u_int maxsync)
{
	struct ahc_syncrate *syncrate;

	syncrate = &ahc_syncrates[maxsync];
	while ((syncrate->rate != NULL)
	    && ((ahc->features & AHC_ULTRA2) == 0
	     || (syncrate->sxfr_u2 != 0))) {

		if (*period <= syncrate->period) {
			/*
			 * When responding to a target that requests
			 * sync, the requested rate may fall between
			 * two rates that we can output, but still be
			 * a rate that we can receive.  Because of this,
			 * we want to respond to the target with
			 * the same rate that it sent to us even
			 * if the period we use to send data to it
			 * is lower.  Only lower the response period
			 * if we must.
			 */
			if (syncrate == &ahc_syncrates[maxsync])
				*period = syncrate->period;
			break;
		}
		syncrate++;
	}

	if ((*period == 0)
	 || (syncrate->rate == NULL)
	 || ((ahc->features & AHC_ULTRA2) != 0
	  && (syncrate->sxfr_u2 == 0))) {
		/* Use asynchronous transfers. */
		*period = 0;
		syncrate = NULL;
	}
	return (syncrate);
}

/*
 * Inverse of the lookup above: map a SCSIRATE register value back to
 * the transfer period from our rate table.  Returns 0 (async) when no
 * table entry matches.
 */
static u_int
ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
{
	struct ahc_syncrate *syncrate;

	if ((ahc->features & AHC_ULTRA2) != 0)
		scsirate &= SXFR_ULTRA2;
	else
		scsirate &= SXFR;

	syncrate = &ahc_syncrates[maxsync];
	while (syncrate->rate != NULL) {

		if ((ahc->features & AHC_ULTRA2) != 0) {
			if (syncrate->sxfr_u2 == 0)
				break;
			else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2))
				return (syncrate->period);
		} else if (scsirate == (syncrate->sxfr & SXFR)) {
			return (syncrate->period);
		}
		syncrate++;
	}
	return (0); /* async */
}

/*
 * Clamp *offset to the maximum the controller supports for the given
 * rate and bus width.  A NULL syncrate (async) forces offset 0.
 */
static void
ahc_validate_offset(struct ahc_softc *ahc, struct ahc_syncrate *syncrate,
		    u_int *offset, int wide)
{
	u_int maxoffset;

	/* Limit offset to what we can do */
	if (syncrate == NULL) {
		maxoffset = 0;
	} else if ((ahc->features & AHC_ULTRA2) != 0) {
		maxoffset = MAX_OFFSET_ULTRA2;
	} else {
		if (wide)
			maxoffset = MAX_OFFSET_16BIT;
		else
			maxoffset = MAX_OFFSET_8BIT;
	}
	*offset = MIN(*offset, maxoffset);
}

/*
 * Recompute whether we still need to negotiate with this target
 * (current settings differ from the goal, or a forced renegotiation of
 * non-default goals) and, if the aggregate mask changed, push the new
 * TARGET_MSG_REQUEST bitmask down to the sequencer.
 */
static void
ahc_update_target_msg_request(struct ahc_softc *ahc,
			      struct ahc_devinfo *devinfo,
			      struct ahc_initiator_tinfo *tinfo,
			      int force, int paused)
{
	u_int targ_msg_req_orig;

	targ_msg_req_orig = ahc->targ_msg_req;
	if (tinfo->current.period != tinfo->goal.period
	 || tinfo->current.width != tinfo->goal.width
	 || tinfo->current.offset != tinfo->goal.offset
	 || (force
	  && (tinfo->goal.period != 0
	   || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT)))
		ahc->targ_msg_req |= devinfo->target_mask;
	else
		ahc->targ_msg_req &= ~devinfo->target_mask;

	if (ahc->targ_msg_req != targ_msg_req_orig) {
		/* Update the message request bit for
this target */
		if ((ahc->features & AHC_HS_MAILBOX) != 0) {
			if (paused) {
				/* Sequencer stopped: write registers directly. */
				ahc_outb(ahc, TARGET_MSG_REQUEST,
					 ahc->targ_msg_req & 0xFF);
				ahc_outb(ahc, TARGET_MSG_REQUEST + 1,
					 (ahc->targ_msg_req >> 8) & 0xFF);
			} else {
				/* Let the running sequencer pick up the change. */
				ahc_outb(ahc, HS_MAILBOX,
					 0x01 << HOST_MAILBOX_SHIFT);
			}
		} else {
			/* No mailbox: pause around the register update. */
			if (!paused)
				pause_sequencer(ahc);

			ahc_outb(ahc, TARGET_MSG_REQUEST,
				 ahc->targ_msg_req & 0xFF);
			ahc_outb(ahc, TARGET_MSG_REQUEST + 1,
				 (ahc->targ_msg_req >> 8) & 0xFF);

			if (!paused)
				unpause_sequencer(ahc);
		}
	}
}

/*
 * Build a CAM path for the device described by devinfo, selecting the
 * SIM for the appropriate channel.  Returns the xpt_create_path()
 * status; on success *path must eventually be freed by the caller.
 */
static int
ahc_create_path(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		struct cam_path **path)
{
	path_id_t path_id;

	if (devinfo->channel == 'B')
		path_id = cam_sim_path(ahc->sim_b);
	else
		path_id = cam_sim_path(ahc->sim);

	return (xpt_create_path(path, /*periph*/NULL,
				path_id, devinfo->target,
				devinfo->lun));
}

/*
 * Set the synchronous transfer parameters (period/offset) for a target.
 * 'type' selects which of the current/goal/user settings to update;
 * when AHC_TRANS_ACTIVE is set the hardware registers for the active
 * connection are programmed as well.  A NULL syncrate means async.
 * Also notifies the XPT layer of the negotiated rate and refreshes the
 * negotiation-request mask.
 */
static void
ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		 struct cam_path *path, struct ahc_syncrate *syncrate,
		 u_int period, u_int offset, u_int type, int paused)
{
	struct	ahc_initiator_tinfo *tinfo;
	struct	tmode_tstate *tstate;
	u_int	old_period;
	u_int	old_offset;
	int	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;

	if (syncrate == NULL) {
		period = 0;
		offset = 0;
	}

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;

	if ((type & AHC_TRANS_CUR) != 0
	 && (old_period != period || old_offset != offset)) {
		struct	cam_path *path2;
		u_int	scsirate;

		scsirate = tinfo->scsirate;
		if ((ahc->features & AHC_ULTRA2) != 0) {

			/* XXX */
			/* Force single edge until DT is fully implemented */
			scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
			if (syncrate != NULL)
				scsirate |= syncrate->sxfr_u2|SINGLE_EDGE;

			if (active)
				ahc_outb(ahc, SCSIOFFSET, offset);
		} else {

			scsirate &= ~(SXFR|SOFS);
			/*
			 * Ensure Ultra mode is set properly for
			 * this target.
			 */
			tstate->ultraenb &= ~devinfo->target_mask;
			if (syncrate != NULL) {
				if (syncrate->sxfr & ULTRA_SXFR) {
					tstate->ultraenb |=
						devinfo->target_mask;
				}
				scsirate |= syncrate->sxfr & SXFR;
				scsirate |= offset & SOFS;
			}
			if (active) {
				u_int sxfrctl0;

				/* Mirror the per-target ultra bit in FAST20. */
				sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
				sxfrctl0 &= ~FAST20;
				if (tstate->ultraenb & devinfo->target_mask)
					sxfrctl0 |= FAST20;
				ahc_outb(ahc, SXFRCTL0, sxfrctl0);
			}
		}
		if (active)
			ahc_outb(ahc, SCSIRATE, scsirate);

		tinfo->scsirate = scsirate;
		tinfo->current.period = period;
		tinfo->current.offset = offset;

		/* Update the syncrates in any pending scbs */
		ahc_update_pending_syncrates(ahc);

		/*
		 * If possible, tell the SCSI layer about the
		 * new transfer parameters.
		 */
		/* If possible, update the XPT's notion of our transfer rate */
		path2 = NULL;
		if (path == NULL) {
			int error;

			error = ahc_create_path(ahc, devinfo, &path2);
			if (error == CAM_REQ_CMP)
				path = path2;
			else
				path2 = NULL;
		}

		if (path != NULL) {
			struct ccb_trans_settings neg;

			neg.sync_period = period;
			neg.sync_offset = offset;
			neg.valid = CCB_TRANS_SYNC_RATE_VALID
				  | CCB_TRANS_SYNC_OFFSET_VALID;
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, &neg);
		}

		/* Free the path only if we created it ourselves above. */
		if (path2 != NULL)
			xpt_free_path(path2);

		if (bootverbose) {
			if (offset != 0) {
				printf("%s: target %d synchronous at %sMHz, "
				       "offset = 0x%x\n", ahc_name(ahc),
				       devinfo->target, syncrate->rate, offset);
			} else {
				printf("%s: target %d using "
				       "asynchronous transfers\n",
				       ahc_name(ahc), devinfo->target);
			}
		}
	}

	if ((type & AHC_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & AHC_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}

	ahc_update_target_msg_request(ahc, devinfo, tinfo,
				      /*force*/FALSE,
				      paused);
}

/*
 * Set the bus width (8/16 bit) for a target.  Mirrors ahc_set_syncrate:
 * 'type' selects current/goal/user settings, AHC_TRANS_ACTIVE programs
 * the SCSIRATE register, and the XPT layer is notified of the change.
 */
static void
ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
	      struct cam_path *path, u_int width, u_int type, int paused)
{
	struct ahc_initiator_tinfo *tinfo;
	struct tmode_tstate *tstate;
	u_int  oldwidth;
	int    active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	oldwidth = tinfo->current.width;

	if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) {
		struct	cam_path *path2;
		u_int	scsirate;

		scsirate =  tinfo->scsirate;
		scsirate &= ~WIDEXFER;
		if
 (width == MSG_EXT_WDTR_BUS_16_BIT)
			scsirate |= WIDEXFER;

		tinfo->scsirate = scsirate;

		if (active)
			ahc_outb(ahc, SCSIRATE, scsirate);

		tinfo->current.width = width;

		/* If possible, update the XPT's notion of our transfer rate */
		path2 = NULL;
		if (path == NULL) {
			int error;

			error = ahc_create_path(ahc, devinfo, &path2);
			if (error == CAM_REQ_CMP)
				path = path2;
			else
				path2 = NULL;
		}

		if (path != NULL) {
			struct ccb_trans_settings neg;

			neg.bus_width = width;
			neg.valid = CCB_TRANS_BUS_WIDTH_VALID;
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, &neg);
		}

		/* Free the path only if we created it ourselves above. */
		if (path2 != NULL)
			xpt_free_path(path2);

		if (bootverbose) {
			printf("%s: target %d using %dbit transfers\n",
			       ahc_name(ahc), devinfo->target,
			       8 * (0x01 << width));
		}
	}
	if ((type & AHC_TRANS_GOAL) != 0)
		tinfo->goal.width = width;
	if ((type & AHC_TRANS_USER) != 0)
		tinfo->user.width = width;

	ahc_update_target_msg_request(ahc, devinfo, tinfo,
				      /*force*/FALSE, paused);
}

/*
 * Enable or disable tagged queuing for the target described by devinfo
 * by setting/clearing its bit in the tstate's tagenable mask.
 */
static void
ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, int enable)
{
	struct ahc_initiator_tinfo *tinfo;
	struct tmode_tstate *tstate;

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	if (enable)
		tstate->tagenable |= devinfo->target_mask;
	else
		tstate->tagenable &= ~devinfo->target_mask;
}

/*
 * Attach all the sub-devices we can find
 *
 * Registers the interrupt handler, allocates the shared CAM device
 * queue, and registers one CAM SIM/bus per channel (two for twin
 * channel adapters).  Returns the number of buses successfully
 * attached; partial success is possible and reflected in the count.
 *
 * NOTE(review): sim and sim2 are initialized to NULL below, but path
 * and path2 are not.  An early "goto fail" (e.g. bus_setup_intr or
 * cam_simq_alloc failure) reaches the assignments at "fail:" with
 * path/path2 indeterminate, storing garbage into ahc->path and
 * ahc->path_b — they should be initialized to NULL alongside the sims.
 */
int
ahc_attach(struct ahc_softc *ahc)
{
	struct ccb_setasync csa;
	struct cam_devq *devq;
	int bus_id;
	int bus_id2;
	struct cam_sim *sim;
	struct cam_sim *sim2;
	struct cam_path *path;
	struct cam_path *path2;
	int count;
	int s;
	int error;

	count = 0;
	sim = NULL;
	sim2 = NULL;

	s = splcam();
	/* Hook up our interrupt handler */
	if ((error = bus_setup_intr(ahc->device, ahc->irq, INTR_TYPE_CAM,
				    ahc_intr, ahc, &ahc->ih)) != 0) {
		device_printf(ahc->device, "bus_setup_intr() failed: %d\n",
			      error);
		goto fail;
	}

	/*
	 * Attach secondary channel first if the user has
	 * declared it the primary channel.
	 */
	if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) {
		bus_id = 1;
		bus_id2 = 0;
	} else {
		bus_id = 0;
		bus_id2 = 1;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(AHC_SCB_MAX);
	if (devq == NULL)
		goto fail;

	/*
	 * Construct our first channel SIM entry
	 */
	sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc, ahc->unit,
			    1, AHC_SCB_MAX, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		goto fail;
	}

	if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) {
		cam_sim_free(sim, /*free_devq*/TRUE);
		sim = NULL;
		goto fail;
	}

	if (xpt_create_path(&path, /*periph*/NULL,
			    cam_sim_path(sim), CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, /*free_devq*/TRUE);
		sim = NULL;
		goto fail;
	}

	/* Register for lost-device notifications on bus 1. */
	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = ahc_async;
	csa.callback_arg = sim;
	xpt_action((union ccb *)&csa);
	count++;

	if (ahc->features & AHC_TWIN) {
		/* Second channel shares the device queue with the first. */
		sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc",
				    ahc, ahc->unit, 1,
				    AHC_SCB_MAX, devq);

		if (sim2 == NULL) {
			printf("ahc_attach: Unable to attach second "
			       "bus due to resource shortage");
			goto fail;
		}

		if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) {
			printf("ahc_attach: Unable to attach second "
			       "bus due to resource shortage");
			/*
			 * We do not want to destroy the device queue
			 * because the first bus is using it.
			 */
			cam_sim_free(sim2, /*free_devq*/FALSE);
			goto fail;
		}

		if (xpt_create_path(&path2, /*periph*/NULL,
				    cam_sim_path(sim2),
				    CAM_TARGET_WILDCARD,
				    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister(cam_sim_path(sim2));
			cam_sim_free(sim2, /*free_devq*/FALSE);
			sim2 = NULL;
			goto fail;
		}
		xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = ahc_async;
		csa.callback_arg = sim2;
		xpt_action((union ccb *)&csa);
		count++;
	}

fail:
	/* Map SIMs/paths to softc slots honoring the primary-channel flag. */
	if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) {
		ahc->sim_b = sim;
		ahc->path_b = path;
		ahc->sim = sim2;
		ahc->path = path2;
	} else {
		ahc->sim = sim;
		ahc->path = path;
		ahc->sim_b = sim2;
		ahc->path_b = path2;
	}
	splx(s);
	return (count);
}

#if UNUSED
/*
 * Fill in a devinfo from an SCB, distinguishing the target-role case
 * (continue target I/O CCBs) from the initiator role.
 */
static void
ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		struct scb *scb)
{
	role_t	role;
	int	our_id;

	if (scb->ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		our_id = scb->ccb->ccb_h.target_id;
		role = ROLE_TARGET;
	} else {
		our_id = SCB_CHANNEL(scb) == 'B' ?
 ahc->our_id_b : ahc->our_id;
		role = ROLE_INITIATOR;
	}
	ahc_compile_devinfo(devinfo, our_id, SCB_TARGET(scb),
			    SCB_LUN(scb), SCB_CHANNEL(scb), role);
}
#endif

/*
 * Build a devinfo describing the connection currently active on the
 * bus, reading our role and IDs from the controller registers and the
 * target/lun/channel from the sequencer's SAVED_TCL.
 */
static void
ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	u_int	saved_tcl;
	role_t	role;
	int	our_id;

	if (ahc_inb(ahc, SSTAT0) & TARGET)
		role = ROLE_TARGET;
	else
		role = ROLE_INITIATOR;

	if (role == ROLE_TARGET
	 && (ahc->features & AHC_MULTI_TID) != 0
	 && (ahc_inb(ahc, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) {
		/* We were selected, so pull our id from TARGIDIN */
		our_id = ahc_inb(ahc, TARGIDIN) & OID;
	} else if ((ahc->features & AHC_ULTRA2) != 0)
		our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
	else
		our_id = ahc_inb(ahc, SCSIID) & OID;

	saved_tcl = ahc_inb(ahc, SAVED_TCL);
	ahc_compile_devinfo(devinfo, our_id, TCL_TARGET(saved_tcl),
			    TCL_LUN(saved_tcl), TCL_CHANNEL(ahc, saved_tcl),
			    role);
}

/*
 * Populate a devinfo structure from its component fields.  Channel 'B'
 * targets occupy offsets 8-15, and target_mask is the corresponding
 * single bit in the 16-bit per-target masks used elsewhere.
 */
static void
ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
		    u_int lun, char channel, role_t role)
{
	devinfo->our_scsiid = our_id;
	devinfo->target = target;
	devinfo->lun = lun;
	devinfo->target_offset = target;
	devinfo->channel = channel;
	devinfo->role = role;
	if (channel == 'B')
		devinfo->target_offset += 8;
	devinfo->target_mask = (0x01 << devinfo->target_offset);
}

/*
 * Catch an interrupt from the adapter
 */
void
ahc_intr(void *arg)
{
	struct	ahc_softc *ahc;
	u_int	intstat;

	ahc = (struct ahc_softc *)arg;

	intstat = ahc_inb(ahc, INTSTAT);

	/*
	 * Any interrupts to process?
	 */
#if NPCI > 0
	/*
	 * On PCI, tolerate shared-interrupt noise: only probe for PCI
	 * errors after a burst of >500 unsolicited interrupts.
	 */
	if ((intstat & INT_PEND) == 0) {
		if ((ahc->chip & AHC_PCI) != 0
		 && (ahc->unsolicited_ints > 500)) {
			if ((ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
				ahc_pci_intr(ahc);
			ahc->unsolicited_ints = 0;
		} else {
			ahc->unsolicited_ints++;
		}
		return;
	} else {
		ahc->unsolicited_ints = 0;
	}
#else
	if ((intstat & INT_PEND) == 0)
		return;
#endif

	if (intstat & CMDCMPLT) {
		ahc_outb(ahc, CLRINT, CLRCMDINT);
		ahc_run_qoutfifo(ahc);
		if ((ahc->flags & AHC_TARGETMODE) != 0) {
			ahc_run_tqinfifo(ahc, /*paused*/FALSE);
		}
	}
	if (intstat & BRKADRINT) {
		/*
		 * We upset the sequencer :-(
		 * Lookup the error message
		 */
		int i, error, num_errors;

		error = ahc_inb(ahc, ERROR);
		num_errors =  sizeof(hard_error)/sizeof(hard_error[0]);
		for (i = 0; error != 1 && i < num_errors; i++)
			error >>= 1;
		panic("%s: brkadrint, %s at seqaddr = 0x%x\n",
		      ahc_name(ahc), hard_error[i].errmesg,
		      ahc_inb(ahc, SEQADDR0) |
		      (ahc_inb(ahc, SEQADDR1) << 8));

		/*
		 * Tell everyone that this HBA is no longer available.
		 * NOTE(review): this call is unreachable — panic() above
		 * does not return.
		 */
		ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
			       CAM_NO_HBA);
	}
	if (intstat & SEQINT)
		ahc_handle_seqint(ahc, intstat);

	if (intstat & SCSIINT)
		ahc_handle_scsiint(ahc, intstat);
}

/*
 * Allocate a per-target-ID state block for target mode.  Channel 'B'
 * ids map to slots 8-15.  When a master tstate exists (our initiator
 * role entry), user settings are inherited from it while current/goal
 * settings are reset; see the comment in the body.  Returns NULL on
 * allocation failure.
 */
static struct tmode_tstate *
ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
{
	struct tmode_tstate *master_tstate;
	struct tmode_tstate *tstate;
	int i, s;

	master_tstate = ahc->enabled_targets[ahc->our_id];
	if (channel == 'B') {
		scsi_id += 8;
		master_tstate = ahc->enabled_targets[ahc->our_id_b + 8];
	}
	if (ahc->enabled_targets[scsi_id] != NULL
	 && ahc->enabled_targets[scsi_id] != master_tstate)
		panic("%s: ahc_alloc_tstate - Target
already allocated",
		      ahc_name(ahc));
	tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT);
	if (tstate == NULL)
		return (NULL);

	/*
	 * If we have allocated a master tstate, copy user settings from
	 * the master tstate (taken from SRAM or the EEPROM) for this
	 * channel, but reset our current and goal settings to async/narrow
	 * until an initiator talks to us.
	 */
	if (master_tstate != NULL) {
		bcopy(master_tstate, tstate, sizeof(*tstate));
		bzero(tstate->enabled_luns, sizeof(tstate->enabled_luns));
		tstate->ultraenb = 0;
		for (i = 0; i < 16; i++) {
			bzero(&tstate->transinfo[i].current,
			      sizeof(tstate->transinfo[i].current));
			bzero(&tstate->transinfo[i].goal,
			      sizeof(tstate->transinfo[i].goal));
		}
	} else
		bzero(tstate, sizeof(*tstate));
	s = splcam();
	ahc->enabled_targets[scsi_id] = tstate;
	splx(s);
	return (tstate);
}

/*
 * Free the tstate for the given id/channel.  The entry backing our own
 * initiator role is preserved unless 'force' is TRUE.
 */
static void
ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
{
	struct tmode_tstate *tstate;

	/* Don't clean up the entry for our initiator role */
	if ((ahc->flags & AHC_INITIATORMODE) != 0
	 && ((channel == 'B' && scsi_id == ahc->our_id_b)
	  || (channel == 'A' && scsi_id == ahc->our_id))
	 && force == FALSE)
		return;

	if (channel == 'B')
		scsi_id += 8;
	tstate = ahc->enabled_targets[scsi_id];
	if (tstate != NULL)
		free(tstate, M_DEVBUF);
	ahc->enabled_targets[scsi_id] = NULL;
}

/*
 * Handle an XPT_EN_LUN CCB: enable or disable target-mode processing
 * for a target/lun (or the wildcard "black hole" device).  Completion
 * status is returned in ccb->ccb_h.status.  Enabling sets up tstate/
 * lstate bookkeeping and, where needed, the TARGID/SCSIID selection-in
 * registers; disabling tears the same state down once no CTIOs/ATIOs/
 * INOTs remain outstanding.
 */
static void
ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
{
	struct	   tmode_tstate *tstate;
	struct	   tmode_lstate *lstate;
	struct	   ccb_en_lun *cel;
	cam_status status;
	int	   target;
	int	   lun;
	u_int	   target_mask;
	char	   channel;
	int	   s;

	status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate,
				     /* notfound_failure*/FALSE);

	if (status !=
 CAM_REQ_CMP) {
		ccb->ccb_h.status = status;
		return;
	}

	cel = &ccb->cel;
	target = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;
	channel = SIM_CHANNEL(ahc, sim);
	target_mask = 0x01 << target;
	if (channel == 'B')
		target_mask <<= 8;

	if (cel->enable != 0) {
		u_int scsiseq;

		/* Are we already enabled?? */
		if (lstate != NULL) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Lun already enabled\n");
			ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
			return;
		}

		if (cel->grp6_len != 0
		 || cel->grp7_len != 0) {
			/*
			 * Don't (yet?) support vendor
			 * specific commands.
			 */
			ccb->ccb_h.status = CAM_REQ_INVALID;
			printf("Non-zero Group Codes\n");
			return;
		}

		/*
		 * Seems to be okay.
		 * Setup our data structures.
		 */
		if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
			tstate = ahc_alloc_tstate(ahc, target, channel);
			if (tstate == NULL) {
				xpt_print_path(ccb->ccb_h.path);
				printf("Couldn't allocate tstate\n");
				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
				return;
			}
		}
		lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
		if (lstate == NULL) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Couldn't allocate lstate\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		}
		bzero(lstate, sizeof(*lstate));
		status = xpt_create_path(&lstate->path, /*periph*/NULL,
					 xpt_path_path_id(ccb->ccb_h.path),
					 xpt_path_target_id(ccb->ccb_h.path),
					 xpt_path_lun_id(ccb->ccb_h.path));
		if (status != CAM_REQ_CMP) {
			free(lstate, M_DEVBUF);
			xpt_print_path(ccb->ccb_h.path);
			printf("Couldn't allocate path\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		}
		SLIST_INIT(&lstate->accept_tios);
		SLIST_INIT(&lstate->immed_notifies);
		s = splcam();
		pause_sequencer(ahc);
		if (target != CAM_TARGET_WILDCARD) {
			tstate->enabled_luns[lun] = lstate;
			ahc->enabled_luns++;

			if ((ahc->features & AHC_MULTI_TID) != 0) {
				u_int targid_mask;

				/* Add this target to the selection-in mask. */
				targid_mask = ahc_inb(ahc, TARGID)
					    | (ahc_inb(ahc, TARGID + 1) << 8);

				targid_mask |= target_mask;
				ahc_outb(ahc, TARGID, targid_mask);
				ahc_outb(ahc, TARGID+1, (targid_mask >> 8));

				ahc_update_scsiid(ahc, targid_mask);
			} else {
				int our_id;
				/* NOTE(review): shadows the outer 'channel'. */
				char  channel;

				channel = SIM_CHANNEL(ahc, sim);
				our_id = SIM_SCSI_ID(ahc, sim);

				/*
				 * This can only happen if selections
				 * are not enabled
				 */
				if (target != our_id) {
					u_int sblkctl;
					char  cur_channel;
					int   swap;

					sblkctl = ahc_inb(ahc, SBLKCTL);
					cur_channel = (sblkctl & SELBUSB)
						    ? 'B' : 'A';
					if ((ahc->features & AHC_TWIN) == 0)
						cur_channel = 'A';
					swap = cur_channel != channel;
					if (channel == 'A')
						ahc->our_id = target;
					else
						ahc->our_id_b = target;

					/* Bank-switch to the other channel if needed. */
					if (swap)
						ahc_outb(ahc, SBLKCTL,
							 sblkctl ^ SELBUSB);

					ahc_outb(ahc, SCSIID, target);

					if (swap)
						ahc_outb(ahc, SBLKCTL, sblkctl);
				}
			}
		} else
			ahc->black_hole = lstate;
		/* Allow select-in operations */
		if (ahc->black_hole != NULL && ahc->enabled_luns > 0) {
			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
			scsiseq |= ENSELI;
			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
			scsiseq = ahc_inb(ahc, SCSISEQ);
			scsiseq |= ENSELI;
			ahc_outb(ahc, SCSISEQ, scsiseq);
		}
		unpause_sequencer(ahc);
		splx(s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_print_path(ccb->ccb_h.path);
		printf("Lun now enabled for target mode\n");
	} else {
		struct ccb_hdr *elm;
		int i, empty;

		if (lstate == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}

		s = splcam();
		ccb->ccb_h.status = CAM_REQ_CMP;
		/* Refuse to disable while target I/O is still in flight. */
		LIST_FOREACH(elm, &ahc->pending_ccbs, sim_links.le) {
			if (elm->func_code == XPT_CONT_TARGET_IO
			 && !xpt_path_comp(elm->path, ccb->ccb_h.path)){
				printf("CTIO pending\n");
				ccb->ccb_h.status = CAM_REQ_INVALID;
				splx(s);
				return;
			}
		}

		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
			printf("ATIOs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
			printf("INOTs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (ccb->ccb_h.status != CAM_REQ_CMP) {
			splx(s);
			return;
		}

		xpt_print_path(ccb->ccb_h.path);
		printf("Target mode disabled\n");
		xpt_free_path(lstate->path);
		free(lstate, M_DEVBUF);

		pause_sequencer(ahc);
		/* Can we clean up the target too? */
		if (target != CAM_TARGET_WILDCARD) {
			tstate->enabled_luns[lun] = NULL;
			ahc->enabled_luns--;
			for (empty = 1, i = 0; i < 8; i++)
				if (tstate->enabled_luns[i] != NULL) {
					empty = 0;
					break;
				}

			if (empty) {
				ahc_free_tstate(ahc, target, channel,
						/*force*/FALSE);
				if (ahc->features & AHC_MULTI_TID) {
					u_int targid_mask;

					/* Remove the target from selection-in. */
					targid_mask = ahc_inb(ahc, TARGID)
						    | (ahc_inb(ahc, TARGID + 1)
						       << 8);

					targid_mask &= ~target_mask;
					ahc_outb(ahc, TARGID, targid_mask);
					ahc_outb(ahc, TARGID+1,
						 (targid_mask >> 8));
					ahc_update_scsiid(ahc, targid_mask);
				}
			}
		} else {

			ahc->black_hole = NULL;

			/*
			 * We can't allow selections without
			 * our black hole device.
			 */
			empty = TRUE;
		}
		if (ahc->enabled_luns == 0) {
			/* Disallow select-in */
			u_int scsiseq;

			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
			scsiseq &= ~ENSELI;
			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
			scsiseq = ahc_inb(ahc, SCSISEQ);
			scsiseq &= ~ENSELI;
			ahc_outb(ahc, SCSISEQ, scsiseq);
		}
		unpause_sequencer(ahc);
		splx(s);
	}
}

/*
 * After a change to the TARGID selection-in mask, make sure the OID
 * field of SCSIID refers to an ID we actually allow selections on;
 * only meaningful (and only legal) on multi-TID capable chips.
 */
static void
ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
{
	u_int scsiid_mask;
	u_int scsiid;

	if ((ahc->features & AHC_MULTI_TID) == 0)
		panic("ahc_update_scsiid called on non-multitid unit\n");

	/*
	 * Since we will rely on the TARGID mask
	 * for selection enables, ensure that OID
	 * in SCSIID is not set to some other ID
	 * that we don't want to allow selections on.
	 */
	if ((ahc->features & AHC_ULTRA2) != 0)
		scsiid = ahc_inb(ahc, SCSIID_ULTRA2);
	else
		scsiid = ahc_inb(ahc, SCSIID);
	scsiid_mask = 0x1 << (scsiid & OID);
	if ((targid_mask & scsiid_mask) == 0) {
		u_int our_id;

		/* ffs counts from 1 */
		our_id = ffs(targid_mask);
		if (our_id == 0)
			our_id = ahc->our_id;
		else
			our_id--;
		scsiid &= TID;
		scsiid |= our_id;
	}
	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc_outb(ahc, SCSIID_ULTRA2, scsiid);
	else
		ahc_outb(ahc, SCSIID, scsiid);
}

/*
 * Dispatch one incoming target-mode command to whoever has the
 * addressed lun enabled (or to the black hole driver for disabled
 * luns), packaging it into a waiting accept-TIO CCB.  Returns 1 when
 * no ATIO is available (caller must retry once more are queued),
 * 0 once the command has been handed to the peripheral driver.
 */
static int
ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
{
	struct tmode_tstate *tstate;
	struct tmode_lstate *lstate;
	struct ccb_accept_tio *atio;
	u_int8_t *byte;
	int initiator;
	int target;
	int lun;

	initiator = cmd->initiator_channel >> 4;
	target = cmd->targ_id;
	lun    = (cmd->identify & MSG_IDENTIFY_LUNMASK);

	byte = cmd->bytes;
	tstate = ahc->enabled_targets[target];
	lstate = NULL;
	if (tstate != NULL && lun < 8)
		lstate =
 tstate->enabled_luns[lun];

	/*
	 * Commands for disabled luns go to the black hole driver.
	 */
	if (lstate == NULL)
		lstate = ahc->black_hole;

	atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
	if (atio == NULL) {
		/* Block the TQIN fifo until a peripheral posts more ATIOs. */
		ahc->flags |= AHC_TQINFIFO_BLOCKED;
		/*
		 * Wait for more ATIOs from the peripheral driver for this lun.
		 */
		return (1);
	} else
		ahc->flags &= ~AHC_TQINFIFO_BLOCKED;
#if 0
	printf("Incoming command from %d for %d:%d%s\n",
	       initiator, target, lun,
	       lstate == ahc->black_hole ? "(Black Holed)" : "");
#endif
	SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);

	if (lstate == ahc->black_hole) {
		/* Fill in the wildcards */
		atio->ccb_h.target_id = target;
		atio->ccb_h.target_lun = lun;
	}

	/*
	 * Package it up and send it off to
	 * whomever has this lun enabled.
	 */
	atio->sense_len = 0;
	atio->init_id = initiator;
	/* 0xFF in the first byte means "no tag message was received". */
	if (byte[0] != 0xFF) {
		/* Tag was included */
		atio->tag_action = *byte++;
		atio->tag_id = *byte++;
		atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
	} else {
		byte++;
		atio->ccb_h.flags = 0;
	}

	/* Okay.  Now determine the cdb size based on the command code */
	switch (*byte >> CMD_GROUP_CODE_SHIFT) {
	case 0:
		atio->cdb_len = 6;
		break;
	case 1:
	case 2:
		atio->cdb_len = 10;
		break;
	case 4:
		atio->cdb_len = 16;
		break;
	case 5:
		atio->cdb_len = 12;
		break;
	case 3:
	default:
		/* Only copy the opcode. */
		atio->cdb_len = 1;
		printf("Reserved or VU command code type encountered\n");
		break;
	}
	bcopy(byte, atio->cdb_io.cdb_bytes, atio->cdb_len);

	atio->ccb_h.status |= CAM_CDB_RECVD;

	if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
		/*
		 * We weren't allowed to disconnect.
		 * We're hanging on the bus until a
		 * continue target I/O comes in response
		 * to this accept tio.
		 */
#if 0
		printf("Received Immediate Command %d:%d:%d - %p\n",
		       initiator, target, lun, ahc->pending_device);
#endif
		ahc->pending_device = lstate;
	}
	xpt_done((union ccb*)atio);
	return (0);
}

/*
 * Service a SEQINT: the sequencer has paused itself and posted a status
 * code in the upper bits of INTSTAT.  Each case handles one condition;
 * most paths fall out the bottom to unpause the sequencer.
 */
static void
ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
{
	struct scb *scb;
	struct ahc_devinfo devinfo;

	ahc_fetch_devinfo(ahc, &devinfo);

	/*
	 * Clear the upper byte that holds SEQINT status
	 * codes and clear the SEQINT bit. We will unpause
	 * the sequencer, if appropriate, after servicing
	 * the request.
	 */
	ahc_outb(ahc, CLRINT, CLRSEQINT);
	switch (intstat & SEQINT_MASK) {
	case NO_MATCH:
	{
		/* Ensure we don't leave the selection hardware on */
		ahc_outb(ahc, SCSISEQ,
			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));

		printf("%s:%c:%d: no active SCB for reconnecting "
		       "target - issuing BUS DEVICE RESET\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target);
		printf("SAVED_TCL == 0x%x, ARG_1 == 0x%x, SEQ_FLAGS == 0x%x\n",
		       ahc_inb(ahc, SAVED_TCL), ahc_inb(ahc, ARG_1),
		       ahc_inb(ahc, SEQ_FLAGS));
		ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
		ahc->msgout_len = 1;
		ahc->msgout_index = 0;
		ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		ahc_outb(ahc, MSG_OUT, HOST_MSG);
		ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, LASTPHASE) | ATNO);
		break;
	}
	case UPDATE_TMSG_REQ:
		/* Sequencer asked us to refresh its negotiation mask. */
		ahc_outb(ahc, TARGET_MSG_REQUEST, ahc->targ_msg_req & 0xFF);
		ahc_outb(ahc, TARGET_MSG_REQUEST + 1,
			 (ahc->targ_msg_req >> 8) & 0xFF);
		ahc_outb(ahc, HS_MAILBOX, 0);
		break;
	case SEND_REJECT:
	{
		u_int rejbyte = ahc_inb(ahc, ACCUM);
		printf("%s:%c:%d: Warning - unknown message received from "
		       "target (0x%x).
Rejecting\n", 2265 ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte); 2266 break; 2267 } 2268 case NO_IDENT: 2269 { 2270 /* 2271 * The reconnecting target either did not send an identify 2272 * message, or did, but we didn't find and SCB to match and 2273 * before it could respond to our ATN/abort, it hit a dataphase. 2274 * The only safe thing to do is to blow it away with a bus 2275 * reset. 2276 */ 2277 int found; 2278 2279 printf("%s:%c:%d: Target did not send an IDENTIFY message. " 2280 "LASTPHASE = 0x%x, SAVED_TCL == 0x%x\n", 2281 ahc_name(ahc), devinfo.channel, devinfo.target, 2282 ahc_inb(ahc, LASTPHASE), ahc_inb(ahc, SAVED_TCL)); 2283 found = ahc_reset_channel(ahc, devinfo.channel, 2284 /*initiate reset*/TRUE); 2285 printf("%s: Issued Channel %c Bus Reset. " 2286 "%d SCBs aborted\n", ahc_name(ahc), devinfo.channel, 2287 found); 2288 return; 2289 } 2290 case BAD_PHASE: 2291 { 2292 u_int lastphase; 2293 2294 lastphase = ahc_inb(ahc, LASTPHASE); 2295 if (lastphase == P_BUSFREE) { 2296 printf("%s:%c:%d: Missed busfree. Curphase = 0x%x\n", 2297 ahc_name(ahc), devinfo.channel, devinfo.target, 2298 ahc_inb(ahc, SCSISIGI)); 2299 restart_sequencer(ahc); 2300 return; 2301 } else { 2302 printf("%s:%c:%d: unknown scsi bus phase %x. " 2303 "Attempting to continue\n", 2304 ahc_name(ahc), devinfo.channel, devinfo.target, 2305 ahc_inb(ahc, SCSISIGI)); 2306 } 2307 break; 2308 } 2309 case BAD_STATUS: 2310 { 2311 u_int scb_index; 2312 struct hardware_scb *hscb; 2313 struct ccb_scsiio *csio; 2314 /* 2315 * The sequencer will notify us when a command 2316 * has an error that would be of interest to 2317 * the kernel. This allows us to leave the sequencer 2318 * running in the common case of command completes 2319 * without error. The sequencer will already have 2320 * dma'd the SCB back up to us, so we can reference 2321 * the in kernel copy directly. 
2322 */ 2323 scb_index = ahc_inb(ahc, SCB_TAG); 2324 scb = &ahc->scb_data->scbarray[scb_index]; 2325 2326 /* 2327 * Set the default return value to 0 (don't 2328 * send sense). The sense code will change 2329 * this if needed. 2330 */ 2331 ahc_outb(ahc, RETURN_1, 0); 2332 if (!(scb_index < ahc->scb_data->numscbs 2333 && (scb->flags & SCB_ACTIVE) != 0)) { 2334 printf("%s:%c:%d: ahc_intr - referenced scb " 2335 "not valid during seqint 0x%x scb(%d)\n", 2336 ahc_name(ahc), devinfo.channel, 2337 devinfo.target, intstat, scb_index); 2338 goto unpause; 2339 } 2340 2341 hscb = scb->hscb; 2342 2343 /* Don't want to clobber the original sense code */ 2344 if ((scb->flags & SCB_SENSE) != 0) { 2345 /* 2346 * Clear the SCB_SENSE Flag and have 2347 * the sequencer do a normal command 2348 * complete. 2349 */ 2350 scb->flags &= ~SCB_SENSE; 2351 ahcsetccbstatus(scb->ccb, CAM_AUTOSENSE_FAIL); 2352 break; 2353 } 2354 ahcsetccbstatus(scb->ccb, CAM_SCSI_STATUS_ERROR); 2355 /* Freeze the queue unit the client sees the error. */ 2356 ahc_freeze_devq(ahc, scb->ccb->ccb_h.path); 2357 ahc_freeze_ccb(scb->ccb); 2358 csio = &scb->ccb->csio; 2359 csio->scsi_status = hscb->status; 2360 switch (hscb->status) { 2361 case SCSI_STATUS_OK: 2362 printf("%s: Interrupted for staus of 0???\n", 2363 ahc_name(ahc)); 2364 break; 2365 case SCSI_STATUS_CMD_TERMINATED: 2366 case SCSI_STATUS_CHECK_COND: 2367 #ifdef AHC_DEBUG 2368 if (ahc_debug & AHC_SHOWSENSE) { 2369 xpt_print_path(csio->ccb_h.path); 2370 printf("SCB %d: requests Check Status\n", 2371 scb->hscb->tag); 2372 } 2373 #endif 2374 2375 if ((csio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) { 2376 struct ahc_dma_seg *sg; 2377 struct scsi_sense *sc; 2378 struct ahc_initiator_tinfo *tinfo; 2379 struct tmode_tstate *tstate; 2380 2381 sg = scb->sg_list; 2382 sc = (struct scsi_sense *)(&hscb->cmdstore); 2383 /* 2384 * Save off the residual if there is one. 
2385 */ 2386 if (hscb->residual_SG_count != 0) 2387 ahc_calc_residual(scb); 2388 else 2389 scb->ccb->csio.resid = 0; 2390 2391 #ifdef AHC_DEBUG 2392 if (ahc_debug & AHC_SHOWSENSE) { 2393 xpt_print_path(csio->ccb_h.path); 2394 printf("Sending Sense\n"); 2395 } 2396 #endif 2397 sg->addr = ahc->scb_data->sense_busaddr 2398 + (hscb->tag*sizeof(struct scsi_sense_data)); 2399 sg->len = MIN(sizeof(struct scsi_sense_data), 2400 csio->sense_len); 2401 2402 sc->opcode = REQUEST_SENSE; 2403 sc->byte2 = SCB_LUN(scb) << 5; 2404 sc->unused[0] = 0; 2405 sc->unused[1] = 0; 2406 sc->length = sg->len; 2407 sc->control = 0; 2408 2409 /* 2410 * Would be nice to preserve DISCENB here, 2411 * but due to the way we page SCBs, we can't. 2412 */ 2413 hscb->control = 0; 2414 2415 /* 2416 * This request sense could be because the 2417 * the device lost power or in some other 2418 * way has lost our transfer negotiations. 2419 * Renegotiate if appropriate. Unit attention 2420 * errors will be reported before any data 2421 * phases occur. 2422 */ 2423 ahc_calc_residual(scb); 2424 if (scb->ccb->csio.resid 2425 == scb->ccb->csio.dxfer_len) { 2426 tinfo = ahc_fetch_transinfo(ahc, 2427 devinfo.channel, 2428 devinfo.our_scsiid, 2429 devinfo.target, 2430 &tstate); 2431 ahc_update_target_msg_request(ahc, 2432 &devinfo, 2433 tinfo, 2434 /*force*/TRUE, 2435 /*paused*/TRUE); 2436 } 2437 hscb->status = 0; 2438 hscb->SG_count = 1; 2439 hscb->SG_pointer = scb->sg_list_phys; 2440 hscb->data = sg->addr; 2441 hscb->datalen = sg->len; 2442 hscb->cmdpointer = hscb->cmdstore_busaddr; 2443 hscb->cmdlen = sizeof(*sc); 2444 scb->sg_count = hscb->SG_count; 2445 scb->flags |= SCB_SENSE; 2446 /* 2447 * Ensure the target is busy since this 2448 * will be an untagged request. 2449 */ 2450 ahc_busy_tcl(ahc, scb); 2451 ahc_outb(ahc, RETURN_1, SEND_SENSE); 2452 2453 /* 2454 * Ensure we have enough time to actually 2455 * retrieve the sense. 
2456 */ 2457 untimeout(ahc_timeout, (caddr_t)scb, 2458 scb->ccb->ccb_h.timeout_ch); 2459 scb->ccb->ccb_h.timeout_ch = 2460 timeout(ahc_timeout, (caddr_t)scb, 5 * hz); 2461 } 2462 break; 2463 case SCSI_STATUS_BUSY: 2464 case SCSI_STATUS_QUEUE_FULL: 2465 /* 2466 * Requeue any transactions that haven't been 2467 * sent yet. 2468 */ 2469 ahc_freeze_devq(ahc, scb->ccb->ccb_h.path); 2470 ahc_freeze_ccb(scb->ccb); 2471 break; 2472 } 2473 break; 2474 } 2475 case TRACE_POINT: 2476 { 2477 printf("SSTAT2 = 0x%x DFCNTRL = 0x%x\n", ahc_inb(ahc, SSTAT2), 2478 ahc_inb(ahc, DFCNTRL)); 2479 printf("SSTAT3 = 0x%x DSTATUS = 0x%x\n", ahc_inb(ahc, SSTAT3), 2480 ahc_inb(ahc, DFSTATUS)); 2481 printf("SSTAT0 = 0x%x, SCB_DATACNT = 0x%x\n", 2482 ahc_inb(ahc, SSTAT0), 2483 ahc_inb(ahc, SCB_DATACNT)); 2484 break; 2485 } 2486 case HOST_MSG_LOOP: 2487 { 2488 /* 2489 * The sequencer has encountered a message phase 2490 * that requires host assistance for completion. 2491 * While handling the message phase(s), we will be 2492 * notified by the sequencer after each byte is 2493 * transfered so we can track bus phases. 2494 * 2495 * If this is the first time we've seen a HOST_MSG_LOOP, 2496 * initialize the state of the host message loop. 2497 */ 2498 if (ahc->msg_type == MSG_TYPE_NONE) { 2499 u_int bus_phase; 2500 2501 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 2502 if (bus_phase != P_MESGIN 2503 && bus_phase != P_MESGOUT) { 2504 printf("ahc_intr: HOST_MSG_LOOP bad " 2505 "phase 0x%x\n", 2506 bus_phase); 2507 /* 2508 * Probably transitioned to bus free before 2509 * we got here. Just punt the message. 
2510 */ 2511 ahc_clear_intstat(ahc); 2512 restart_sequencer(ahc); 2513 } 2514 2515 if (devinfo.role == ROLE_INITIATOR) { 2516 struct scb *scb; 2517 u_int scb_index; 2518 2519 scb_index = ahc_inb(ahc, SCB_TAG); 2520 scb = &ahc->scb_data->scbarray[scb_index]; 2521 2522 if (bus_phase == P_MESGOUT) 2523 ahc_setup_initiator_msgout(ahc, 2524 &devinfo, 2525 scb); 2526 else { 2527 ahc->msg_type = 2528 MSG_TYPE_INITIATOR_MSGIN; 2529 ahc->msgin_index = 0; 2530 } 2531 } else { 2532 if (bus_phase == P_MESGOUT) { 2533 ahc->msg_type = 2534 MSG_TYPE_TARGET_MSGOUT; 2535 ahc->msgin_index = 0; 2536 } else 2537 /* XXX Ever executed??? */ 2538 ahc_setup_target_msgin(ahc, &devinfo); 2539 } 2540 } 2541 2542 /* Pass a NULL path so that handlers generate their own */ 2543 ahc_handle_message_phase(ahc, /*path*/NULL); 2544 break; 2545 } 2546 case PERR_DETECTED: 2547 { 2548 /* 2549 * If we've cleared the parity error interrupt 2550 * but the sequencer still believes that SCSIPERR 2551 * is true, it must be that the parity error is 2552 * for the currently presented byte on the bus, 2553 * and we are not in a phase (data-in) where we will 2554 * eventually ack this byte. Ack the byte and 2555 * throw it away in the hope that the target will 2556 * take us to message out to deliver the appropriate 2557 * error message. 2558 */ 2559 if ((intstat & SCSIINT) == 0 2560 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) { 2561 u_int curphase; 2562 2563 /* 2564 * The hardware will only let you ack bytes 2565 * if the expected phase in SCSISIGO matches 2566 * the current phase. Make sure this is 2567 * currently the case. 2568 */ 2569 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 2570 ahc_outb(ahc, LASTPHASE, curphase); 2571 ahc_outb(ahc, SCSISIGO, curphase); 2572 ahc_inb(ahc, SCSIDATL); 2573 } 2574 break; 2575 } 2576 case DATA_OVERRUN: 2577 { 2578 /* 2579 * When the sequencer detects an overrun, it 2580 * places the controller in "BITBUCKET" mode 2581 * and allows the target to complete its transfer. 
2582 * Unfortunately, none of the counters get updated 2583 * when the controller is in this mode, so we have 2584 * no way of knowing how large the overrun was. 2585 */ 2586 u_int scbindex = ahc_inb(ahc, SCB_TAG); 2587 u_int lastphase = ahc_inb(ahc, LASTPHASE); 2588 int i; 2589 2590 scb = &ahc->scb_data->scbarray[scbindex]; 2591 for (i = 0; i < num_phases; i++) { 2592 if (lastphase == phase_table[i].phase) 2593 break; 2594 } 2595 xpt_print_path(scb->ccb->ccb_h.path); 2596 printf("data overrun detected %s." 2597 " Tag == 0x%x.\n", 2598 phase_table[i].phasemsg, 2599 scb->hscb->tag); 2600 xpt_print_path(scb->ccb->ccb_h.path); 2601 printf("%s seen Data Phase. Length = %d. NumSGs = %d.\n", 2602 ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't", 2603 scb->ccb->csio.dxfer_len, scb->sg_count); 2604 if (scb->sg_count > 0) { 2605 for (i = 0; i < scb->sg_count; i++) { 2606 printf("sg[%d] - Addr 0x%x : Length %d\n", 2607 i, 2608 scb->sg_list[i].addr, 2609 scb->sg_list[i].len); 2610 } 2611 } 2612 /* 2613 * Set this and it will take affect when the 2614 * target does a command complete. 
2615 */ 2616 ahc_freeze_devq(ahc, scb->ccb->ccb_h.path); 2617 ahcsetccbstatus(scb->ccb, CAM_DATA_RUN_ERR); 2618 ahc_freeze_ccb(scb->ccb); 2619 break; 2620 } 2621 case TRACEPOINT: 2622 { 2623 printf("TRACEPOINT: RETURN_2 = %d\n", ahc_inb(ahc, RETURN_2)); 2624 #if 0 2625 printf("SSTAT1 == 0x%x\n", ahc_inb(ahc, SSTAT1)); 2626 printf("SSTAT0 == 0x%x\n", ahc_inb(ahc, SSTAT0)); 2627 printf(", SCSISIGI == 0x%x\n", ahc_inb(ahc, SCSISIGI)); 2628 printf("TRACEPOINT: CCHCNT = %d, SG_COUNT = %d\n", 2629 ahc_inb(ahc, CCHCNT), ahc_inb(ahc, SG_COUNT)); 2630 printf("TRACEPOINT: SCB_TAG = %d\n", ahc_inb(ahc, SCB_TAG)); 2631 printf("TRACEPOINT1: CCHADDR = %d, CCHCNT = %d, SCBPTR = %d\n", 2632 ahc_inb(ahc, CCHADDR) 2633 | (ahc_inb(ahc, CCHADDR+1) << 8) 2634 | (ahc_inb(ahc, CCHADDR+2) << 16) 2635 | (ahc_inb(ahc, CCHADDR+3) << 24), 2636 ahc_inb(ahc, CCHCNT) 2637 | (ahc_inb(ahc, CCHCNT+1) << 8) 2638 | (ahc_inb(ahc, CCHCNT+2) << 16), 2639 ahc_inb(ahc, SCBPTR)); 2640 printf("TRACEPOINT: WAITING_SCBH = %d\n", ahc_inb(ahc, WAITING_SCBH)); 2641 printf("TRACEPOINT: SCB_TAG = %d\n", ahc_inb(ahc, SCB_TAG)); 2642 #endif 2643 break; 2644 } 2645 #if NOT_YET 2646 /* XXX Fill these in later */ 2647 case MESG_BUFFER_BUSY: 2648 break; 2649 case MSGIN_PHASEMIS: 2650 break; 2651 #endif 2652 default: 2653 printf("ahc_intr: seqint, " 2654 "intstat == 0x%x, scsisigi = 0x%x\n", 2655 intstat, ahc_inb(ahc, SCSISIGI)); 2656 break; 2657 } 2658 2659 unpause: 2660 /* 2661 * The sequencer is paused immediately on 2662 * a SEQINT, so we should restart it when 2663 * we're done. 
2664 */ 2665 unpause_sequencer(ahc); 2666 } 2667 2668 static void 2669 ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) 2670 { 2671 u_int scb_index; 2672 u_int status; 2673 struct scb *scb; 2674 char cur_channel; 2675 char intr_channel; 2676 2677 if ((ahc->features & AHC_TWIN) != 0 2678 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0)) 2679 cur_channel = 'B'; 2680 else 2681 cur_channel = 'A'; 2682 intr_channel = cur_channel; 2683 2684 status = ahc_inb(ahc, SSTAT1); 2685 if (status == 0) { 2686 if ((ahc->features & AHC_TWIN) != 0) { 2687 /* Try the other channel */ 2688 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 2689 status = ahc_inb(ahc, SSTAT1); 2690 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 2691 intr_channel = (cur_channel == 'A') ? 'B' : 'A'; 2692 } 2693 if (status == 0) { 2694 printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc)); 2695 return; 2696 } 2697 } 2698 2699 scb_index = ahc_inb(ahc, SCB_TAG); 2700 if (scb_index < ahc->scb_data->numscbs) { 2701 scb = &ahc->scb_data->scbarray[scb_index]; 2702 if ((scb->flags & SCB_ACTIVE) == 0 2703 || (ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) == 0) 2704 scb = NULL; 2705 } else 2706 scb = NULL; 2707 2708 if ((status & SCSIRSTI) != 0) { 2709 printf("%s: Someone reset channel %c\n", 2710 ahc_name(ahc), intr_channel); 2711 ahc_reset_channel(ahc, intr_channel, /* Initiate Reset */FALSE); 2712 } else if ((status & SCSIPERR) != 0) { 2713 /* 2714 * Determine the bus phase and queue an appropriate message. 2715 * SCSIPERR is latched true as soon as a parity error 2716 * occurs. If the sequencer acked the transfer that 2717 * caused the parity error and the currently presented 2718 * transfer on the bus has correct parity, SCSIPERR will 2719 * be cleared by CLRSCSIPERR. Use this to determine if 2720 * we should look at the last phase the sequencer recorded, 2721 * or the current phase presented on the bus. 
2722 */ 2723 u_int mesg_out; 2724 u_int curphase; 2725 u_int errorphase; 2726 u_int lastphase; 2727 int i; 2728 2729 lastphase = ahc_inb(ahc, LASTPHASE); 2730 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 2731 ahc_outb(ahc, CLRSINT1, CLRSCSIPERR); 2732 /* 2733 * For all phases save DATA, the sequencer won't 2734 * automatically ack a byte that has a parity error 2735 * in it. So the only way that the current phase 2736 * could be 'data-in' is if the parity error is for 2737 * an already acked byte in the data phase. During 2738 * synchronous data-in transfers, we may actually 2739 * ack bytes before latching the current phase in 2740 * LASTPHASE, leading to the discrepancy between 2741 * curphase and lastphase. 2742 */ 2743 if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0 2744 || curphase == P_DATAIN) 2745 errorphase = curphase; 2746 else 2747 errorphase = lastphase; 2748 2749 for (i = 0; i < num_phases; i++) { 2750 if (errorphase == phase_table[i].phase) 2751 break; 2752 } 2753 mesg_out = phase_table[i].mesg_out; 2754 if (scb != NULL) 2755 xpt_print_path(scb->ccb->ccb_h.path); 2756 else 2757 printf("%s:%c:%d: ", ahc_name(ahc), 2758 intr_channel, 2759 TCL_TARGET(ahc_inb(ahc, SAVED_TCL))); 2760 2761 printf("parity error detected %s. " 2762 "SEQADDR(0x%x) SCSIRATE(0x%x)\n", 2763 phase_table[i].phasemsg, 2764 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8), 2765 ahc_inb(ahc, SCSIRATE)); 2766 2767 /* 2768 * We've set the hardware to assert ATN if we 2769 * get a parity error on "in" phases, so all we 2770 * need to do is stuff the message buffer with 2771 * the appropriate message. "In" phases have set 2772 * mesg_out to something other than MSG_NOP. 
2773 */ 2774 if (mesg_out != MSG_NOOP) { 2775 if (ahc->msg_type != MSG_TYPE_NONE) 2776 ahc->send_msg_perror = TRUE; 2777 else 2778 ahc_outb(ahc, MSG_OUT, mesg_out); 2779 } 2780 ahc_outb(ahc, CLRINT, CLRSCSIINT); 2781 unpause_sequencer(ahc); 2782 } else if ((status & BUSFREE) != 0 2783 && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) { 2784 /* 2785 * First look at what phase we were last in. 2786 * If its message out, chances are pretty good 2787 * that the busfree was in response to one of 2788 * our abort requests. 2789 */ 2790 u_int lastphase = ahc_inb(ahc, LASTPHASE); 2791 u_int saved_tcl = ahc_inb(ahc, SAVED_TCL); 2792 u_int target = TCL_TARGET(saved_tcl); 2793 u_int initiator_role_id = TCL_SCSI_ID(ahc, saved_tcl); 2794 char channel = TCL_CHANNEL(ahc, saved_tcl); 2795 int printerror = 1; 2796 2797 ahc_outb(ahc, SCSISEQ, 2798 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 2799 if (lastphase == P_MESGOUT) { 2800 u_int message; 2801 u_int tag; 2802 2803 message = ahc->msgout_buf[ahc->msgout_index - 1]; 2804 tag = SCB_LIST_NULL; 2805 switch (message) { 2806 case MSG_ABORT_TAG: 2807 tag = scb->hscb->tag; 2808 /* FALLTRHOUGH */ 2809 case MSG_ABORT: 2810 xpt_print_path(scb->ccb->ccb_h.path); 2811 printf("SCB %d - Abort %s Completed.\n", 2812 scb->hscb->tag, tag == SCB_LIST_NULL ? 2813 "" : "Tag"); 2814 ahc_abort_scbs(ahc, target, channel, 2815 TCL_LUN(saved_tcl), tag, 2816 ROLE_INITIATOR, 2817 CAM_REQ_ABORTED); 2818 printerror = 0; 2819 break; 2820 case MSG_BUS_DEV_RESET: 2821 { 2822 struct ahc_devinfo devinfo; 2823 2824 /* 2825 * Don't mark the user's request for this BDR 2826 * as completing with CAM_BDR_SENT. CAM3 2827 * specifies CAM_REQ_CMP. 
2828 */ 2829 if (scb != NULL 2830 && scb->ccb->ccb_h.func_code == XPT_RESET_DEV 2831 && ahc_match_scb(scb, target, channel, 2832 TCL_LUN(saved_tcl), 2833 SCB_LIST_NULL, 2834 ROLE_INITIATOR)) { 2835 ahcsetccbstatus(scb->ccb, CAM_REQ_CMP); 2836 } 2837 ahc_compile_devinfo(&devinfo, 2838 initiator_role_id, 2839 target, 2840 TCL_LUN(saved_tcl), 2841 channel, 2842 ROLE_INITIATOR); 2843 ahc_handle_devreset(ahc, &devinfo, 2844 CAM_BDR_SENT, AC_SENT_BDR, 2845 "Bus Device Reset", 2846 /*verbose_level*/0); 2847 printerror = 0; 2848 break; 2849 } 2850 default: 2851 break; 2852 } 2853 } 2854 if (printerror != 0) { 2855 int i; 2856 2857 if (scb != NULL) { 2858 u_int tag; 2859 2860 if ((scb->hscb->control & TAG_ENB) != 0) 2861 tag = scb->hscb->tag; 2862 else 2863 tag = SCB_LIST_NULL; 2864 ahc_abort_scbs(ahc, target, channel, 2865 SCB_LUN(scb), tag, 2866 ROLE_INITIATOR, 2867 CAM_UNEXP_BUSFREE); 2868 xpt_print_path(scb->ccb->ccb_h.path); 2869 } else { 2870 /* 2871 * We had not fully identified this connection, 2872 * so we cannot abort anything. 
2873 */ 2874 printf("%s: ", ahc_name(ahc)); 2875 } 2876 for (i = 0; i < num_phases; i++) { 2877 if (lastphase == phase_table[i].phase) 2878 break; 2879 } 2880 printf("Unexpected busfree %s\n" 2881 "SEQADDR == 0x%x\n", 2882 phase_table[i].phasemsg, ahc_inb(ahc, SEQADDR0) 2883 | (ahc_inb(ahc, SEQADDR1) << 8)); 2884 } 2885 ahc_clear_msg_state(ahc); 2886 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 2887 ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR); 2888 ahc_outb(ahc, CLRINT, CLRSCSIINT); 2889 restart_sequencer(ahc); 2890 } else if ((status & SELTO) != 0) { 2891 u_int scbptr; 2892 2893 scbptr = ahc_inb(ahc, WAITING_SCBH); 2894 ahc_outb(ahc, SCBPTR, scbptr); 2895 scb_index = ahc_inb(ahc, SCB_TAG); 2896 2897 if (scb_index < ahc->scb_data->numscbs) { 2898 scb = &ahc->scb_data->scbarray[scb_index]; 2899 if ((scb->flags & SCB_ACTIVE) == 0) 2900 scb = NULL; 2901 } else 2902 scb = NULL; 2903 2904 if (scb == NULL) { 2905 printf("%s: ahc_intr - referenced scb not " 2906 "valid during SELTO scb(%d, %d)\n", 2907 ahc_name(ahc), scbptr, scb_index); 2908 } else { 2909 u_int tag; 2910 2911 tag = SCB_LIST_NULL; 2912 if ((scb->hscb->control & MSG_SIMPLE_Q_TAG) != 0) 2913 tag = scb->hscb->tag; 2914 2915 ahc_abort_scbs(ahc, SCB_TARGET(scb), SCB_CHANNEL(scb), 2916 SCB_LUN(scb), tag, 2917 ROLE_INITIATOR, CAM_SEL_TIMEOUT); 2918 } 2919 /* Stop the selection */ 2920 ahc_outb(ahc, SCSISEQ, 0); 2921 2922 /* No more pending messages */ 2923 ahc_clear_msg_state(ahc); 2924 2925 /* 2926 * Although the driver does not care about the 2927 * 'Selection in Progress' status bit, the busy 2928 * LED does. SELINGO is only cleared by a sucessful 2929 * selection, so we must manually clear it to ensure 2930 * the LED turns off just incase no future successful 2931 * selections occur (e.g. no devices on the bus). 
		 */
		ahc_outb(ahc, CLRSINT0, CLRSELINGO);

		/* Clear interrupt state */
		ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
		ahc_outb(ahc, CLRINT, CLRSCSIINT);
		restart_sequencer(ahc);
	} else {
		/*
		 * NOTE(review): scb may be NULL on this path (it is set to
		 * NULL above when the tag is out of range or the SCB is not
		 * active), so this xpt_print_path() can dereference a NULL
		 * pointer -- confirm and guard before relying on this
		 * diagnostic branch.
		 */
		xpt_print_path(scb->ccb->ccb_h.path);
		printf("Unknown SCSIINT. Status = 0x%x\n", status);
		ahc_outb(ahc, CLRSINT1, status);
		ahc_outb(ahc, CLRINT, CLRSCSIINT);
		unpause_sequencer(ahc);
	}
}

/*
 * Queue a transfer-negotiation message (WDTR or SDTR) into the host
 * message-out buffer for the device described by devinfo.  A wide
 * negotiation, if needed, is constructed before a sync negotiation.
 * Panics if called when neither negotiation is actually required.
 */
static void
ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	/*
	 * We need to initiate transfer negotiations.
	 * If our current and goal settings are identical,
	 * we want to renegotiate due to a check condition.
	 */
	struct ahc_initiator_tinfo *tinfo;
	struct tmode_tstate *tstate;
	int dowide;
	int dosync;

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	dowide = tinfo->current.width != tinfo->goal.width;
	dosync = tinfo->current.period != tinfo->goal.period;

	if (!dowide && !dosync) {
		/*
		 * Current == goal: renegotiate anything that differs
		 * from the narrow/async defaults.
		 */
		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
		dosync = tinfo->goal.period != 0;
	}

	if (dowide) {
		ahc_construct_wdtr(ahc, tinfo->goal.width);
	} else if (dosync) {
		struct ahc_syncrate *rate;
		u_int period;
		u_int offset;

		/* Clamp period/offset to what this device supports. */
		period = tinfo->goal.period;
		rate = ahc_devlimited_syncrate(ahc, &period);
		offset = tinfo->goal.offset;
		ahc_validate_offset(ahc, rate, &offset,
				    tinfo->current.width);
		ahc_construct_sdtr(ahc, period, offset);
	} else {
		panic("ahc_intr: AWAITING_MSG for negotiation, "
		      "but no negotiation needed\n");
	}
}

/*
 * Build the initiator message-out stream for the given SCB: identify
 * (plus tag messages when tagged), or a bus-device-reset/abort message,
 * or a transfer negotiation message, as dictated by the SCB flags.
 */
static void
ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
			   struct scb *scb)
{
	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahc->msgout_index = 0;
	ahc->msgout_len = 0;

	if ((scb->flags & SCB_DEVICE_RESET) == 0
	 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
		u_int identify_msg;

		identify_msg = MSG_IDENTIFYFLAG | SCB_LUN(scb);
		if ((scb->hscb->control & DISCENB) != 0)
			identify_msg |= MSG_IDENTIFY_DISCFLAG;
		ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
		ahc->msgout_len++;

		/* Tagged commands follow identify with tag type and id. */
		if ((scb->hscb->control & TAG_ENB) != 0) {
			ahc->msgout_buf[ahc->msgout_index++] =
			    scb->ccb->csio.tag_action;
			ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
			ahc->msgout_len += 2;
		}
	}

	if (scb->flags & SCB_DEVICE_RESET) {
		ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
		ahc->msgout_len++;
		xpt_print_path(scb->ccb->ccb_h.path);
		printf("Bus Device Reset Message Sent\n");
	} else if (scb->flags & SCB_ABORT) {
		if ((scb->hscb->control & TAG_ENB) != 0)
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
		else
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
		ahc->msgout_len++;
		xpt_print_path(scb->ccb->ccb_h.path);
		printf("Abort Message Sent\n");
	} else if ((ahc->targ_msg_req & devinfo->target_mask) != 0) {
		ahc_build_transfer_msg(ahc, devinfo);
	} else {
		printf("ahc_intr: AWAITING_MSG for an SCB that "
		       "does not have a waiting message");
		panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
		      "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
		      ahc_inb(ahc, MSG_OUT), scb->flags);
	}

	/*
	 * Clear the MK_MESSAGE flag from the SCB so we aren't
	 * asked to send this message again.
	 */
	ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
	ahc->msgout_index = 0;
	ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
}

/*
 * Prepare the message-in stream we will send while acting as a target.
 * Only transfer negotiation messages are expected here; anything else
 * is a driver logic error and panics.
 */
static void
ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahc->msgout_index = 0;
	ahc->msgout_len = 0;

	if ((ahc->targ_msg_req & devinfo->target_mask) != 0)
		ahc_build_transfer_msg(ahc, devinfo);
	else
		panic("ahc_intr: AWAITING target message with no message");

	ahc->msgout_index = 0;
	ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
}

/*
 * React to a MESSAGE REJECT from the target.  If the reject was for an
 * outstanding WDTR, SDTR, or tag message, fall back to narrow, async,
 * or untagged operation respectively.  Returns non-zero when a response
 * message has been queued (caller must re-enter message-out phase).
 */
static int
ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	/*
	 * What we care about here is if we had an
	 * outstanding SDTR or WDTR message for this
	 * target.  If we did, this is a signal that
	 * the target is refusing negotiation.
	 */
	struct scb *scb;
	u_int scb_index;
	u_int last_msg;
	int response = 0;

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = &ahc->scb_data->scbarray[scb_index];

	/* Might be necessary */
	last_msg = ahc_inb(ahc, LAST_MSG);

	if (ahc_sent_msg(ahc, MSG_EXT_WDTR, /*full*/FALSE)) {
		struct ahc_initiator_tinfo *tinfo;
		struct tmode_tstate *tstate;

		/* note 8bit xfers */
		printf("%s:%c:%d: refuses WIDE negotiation.  Using "
		       "8bit transfers\n", ahc_name(ahc),
		       devinfo->channel, devinfo->target);
		ahc_set_width(ahc, devinfo, scb->ccb->ccb_h.path,
			      MSG_EXT_WDTR_BUS_8_BIT,
			      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
			      /*paused*/TRUE);
		/*
		 * No need to clear the sync rate.  If the target
		 * did not accept the command, our syncrate is
		 * unaffected.
If the target started the negotiation, 3108 * but rejected our response, we already cleared the 3109 * sync rate before sending our WDTR. 3110 */ 3111 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, 3112 devinfo->our_scsiid, 3113 devinfo->target, &tstate); 3114 if (tinfo->goal.period) { 3115 u_int period; 3116 3117 /* Start the sync negotiation */ 3118 period = tinfo->goal.period; 3119 ahc_devlimited_syncrate(ahc, &period); 3120 ahc->msgout_index = 0; 3121 ahc->msgout_len = 0; 3122 ahc_construct_sdtr(ahc, period, tinfo->goal.offset); 3123 ahc->msgout_index = 0; 3124 response = 1; 3125 } 3126 } else if (ahc_sent_msg(ahc, MSG_EXT_SDTR, /*full*/FALSE)) { 3127 /* note asynch xfers and clear flag */ 3128 ahc_set_syncrate(ahc, devinfo, scb->ccb->ccb_h.path, 3129 /*syncrate*/NULL, /*period*/0, 3130 /*offset*/0, 3131 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3132 /*paused*/TRUE); 3133 printf("%s:%c:%d: refuses synchronous negotiation. " 3134 "Using asynchronous transfers\n", 3135 ahc_name(ahc), 3136 devinfo->channel, devinfo->target); 3137 } else if ((scb->hscb->control & MSG_SIMPLE_Q_TAG) != 0) { 3138 struct ccb_trans_settings neg; 3139 3140 printf("%s:%c:%d: refuses tagged commands. Performing " 3141 "non-tagged I/O\n", ahc_name(ahc), 3142 devinfo->channel, devinfo->target); 3143 3144 ahc_set_tags(ahc, devinfo, FALSE); 3145 neg.flags = 0; 3146 neg.valid = CCB_TRANS_TQ_VALID; 3147 xpt_setup_ccb(&neg.ccb_h, scb->ccb->ccb_h.path, /*priority*/1); 3148 xpt_async(AC_TRANSFER_NEG, scb->ccb->ccb_h.path, &neg); 3149 3150 /* 3151 * Resend the identify for this CCB as the target 3152 * may believe that the selection is invalid otherwise. 
		 */
		ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL)
					  & ~MSG_SIMPLE_Q_TAG);
		scb->hscb->control &= ~MSG_SIMPLE_Q_TAG;
		scb->ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
		/* Re-send identify with ATN so the selection stays valid. */
		ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
		ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, SCSISIGO) | ATNO);

		/*
		 * Requeue all tagged commands for this target
		 * currently in our possession so they can be
		 * converted to untagged commands.
		 */
		ahc_search_qinfifo(ahc, SCB_TARGET(scb), SCB_CHANNEL(scb),
				   SCB_LUN(scb), /*tag*/SCB_LIST_NULL,
				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
				   SEARCH_COMPLETE);
	} else {
		/*
		 * Otherwise, we ignore it.
		 */
		printf("%s:%c:%d: Message reject for %x -- ignored\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       last_msg);
	}
	return (response);
}

/*
 * Reset the driver's message-phase bookkeeping and tell the sequencer
 * that no host message is pending (MSG_NOOP in the MSG_OUT register).
 */
static void
ahc_clear_msg_state(struct ahc_softc *ahc)
{
	ahc->msgout_len = 0;
	ahc->msgin_index = 0;
	ahc->msg_type = MSG_TYPE_NONE;
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
}

/*
 * Run one step of the host message loop.  Called once per transferred
 * byte while the sequencer has handed a message phase to the host;
 * dispatches on ahc->msg_type (initiator/target, in/out) and tells the
 * sequencer via RETURN_1 whether to continue or exit the loop.
 */
static void
ahc_handle_message_phase(struct ahc_softc *ahc, struct cam_path *path)
{
	struct ahc_devinfo devinfo;
	u_int bus_phase;
	int end_session;

	ahc_fetch_devinfo(ahc, &devinfo);
	end_session = FALSE;
	bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;

reswitch:
	switch (ahc->msg_type) {
	case MSG_TYPE_INITIATOR_MSGOUT:
	{
		int lastbyte;
		int phasemis;
		int msgdone;

		if (ahc->msgout_len == 0)
			panic("REQINIT interrupt with no active message");

		phasemis = bus_phase != P_MESGOUT;
		if (phasemis) {
			if (bus_phase == P_MESGIN) {
				/*
				 * Change gears and see if
				 * this messages is of interest to
				 * us or should be passed back to
				 * the sequencer.
3220 */ 3221 ahc_outb(ahc, CLRSINT1, CLRATNO); 3222 ahc->send_msg_perror = FALSE; 3223 ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN; 3224 ahc->msgin_index = 0; 3225 goto reswitch; 3226 } 3227 end_session = TRUE; 3228 break; 3229 } 3230 3231 if (ahc->send_msg_perror) { 3232 ahc_outb(ahc, CLRSINT1, CLRATNO); 3233 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 3234 ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR); 3235 break; 3236 } 3237 3238 msgdone = ahc->msgout_index == ahc->msgout_len; 3239 if (msgdone) { 3240 /* 3241 * The target has requested a retry. 3242 * Re-assert ATN, reset our message index to 3243 * 0, and try again. 3244 */ 3245 ahc->msgout_index = 0; 3246 ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, SCSISIGO) | ATNO); 3247 } 3248 3249 lastbyte = ahc->msgout_index == (ahc->msgout_len - 1); 3250 if (lastbyte) { 3251 /* Last byte is signified by dropping ATN */ 3252 ahc_outb(ahc, CLRSINT1, CLRATNO); 3253 } 3254 3255 /* 3256 * Clear our interrupt status and present 3257 * the next byte on the bus. 3258 */ 3259 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 3260 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); 3261 break; 3262 } 3263 case MSG_TYPE_INITIATOR_MSGIN: 3264 { 3265 int phasemis; 3266 int message_done; 3267 3268 phasemis = bus_phase != P_MESGIN; 3269 3270 if (phasemis) { 3271 ahc->msgin_index = 0; 3272 if (bus_phase == P_MESGOUT 3273 && (ahc->send_msg_perror == TRUE 3274 || (ahc->msgout_len != 0 3275 && ahc->msgout_index == 0))) { 3276 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 3277 goto reswitch; 3278 } 3279 end_session = TRUE; 3280 break; 3281 } 3282 3283 /* Pull the byte in without acking it */ 3284 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL); 3285 3286 message_done = ahc_parse_msg(ahc, path, &devinfo); 3287 3288 if (message_done) { 3289 /* 3290 * Clear our incoming message buffer in case there 3291 * is another message following this one. 
3292 */ 3293 ahc->msgin_index = 0; 3294 3295 /* 3296 * If this message illicited a response, 3297 * assert ATN so the target takes us to the 3298 * message out phase. 3299 */ 3300 if (ahc->msgout_len != 0) 3301 ahc_outb(ahc, SCSISIGO, 3302 ahc_inb(ahc, SCSISIGO) | ATNO); 3303 } else 3304 ahc->msgin_index++; 3305 3306 /* Ack the byte */ 3307 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 3308 ahc_inb(ahc, SCSIDATL); 3309 break; 3310 } 3311 case MSG_TYPE_TARGET_MSGIN: 3312 { 3313 int msgdone; 3314 int msgout_request; 3315 3316 if (ahc->msgout_len == 0) 3317 panic("Target MSGIN with no active message"); 3318 3319 /* 3320 * If we interrupted a mesgout session, the initiator 3321 * will not know this until our first REQ. So, we 3322 * only honor mesgout requests after we've sent our 3323 * first byte. 3324 */ 3325 if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0 3326 && ahc->msgout_index > 0) 3327 msgout_request = TRUE; 3328 else 3329 msgout_request = FALSE; 3330 3331 if (msgout_request) { 3332 3333 /* 3334 * Change gears and see if 3335 * this messages is of interest to 3336 * us or should be passed back to 3337 * the sequencer. 3338 */ 3339 ahc->msg_type = MSG_TYPE_TARGET_MSGOUT; 3340 ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO); 3341 ahc->msgin_index = 0; 3342 /* Dummy read to REQ for first byte */ 3343 ahc_inb(ahc, SCSIDATL); 3344 ahc_outb(ahc, SXFRCTL0, 3345 ahc_inb(ahc, SXFRCTL0) | SPIOEN); 3346 break; 3347 } 3348 3349 msgdone = ahc->msgout_index == ahc->msgout_len; 3350 if (msgdone) { 3351 ahc_outb(ahc, SXFRCTL0, 3352 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); 3353 end_session = TRUE; 3354 break; 3355 } 3356 3357 /* 3358 * Present the next byte on the bus. 3359 */ 3360 ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); 3361 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); 3362 break; 3363 } 3364 case MSG_TYPE_TARGET_MSGOUT: 3365 { 3366 int lastbyte; 3367 int msgdone; 3368 3369 /* 3370 * The initiator signals that this is 3371 * the last byte by dropping ATN. 
3372 */ 3373 lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0; 3374 3375 /* 3376 * Read the latched byte, but turn off SPIOEN first 3377 * so that we don't inadvertantly cause a REQ for the 3378 * next byte. 3379 */ 3380 ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); 3381 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL); 3382 msgdone = ahc_parse_msg(ahc, path, &devinfo); 3383 if (msgdone == MSGLOOP_TERMINATED) { 3384 /* 3385 * The message is *really* done in that it caused 3386 * us to go to bus free. The sequencer has already 3387 * been reset at this point, so pull the ejection 3388 * handle. 3389 */ 3390 return; 3391 } 3392 3393 ahc->msgin_index++; 3394 3395 /* 3396 * XXX Read spec about initiator dropping ATN too soon 3397 * and use msgdone to detect it. 3398 */ 3399 if (msgdone == MSGLOOP_MSGCOMPLETE) { 3400 ahc->msgin_index = 0; 3401 3402 /* 3403 * If this message illicited a response, transition 3404 * to the Message in phase and send it. 3405 */ 3406 if (ahc->msgout_len != 0) { 3407 ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO); 3408 ahc_outb(ahc, SXFRCTL0, 3409 ahc_inb(ahc, SXFRCTL0) | SPIOEN); 3410 ahc->msg_type = MSG_TYPE_TARGET_MSGIN; 3411 ahc->msgin_index = 0; 3412 break; 3413 } 3414 } 3415 3416 if (lastbyte) 3417 end_session = TRUE; 3418 else { 3419 /* Ask for the next byte. */ 3420 ahc_outb(ahc, SXFRCTL0, 3421 ahc_inb(ahc, SXFRCTL0) | SPIOEN); 3422 } 3423 3424 break; 3425 } 3426 default: 3427 panic("Unknown REQINIT message type"); 3428 } 3429 3430 if (end_session) { 3431 ahc_clear_msg_state(ahc); 3432 ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP); 3433 } else 3434 ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP); 3435 } 3436 3437 /* 3438 * See if we sent a particular extended message to the target. 3439 * If "full" is true, the target saw the full message. 3440 * If "full" is false, the target saw at least the first 3441 * byte of the message. 
 */
static int
ahc_sent_msg(struct ahc_softc *ahc, u_int msgtype, int full)
{
	int found;
	int index;

	found = FALSE;
	index = 0;

	/*
	 * Walk the outgoing message buffer.  Extended messages are
	 * length-prefixed; tagged-queue messages carry one parameter
	 * byte; everything else is a single byte.
	 */
	while (index < ahc->msgout_len) {
		if (ahc->msgout_buf[index] == MSG_EXTENDED) {

			/* Found a candidate */
			if (ahc->msgout_buf[index+2] == msgtype) {
				u_int end_index;

				/* Last byte of this extended message */
				end_index = index + 1
					  + ahc->msgout_buf[index + 1];
				if (full) {
					if (ahc->msgout_index > end_index)
						found = TRUE;
				} else if (ahc->msgout_index > index)
					found = TRUE;
			}
			break;
		} else if (ahc->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
			&& ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {

			/* Skip tag type and tag id or residue param*/
			index += 2;
		} else {
			/* Single byte message */
			index++;
		}
	}
	return (found);
}

/*
 * Parse the message currently accumulating in msgin_buf, acting on it
 * once enough bytes have arrived.  Returns MSGLOOP_IN_PROG while more
 * bytes are needed, MSGLOOP_MSGCOMPLETE when a full message has been
 * handled, or MSGLOOP_TERMINATED when the message took us to bus free.
 */
static int
ahc_parse_msg(struct ahc_softc *ahc, struct cam_path *path,
	      struct ahc_devinfo *devinfo)
{
	struct	ahc_initiator_tinfo *tinfo;
	struct	tmode_tstate *tstate;
	int	reject;
	int	done;
	int	response;
	u_int	targ_scsirate;

	done = MSGLOOP_IN_PROG;
	response = FALSE;
	reject = FALSE;
	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	targ_scsirate = tinfo->scsirate;

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it.  When
	 * the entire message is available and has been
	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
	 * that we have parsed an entire message.
	 *
	 * In the case of extended messages, we accept the length
	 * byte outright and perform more checking once we know the
	 * extended message type.
	 */
	switch (ahc->msgin_buf[0]) {
	case MSG_MESSAGE_REJECT:
		response = ahc_handle_msg_reject(ahc, devinfo);
		/* FALLTHROUGH */
	case MSG_NOOP:
		done = MSGLOOP_MSGCOMPLETE;
		break;
	case MSG_IGN_WIDE_RESIDUE:
	{
		/* Wait for the whole message */
		if (ahc->msgin_index >= 1) {
			/*
			 * The only defined residue count is 1, and the
			 * message is meaningless on a narrow connection.
			 */
			if (ahc->msgin_buf[1] != 1
			 || tinfo->current.width == MSG_EXT_WDTR_BUS_8_BIT) {
				reject = TRUE;
				done = MSGLOOP_MSGCOMPLETE;
			} else
				ahc_handle_ign_wide_residue(ahc, devinfo);
		}
		break;
	}
	case MSG_EXTENDED:
	{
		/* Wait for enough of the message to begin validation */
		if (ahc->msgin_index < 2)
			break;
		switch (ahc->msgin_buf[2]) {
		case MSG_EXT_SDTR:
		{
			struct	 ahc_syncrate *syncrate;
			u_int	 period;
			u_int	 offset;
			u_int	 saved_offset;

			if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have both args before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_SDTR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1))
				break;

			period = ahc->msgin_buf[3];
			saved_offset = offset = ahc->msgin_buf[4];
			syncrate = ahc_devlimited_syncrate(ahc, &period);
			ahc_validate_offset(ahc, syncrate, &offset,
					    targ_scsirate & WIDEXFER);
			ahc_set_syncrate(ahc, devinfo, path,
					 syncrate, period, offset,
					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
					 /*paused*/TRUE);

			/*
			 * See if we initiated Sync Negotiation
			 * and didn't have to fall down to async
			 * transfers.
			 */
			if (ahc_sent_msg(ahc, MSG_EXT_SDTR, /*full*/TRUE)) {
				/* We started it */
				if (saved_offset != offset) {
					/* Went too low - force async */
					reject = TRUE;
				}
			} else {
				/*
				 * Send our own SDTR in reply
				 */
				if (bootverbose)
					printf("Sending SDTR!\n");
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_construct_sdtr(ahc, period, offset);
				ahc->msgout_index = 0;
				response = TRUE;
			}
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		case MSG_EXT_WDTR:
		{
			u_int bus_width;
			u_int sending_reply;

			sending_reply = FALSE;
			if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have our arg before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_WDTR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1))
				break;

			bus_width = ahc->msgin_buf[3];
			if (ahc_sent_msg(ahc, MSG_EXT_WDTR, /*full*/TRUE)) {
				/*
				 * Don't send a WDTR back to the
				 * target, since we asked first.
				 */
				switch (bus_width){
				default:
					/*
					 * How can we do anything greater
					 * than 16bit transfers on a 16bit
					 * bus?
					 */
					reject = TRUE;
					printf("%s: target %d requested %dBit "
					       "transfers.  Rejecting...\n",
					       ahc_name(ahc), devinfo->target,
					       8 * (0x01 << bus_width));
					/* FALLTHROUGH */
				case MSG_EXT_WDTR_BUS_8_BIT:
					bus_width = MSG_EXT_WDTR_BUS_8_BIT;
					break;
				case MSG_EXT_WDTR_BUS_16_BIT:
					break;
				}
			} else {
				/*
				 * Send our own WDTR in reply
				 */
				if (bootverbose)
					printf("Sending WDTR!\n");
				switch (bus_width) {
				default:
					if (ahc->features & AHC_WIDE) {
						/* Respond Wide */
						bus_width =
						    MSG_EXT_WDTR_BUS_16_BIT;
						break;
					}
					/* FALLTHROUGH */
				case MSG_EXT_WDTR_BUS_8_BIT:
					bus_width = MSG_EXT_WDTR_BUS_8_BIT;
					break;
				}
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_construct_wdtr(ahc, bus_width);
				ahc->msgout_index = 0;
				response = TRUE;
				sending_reply = TRUE;
			}
			ahc_set_width(ahc, devinfo, path, bus_width,
				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
				      /*paused*/TRUE);

			/* After a wide message, we are async */
			ahc_set_syncrate(ahc, devinfo, path,
					 /*syncrate*/NULL, /*period*/0,
					 /*offset*/0, AHC_TRANS_ACTIVE,
					 /*paused*/TRUE);
			if (sending_reply == FALSE && reject == FALSE) {

				/*
				 * WDTR resets sync, so restart sync
				 * negotiation if we have a goal for it.
				 */
				if (tinfo->goal.period) {
					struct	ahc_syncrate *rate;
					u_int	period;
					u_int	offset;

					/* Start the sync negotiation */
					period = tinfo->goal.period;
					rate = ahc_devlimited_syncrate(ahc,
								       &period);
					offset = tinfo->goal.offset;
					ahc_validate_offset(ahc, rate, &offset,
							    tinfo->current.width);
					ahc->msgout_index = 0;
					ahc->msgout_len = 0;
					ahc_construct_sdtr(ahc, period, offset);
					ahc->msgout_index = 0;
					response = TRUE;
				}
			}
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		default:
			/* Unknown extended message.  Reject it. */
			reject = TRUE;
			break;
		}
		break;
	}
	case MSG_BUS_DEV_RESET:
		ahc_handle_devreset(ahc, devinfo,
				    CAM_BDR_SENT, AC_SENT_BDR,
				    "Bus Device Reset Received",
				    /*verbose_level*/0);
		restart_sequencer(ahc);
		done = MSGLOOP_TERMINATED;
		break;
	case MSG_ABORT_TAG:
	case MSG_ABORT:
	case MSG_CLEAR_QUEUE:
		/* Target mode messages */
		if (devinfo->role != ROLE_TARGET) {
			reject = TRUE;
			break;
		}
		ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
			       devinfo->lun,
			       ahc->msgin_buf[0] == MSG_ABORT_TAG
					? SCB_LIST_NULL
					: ahc_inb(ahc, INITIATOR_TAG),
			       ROLE_TARGET, CAM_REQ_ABORTED);

		tstate = ahc->enabled_targets[devinfo->our_scsiid];
		if (tstate != NULL) {
			struct tmode_lstate* lstate;

			lstate = tstate->enabled_luns[devinfo->lun];
			if (lstate != NULL) {
				ahc_queue_lstate_event(ahc, lstate,
						       devinfo->our_scsiid,
						       ahc->msgin_buf[0],
						       /*arg*/0);
				ahc_send_lstate_events(ahc, lstate);
			}
		}
		done = MSGLOOP_MSGCOMPLETE;
		break;
	case MSG_TERM_IO_PROC:
	default:
		reject = TRUE;
		break;
	}

	if (reject) {
		/*
		 * Setup to reject the message.
		 */
		ahc->msgout_index = 0;
		ahc->msgout_len = 1;
		ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
		done = MSGLOOP_MSGCOMPLETE;
		response = TRUE;
	}

	if (done != MSGLOOP_IN_PROG && !response)
		/* Clear the outgoing message buffer */
		ahc->msgout_len = 0;

	return (done);
}

/*
 * Handle MSG_IGN_WIDE_RESIDUE: the target transferred one byte beyond
 * the intended end of a wide (16bit) data phase.  Rewind the residual
 * data count and host address registers by one byte, stepping back to
 * the previous S/G segment if the current one is exhausted.
 */
static void
ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	u_int scb_index;
	struct scb *scb;

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = &ahc->scb_data->scbarray[scb_index];
	if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
	 || (scb->ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_IN) {
		/*
		 * Ignore the message if we haven't
		 * seen an appropriate data phase yet.
		 */
	} else {
		/*
		 * If the residual occurred on the last
		 * transfer and the transfer request was
		 * expected to end on an odd count, do
		 * nothing.  Otherwise, subtract a byte
		 * and update the residual count accordingly.
		 */
		u_int resid_sgcnt;

		resid_sgcnt = ahc_inb(ahc, SCB_RESID_SGCNT);
		if (resid_sgcnt == 0
		 && ahc_inb(ahc, DATA_COUNT_ODD) == 1) {
			/*
			 * If the residual occurred on the last
			 * transfer and the transfer request was
			 * expected to end on an odd count, do
			 * nothing.
			 */
		} else {
			u_int data_cnt;
			u_int data_addr;
			u_int sg_index;

			/* 24-bit residual count, little-endian registers */
			data_cnt = (ahc_inb(ahc, SCB_RESID_DCNT + 2) << 16)
				 | (ahc_inb(ahc, SCB_RESID_DCNT + 1) << 8)
				 | (ahc_inb(ahc, SCB_RESID_DCNT));

			data_addr = (ahc_inb(ahc, SHADDR + 3) << 24)
				  | (ahc_inb(ahc, SHADDR + 2) << 16)
				  | (ahc_inb(ahc, SHADDR + 1) << 8)
				  | (ahc_inb(ahc, SHADDR));

			/* Back up one byte */
			data_cnt += 1;
			data_addr -= 1;

			sg_index = scb->sg_count - resid_sgcnt;

			if (sg_index != 0
			 && (scb->sg_list[sg_index].len < data_cnt)) {
				u_int sg_addr;

				/* Step back into the previous S/G segment */
				sg_index--;
				data_cnt = 1;
				data_addr = scb->sg_list[sg_index].addr
					  + scb->sg_list[sg_index].len - 1;

				/*
				 * The physical address base points to the
				 * second entry as it is always used for
				 * calculating the "next S/G pointer".
				 */
				sg_addr = scb->sg_list_phys
					+ (sg_index * sizeof(*scb->sg_list));
				ahc_outb(ahc, SG_NEXT + 3, sg_addr >> 24);
				ahc_outb(ahc, SG_NEXT + 2, sg_addr >> 16);
				ahc_outb(ahc, SG_NEXT + 1, sg_addr >> 8);
				ahc_outb(ahc, SG_NEXT, sg_addr);
			}

			ahc_outb(ahc, SCB_RESID_DCNT + 2, data_cnt >> 16);
			ahc_outb(ahc, SCB_RESID_DCNT + 1, data_cnt >> 8);
			ahc_outb(ahc, SCB_RESID_DCNT, data_cnt);

			ahc_outb(ahc, SHADDR + 3, data_addr >> 24);
			ahc_outb(ahc, SHADDR + 2, data_addr >> 16);
			ahc_outb(ahc, SHADDR + 1, data_addr >> 8);
			ahc_outb(ahc, SHADDR, data_addr);
		}
	}
}

/*
 * Clean up after a bus device reset: abort all SCBs for the device,
 * notify any enabled target-mode luns, drop back to async/narrow
 * transfers, and post an async event to the XPT layer.
 */
static void
ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		    cam_status status, ac_code acode, char *message,
		    int verbose_level)
{
	struct cam_path *path;
	int found;
	int error;
	struct tmode_tstate* tstate;
	u_int lun;

	error = ahc_create_path(ahc, devinfo, &path);

	found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
			       status);

	/*
	 * Send an immediate notify ccb to all target mode peripheral
	 * drivers affected by this action.
	 */
	tstate = ahc->enabled_targets[devinfo->our_scsiid];
	if (tstate != NULL) {
		for (lun = 0; lun <= 7; lun++) {
			struct tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
					       MSG_BUS_DEV_RESET, /*arg*/0);
			ahc_send_lstate_events(ahc, lstate);
		}
	}

	/*
	 * Go back to async/narrow transfers and renegotiate.
	 * ahc_set_width and ahc_set_syncrate can cope with NULL
	 * paths.
	 */
	ahc_set_width(ahc, devinfo, path, MSG_EXT_WDTR_BUS_8_BIT,
		      AHC_TRANS_CUR, /*paused*/TRUE);
	ahc_set_syncrate(ahc, devinfo, path, /*syncrate*/NULL,
			 /*period*/0, /*offset*/0, AHC_TRANS_CUR,
			 /*paused*/TRUE);

	/*
	 * NOTE(review): acode is only used as a boolean here while
	 * AC_SENT_BDR is sent unconditionally — presumably xpt_async
	 * should receive acode itself; confirm against the callers.
	 */
	if (error == CAM_REQ_CMP && acode != 0)
		xpt_async(AC_SENT_BDR, path, NULL);

	if (error == CAM_REQ_CMP)
		xpt_free_path(path);

	if (message != NULL
	 && (verbose_level <= bootverbose))
		printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc),
		       message, devinfo->channel, devinfo->target, found);
}

/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation
 * went.
3917 */ 3918 static void 3919 ahc_done(struct ahc_softc *ahc, struct scb *scb) 3920 { 3921 union ccb *ccb; 3922 3923 CAM_DEBUG(scb->ccb->ccb_h.path, CAM_DEBUG_TRACE, 3924 ("ahc_done - scb %d\n", scb->hscb->tag)); 3925 3926 ccb = scb->ccb; 3927 LIST_REMOVE(&ccb->ccb_h, sim_links.le); 3928 3929 untimeout(ahc_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch); 3930 3931 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 3932 bus_dmasync_op_t op; 3933 3934 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 3935 op = BUS_DMASYNC_POSTREAD; 3936 else 3937 op = BUS_DMASYNC_POSTWRITE; 3938 bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op); 3939 bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap); 3940 } 3941 3942 /* 3943 * Unbusy this target/channel/lun. 3944 * XXX if we are holding two commands per lun, 3945 * send the next command. 3946 */ 3947 ahc_index_busy_tcl(ahc, scb->hscb->tcl, /*unbusy*/TRUE); 3948 3949 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { 3950 if (ahc_ccb_status(ccb) == CAM_REQ_INPROG) 3951 ccb->ccb_h.status |= CAM_REQ_CMP; 3952 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3953 ahcfreescb(ahc, scb); 3954 xpt_done(ccb); 3955 return; 3956 } 3957 3958 /* 3959 * If the recovery SCB completes, we have to be 3960 * out of our timeout. 3961 */ 3962 if ((scb->flags & SCB_RECOVERY_SCB) != 0) { 3963 3964 struct ccb_hdr *ccbh; 3965 3966 /* 3967 * We were able to complete the command successfully, 3968 * so reinstate the timeouts for all other pending 3969 * commands. 3970 */ 3971 ccbh = ahc->pending_ccbs.lh_first; 3972 while (ccbh != NULL) { 3973 struct scb *pending_scb; 3974 3975 pending_scb = (struct scb *)ccbh->ccb_scb_ptr; 3976 ccbh->timeout_ch = 3977 timeout(ahc_timeout, pending_scb, 3978 (ccbh->timeout * hz)/1000); 3979 ccbh = LIST_NEXT(ccbh, sim_links.le); 3980 } 3981 3982 /* 3983 * Ensure that we didn't put a second instance of this 3984 * SCB into the QINFIFO. 
3985 */ 3986 ahc_search_qinfifo(ahc, SCB_TARGET(scb), SCB_CHANNEL(scb), 3987 SCB_LUN(scb), scb->hscb->tag, 3988 ROLE_INITIATOR, /*status*/0, 3989 SEARCH_REMOVE); 3990 if (ahc_ccb_status(ccb) == CAM_BDR_SENT 3991 || ahc_ccb_status(ccb) == CAM_REQ_ABORTED) 3992 ahcsetccbstatus(ccb, CAM_CMD_TIMEOUT); 3993 xpt_print_path(ccb->ccb_h.path); 3994 printf("no longer in timeout, status = %x\n", 3995 ccb->ccb_h.status); 3996 } 3997 3998 /* Don't clobber any existing error state */ 3999 if (ahc_ccb_status(ccb) == CAM_REQ_INPROG) { 4000 ccb->ccb_h.status |= CAM_REQ_CMP; 4001 } else if ((scb->flags & SCB_SENSE) != 0) { 4002 /* 4003 * We performed autosense retrieval. 4004 * 4005 * bzero the sense data before having 4006 * the drive fill it. The SCSI spec mandates 4007 * that any untransfered data should be 4008 * assumed to be zero. Complete the 'bounce' 4009 * of sense information through buffers accessible 4010 * via bus-space by copying it into the clients 4011 * csio. 4012 */ 4013 bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data)); 4014 bcopy(&ahc->scb_data->sense[scb->hscb->tag], 4015 &ccb->csio.sense_data, scb->sg_list->len); 4016 scb->ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 4017 } 4018 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4019 ahcfreescb(ahc, scb); 4020 xpt_done(ccb); 4021 } 4022 4023 /* 4024 * Determine the number of SCBs available on the controller 4025 */ 4026 int 4027 ahc_probe_scbs(struct ahc_softc *ahc) { 4028 int i; 4029 4030 for (i = 0; i < AHC_SCB_MAX; i++) { 4031 ahc_outb(ahc, SCBPTR, i); 4032 ahc_outb(ahc, SCB_CONTROL, i); 4033 if (ahc_inb(ahc, SCB_CONTROL) != i) 4034 break; 4035 ahc_outb(ahc, SCBPTR, 0); 4036 if (ahc_inb(ahc, SCB_CONTROL) != 0) 4037 break; 4038 } 4039 return (i); 4040 } 4041 4042 /* 4043 * Start the board, ready for normal operation 4044 */ 4045 int 4046 ahc_init(struct ahc_softc *ahc) 4047 { 4048 int max_targ = 15; 4049 int i; 4050 int term; 4051 u_int scsi_conf; 4052 u_int scsiseq_template; 4053 u_int ultraenb; 4054 u_int 
discenable; 4055 u_int tagenable; 4056 size_t driver_data_size; 4057 u_int32_t physaddr; 4058 4059 #ifdef AHC_PRINT_SRAM 4060 printf("Scratch Ram:"); 4061 for (i = 0x20; i < 0x5f; i++) { 4062 if (((i % 8) == 0) && (i != 0)) { 4063 printf ("\n "); 4064 } 4065 printf (" 0x%x", ahc_inb(ahc, i)); 4066 } 4067 if ((ahc->features & AHC_MORE_SRAM) != 0) { 4068 for (i = 0x70; i < 0x7f; i++) { 4069 if (((i % 8) == 0) && (i != 0)) { 4070 printf ("\n "); 4071 } 4072 printf (" 0x%x", ahc_inb(ahc, i)); 4073 } 4074 } 4075 printf ("\n"); 4076 #endif 4077 4078 /* 4079 * Assume we have a board at this stage and it has been reset. 4080 */ 4081 if ((ahc->flags & AHC_USEDEFAULTS) != 0) 4082 ahc->our_id = ahc->our_id_b = 7; 4083 4084 /* 4085 * Default to allowing initiator operations. 4086 */ 4087 ahc->flags |= AHC_INITIATORMODE; 4088 4089 /* 4090 * XXX Would be better to use a per device flag, but PCI and EISA 4091 * devices don't have them yet. 4092 */ 4093 if ((AHC_TMODE_ENABLE & (0x01 << ahc->unit)) != 0) { 4094 ahc->flags |= AHC_TARGETMODE; 4095 /* 4096 * Although we have space for both the initiator and 4097 * target roles on ULTRA2 chips, we currently disable 4098 * the initiator role to allow multi-scsi-id target mode 4099 * configurations. We can only respond on the same SCSI 4100 * ID as our initiator role if we allow initiator operation. 4101 * At some point, we should add a configuration knob to 4102 * allow both roles to be loaded. 4103 */ 4104 ahc->flags &= ~AHC_INITIATORMODE; 4105 } 4106 4107 /* DMA tag for mapping buffers into device visible space. 
*/ 4108 if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0, 4109 /*lowaddr*/BUS_SPACE_MAXADDR, 4110 /*highaddr*/BUS_SPACE_MAXADDR, 4111 /*filter*/NULL, /*filterarg*/NULL, 4112 /*maxsize*/MAXBSIZE, /*nsegments*/AHC_NSEG, 4113 /*maxsegsz*/AHC_MAXTRANSFER_SIZE, 4114 /*flags*/BUS_DMA_ALLOCNOW, 4115 &ahc->buffer_dmat) != 0) { 4116 return (ENOMEM); 4117 } 4118 4119 ahc->init_level++; 4120 4121 /* 4122 * DMA tag for our command fifos and other data in system memory 4123 * the card's sequencer must be able to access. For initiator 4124 * roles, we need to allocate space for the qinfifo, qoutfifo, 4125 * and untagged_scb arrays each of which are composed of 256 4126 * 1 byte elements. When providing for the target mode role, 4127 * we additionally must provide space for the incoming target 4128 * command fifo. 4129 */ 4130 driver_data_size = 3 * 256 * sizeof(u_int8_t); 4131 if ((ahc->flags & AHC_TARGETMODE) != 0) 4132 driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd) 4133 + /*DMA WideOdd Bug Buffer*/1; 4134 if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0, 4135 /*lowaddr*/BUS_SPACE_MAXADDR, 4136 /*highaddr*/BUS_SPACE_MAXADDR, 4137 /*filter*/NULL, /*filterarg*/NULL, 4138 driver_data_size, 4139 /*nsegments*/1, 4140 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 4141 /*flags*/0, &ahc->shared_data_dmat) != 0) { 4142 return (ENOMEM); 4143 } 4144 4145 ahc->init_level++; 4146 4147 /* Allocation of driver data */ 4148 if (bus_dmamem_alloc(ahc->shared_data_dmat, (void **)&ahc->qoutfifo, 4149 BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) { 4150 return (ENOMEM); 4151 } 4152 4153 ahc->init_level++; 4154 4155 /* And permanently map it in */ 4156 bus_dmamap_load(ahc->shared_data_dmat, ahc->shared_data_dmamap, 4157 ahc->qoutfifo, driver_data_size, 4158 ahcdmamapcb, &ahc->shared_data_busaddr, /*flags*/0); 4159 4160 ahc->init_level++; 4161 4162 /* Allocate SCB data now that buffer_dmat is initialized */ 4163 if (ahc->scb_data->maxhscbs == 0) 
4164 if (ahcinitscbdata(ahc) != 0) 4165 return (ENOMEM); 4166 4167 ahc->qinfifo = &ahc->qoutfifo[256]; 4168 ahc->untagged_scbs = &ahc->qinfifo[256]; 4169 /* There are no untagged SCBs active yet. */ 4170 for (i = 0; i < 256; i++) 4171 ahc->untagged_scbs[i] = SCB_LIST_NULL; 4172 4173 /* All of our queues are empty */ 4174 for (i = 0; i < 256; i++) 4175 ahc->qoutfifo[i] = SCB_LIST_NULL; 4176 4177 if ((ahc->features & AHC_MULTI_TID) != 0) { 4178 ahc_outb(ahc, TARGID, 0); 4179 ahc_outb(ahc, TARGID + 1, 0); 4180 } 4181 4182 if ((ahc->flags & AHC_TARGETMODE) != 0) { 4183 4184 ahc->targetcmds = (struct target_cmd *)&ahc->untagged_scbs[256]; 4185 ahc->dma_bug_buf = ahc->shared_data_busaddr 4186 + driver_data_size - 1; 4187 /* All target command blocks start out invalid. */ 4188 for (i = 0; i < AHC_TMODE_CMDS; i++) 4189 ahc->targetcmds[i].cmd_valid = 0; 4190 ahc->tqinfifonext = 1; 4191 ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1); 4192 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); 4193 } 4194 4195 /* 4196 * Allocate a tstate to house information for our 4197 * initiator presence on the bus as well as the user 4198 * data for any target mode initiator. 4199 */ 4200 if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) { 4201 printf("%s: unable to allocate tmode_tstate. " 4202 "Failing attach\n", ahc_name(ahc)); 4203 return (-1); 4204 } 4205 4206 if ((ahc->features & AHC_TWIN) != 0) { 4207 if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) { 4208 printf("%s: unable to allocate tmode_tstate. " 4209 "Failing attach\n", ahc_name(ahc)); 4210 return (-1); 4211 } 4212 printf("Twin Channel, A SCSI Id=%d, B SCSI Id=%d, primary %c, ", 4213 ahc->our_id, ahc->our_id_b, 4214 ahc->flags & AHC_CHANNEL_B_PRIMARY? 
'B': 'A'); 4215 } else { 4216 if ((ahc->features & AHC_WIDE) != 0) { 4217 printf("Wide "); 4218 } else { 4219 printf("Single "); 4220 } 4221 printf("Channel %c, SCSI Id=%d, ", ahc->channel, ahc->our_id); 4222 } 4223 4224 ahc_outb(ahc, SEQ_FLAGS, 0); 4225 4226 if (ahc->scb_data->maxhscbs < AHC_SCB_MAX) { 4227 ahc->flags |= AHC_PAGESCBS; 4228 printf("%d/%d SCBs\n", ahc->scb_data->maxhscbs, AHC_SCB_MAX); 4229 } else { 4230 ahc->flags &= ~AHC_PAGESCBS; 4231 printf("%d SCBs\n", ahc->scb_data->maxhscbs); 4232 } 4233 4234 #ifdef AHC_DEBUG 4235 if (ahc_debug & AHC_SHOWMISC) { 4236 printf("%s: hardware scb %d bytes; kernel scb %d bytes; " 4237 "ahc_dma %d bytes\n", 4238 ahc_name(ahc), 4239 sizeof(struct hardware_scb), 4240 sizeof(struct scb), 4241 sizeof(struct ahc_dma_seg)); 4242 } 4243 #endif /* AHC_DEBUG */ 4244 4245 /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/ 4246 if (ahc->features & AHC_TWIN) { 4247 4248 /* 4249 * The device is gated to channel B after a chip reset, 4250 * so set those values first 4251 */ 4252 term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0; 4253 if ((ahc->features & AHC_ULTRA2) != 0) 4254 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id_b); 4255 else 4256 ahc_outb(ahc, SCSIID, ahc->our_id_b); 4257 scsi_conf = ahc_inb(ahc, SCSICONF + 1); 4258 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4259 |term|ENSTIMER|ACTNEGEN); 4260 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4261 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4262 4263 if ((scsi_conf & RESET_SCSI) != 0 4264 && (ahc->flags & AHC_INITIATORMODE) != 0) 4265 ahc->flags |= AHC_RESET_BUS_B; 4266 4267 /* Select Channel A */ 4268 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 4269 } 4270 term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? 
STPWEN : 0; 4271 if ((ahc->features & AHC_ULTRA2) != 0) 4272 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); 4273 else 4274 ahc_outb(ahc, SCSIID, ahc->our_id); 4275 scsi_conf = ahc_inb(ahc, SCSICONF); 4276 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4277 |term 4278 |ENSTIMER|ACTNEGEN); 4279 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4280 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4281 4282 if ((scsi_conf & RESET_SCSI) != 0 4283 && (ahc->flags & AHC_INITIATORMODE) != 0) 4284 ahc->flags |= AHC_RESET_BUS_A; 4285 4286 /* 4287 * Look at the information that board initialization or 4288 * the board bios has left us. 4289 */ 4290 ultraenb = 0; 4291 tagenable = ALL_TARGETS_MASK; 4292 4293 /* Grab the disconnection disable table and invert it for our needs */ 4294 if (ahc->flags & AHC_USEDEFAULTS) { 4295 printf("%s: Host Adapter Bios disabled. Using default SCSI " 4296 "device parameters\n", ahc_name(ahc)); 4297 ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B| 4298 AHC_TERM_ENB_A|AHC_TERM_ENB_B; 4299 discenable = ALL_TARGETS_MASK; 4300 if ((ahc->features & AHC_ULTRA) != 0) 4301 ultraenb = ALL_TARGETS_MASK; 4302 } else { 4303 discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8) 4304 | ahc_inb(ahc, DISC_DSB)); 4305 if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0) 4306 ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8) 4307 | ahc_inb(ahc, ULTRA_ENB); 4308 } 4309 4310 if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) 4311 max_targ = 7; 4312 4313 for (i = 0; i <= max_targ; i++) { 4314 struct ahc_initiator_tinfo *tinfo; 4315 struct tmode_tstate *tstate; 4316 u_int our_id; 4317 u_int target_id; 4318 char channel; 4319 4320 channel = 'A'; 4321 our_id = ahc->our_id; 4322 target_id = i; 4323 if (i > 7 && (ahc->features & AHC_TWIN) != 0) { 4324 channel = 'B'; 4325 our_id = ahc->our_id_b; 4326 target_id = i % 8; 4327 } 4328 tinfo = ahc_fetch_transinfo(ahc, channel, our_id, 4329 target_id, &tstate); 4330 /* Default to async narrow across the board */ 4331 bzero(tinfo, 
sizeof(*tinfo)); 4332 if (ahc->flags & AHC_USEDEFAULTS) { 4333 if ((ahc->features & AHC_WIDE) != 0) 4334 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4335 4336 /* 4337 * These will be truncated when we determine the 4338 * connection type we have with the target. 4339 */ 4340 tinfo->user.period = ahc_syncrates->period; 4341 tinfo->user.offset = ~0; 4342 } else { 4343 u_int scsirate; 4344 u_int16_t mask; 4345 4346 /* Take the settings leftover in scratch RAM. */ 4347 scsirate = ahc_inb(ahc, TARG_SCSIRATE + i); 4348 mask = (0x01 << i); 4349 if ((ahc->features & AHC_ULTRA2) != 0) { 4350 u_int offset; 4351 u_int maxsync; 4352 4353 if ((scsirate & SOFS) == 0x0F) { 4354 /* 4355 * Haven't negotiated yet, 4356 * so the format is different. 4357 */ 4358 scsirate = (scsirate & SXFR) >> 4 4359 | (ultraenb & mask) 4360 ? 0x08 : 0x0 4361 | (scsirate & WIDEXFER); 4362 offset = MAX_OFFSET_ULTRA2; 4363 } else 4364 offset = ahc_inb(ahc, TARG_OFFSET + i); 4365 maxsync = AHC_SYNCRATE_ULTRA2; 4366 if ((ahc->features & AHC_DT) != 0) 4367 maxsync = AHC_SYNCRATE_DT; 4368 tinfo->user.period = 4369 ahc_find_period(ahc, scsirate, maxsync); 4370 if (offset == 0) 4371 tinfo->user.period = 0; 4372 else 4373 tinfo->user.offset = ~0; 4374 } else if ((scsirate & SOFS) != 0) { 4375 tinfo->user.period = 4376 ahc_find_period(ahc, scsirate, 4377 (ultraenb & mask) 4378 ? AHC_SYNCRATE_ULTRA 4379 : AHC_SYNCRATE_FAST); 4380 if (tinfo->user.period != 0) 4381 tinfo->user.offset = ~0; 4382 } 4383 if ((scsirate & WIDEXFER) != 0 4384 && (ahc->features & AHC_WIDE) != 0) 4385 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4386 } 4387 tstate->ultraenb = ultraenb; 4388 tstate->discenable = discenable; 4389 tstate->tagenable = 0; /* Wait until the XPT says its okay */ 4390 } 4391 ahc->user_discenable = discenable; 4392 ahc->user_tagenable = tagenable; 4393 4394 /* 4395 * Tell the sequencer where it can find our arrays in memory. 
4396 */ 4397 physaddr = ahc->scb_data->hscb_busaddr; 4398 ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF); 4399 ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF); 4400 ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF); 4401 ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF); 4402 4403 physaddr = ahc->shared_data_busaddr; 4404 ahc_outb(ahc, SCBID_ADDR, physaddr & 0xFF); 4405 ahc_outb(ahc, SCBID_ADDR + 1, (physaddr >> 8) & 0xFF); 4406 ahc_outb(ahc, SCBID_ADDR + 2, (physaddr >> 16) & 0xFF); 4407 ahc_outb(ahc, SCBID_ADDR + 3, (physaddr >> 24) & 0xFF); 4408 4409 /* Target mode incomding command fifo */ 4410 physaddr += 3 * 256 * sizeof(u_int8_t); 4411 ahc_outb(ahc, TMODE_CMDADDR, physaddr & 0xFF); 4412 ahc_outb(ahc, TMODE_CMDADDR + 1, (physaddr >> 8) & 0xFF); 4413 ahc_outb(ahc, TMODE_CMDADDR + 2, (physaddr >> 16) & 0xFF); 4414 ahc_outb(ahc, TMODE_CMDADDR + 3, (physaddr >> 24) & 0xFF); 4415 4416 /* 4417 * Initialize the group code to command length table. 4418 * This overrides the values in TARG_SCSIRATE, so only 4419 * setup the table after we have processed that information. 4420 */ 4421 ahc_outb(ahc, CMDSIZE_TABLE, 5); 4422 ahc_outb(ahc, CMDSIZE_TABLE + 1, 9); 4423 ahc_outb(ahc, CMDSIZE_TABLE + 2, 9); 4424 ahc_outb(ahc, CMDSIZE_TABLE + 3, 0); 4425 ahc_outb(ahc, CMDSIZE_TABLE + 4, 15); 4426 ahc_outb(ahc, CMDSIZE_TABLE + 5, 11); 4427 ahc_outb(ahc, CMDSIZE_TABLE + 6, 0); 4428 ahc_outb(ahc, CMDSIZE_TABLE + 7, 0); 4429 4430 /* Tell the sequencer of our initial queue positions */ 4431 ahc_outb(ahc, KERNEL_QINPOS, 0); 4432 ahc_outb(ahc, QINPOS, 0); 4433 ahc_outb(ahc, QOUTPOS, 0); 4434 4435 /* Don't have any special messages to send to targets */ 4436 ahc_outb(ahc, TARGET_MSG_REQUEST, 0); 4437 ahc_outb(ahc, TARGET_MSG_REQUEST + 1, 0); 4438 4439 /* 4440 * Use the built in queue management registers 4441 * if they are available. 
4442 */ 4443 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 4444 ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256); 4445 ahc_outb(ahc, SDSCB_QOFF, 0); 4446 ahc_outb(ahc, SNSCB_QOFF, 0); 4447 ahc_outb(ahc, HNSCB_QOFF, 0); 4448 } 4449 4450 4451 /* We don't have any waiting selections */ 4452 ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL); 4453 4454 /* Our disconnection list is empty too */ 4455 ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL); 4456 4457 /* Message out buffer starts empty */ 4458 ahc_outb(ahc, MSG_OUT, MSG_NOOP); 4459 4460 /* 4461 * Setup the allowed SCSI Sequences based on operational mode. 4462 * If we are a target, we'll enalbe select in operations once 4463 * we've had a lun enabled. 4464 */ 4465 scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP; 4466 if ((ahc->flags & AHC_INITIATORMODE) != 0) 4467 scsiseq_template |= ENRSELI; 4468 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template); 4469 4470 /* 4471 * Load the Sequencer program and Enable the adapter 4472 * in "fast" mode. 4473 */ 4474 if (bootverbose) 4475 printf("%s: Downloading Sequencer Program...", 4476 ahc_name(ahc)); 4477 4478 ahc_loadseq(ahc); 4479 4480 /* We have to wait until after any system dumps... */ 4481 EVENTHANDLER_REGISTER(shutdown_final, ahc_shutdown, 4482 ahc, SHUTDOWN_PRI_DEFAULT); 4483 4484 return (0); 4485 } 4486 4487 static cam_status 4488 ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb, 4489 struct tmode_tstate **tstate, struct tmode_lstate **lstate, 4490 int notfound_failure) 4491 { 4492 int our_id; 4493 4494 /* 4495 * If we are not configured for target mode, someone 4496 * is really confused to be sending this to us. 4497 */ 4498 if ((ahc->flags & AHC_TARGETMODE) == 0) 4499 return (CAM_REQ_INVALID); 4500 4501 /* Range check target and lun */ 4502 4503 /* 4504 * Handle the 'black hole' device that sucks up 4505 * requests to unattached luns on enabled targets. 
4506 */ 4507 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD 4508 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 4509 *tstate = NULL; 4510 *lstate = ahc->black_hole; 4511 } else { 4512 u_int max_id; 4513 4514 if (cam_sim_bus(sim) == 0) 4515 our_id = ahc->our_id; 4516 else 4517 our_id = ahc->our_id_b; 4518 4519 max_id = (ahc->features & AHC_WIDE) ? 15 : 7; 4520 if (ccb->ccb_h.target_id > max_id) 4521 return (CAM_TID_INVALID); 4522 4523 if (ccb->ccb_h.target_lun > 7) 4524 return (CAM_LUN_INVALID); 4525 4526 if (ccb->ccb_h.target_id != our_id) { 4527 if ((ahc->features & AHC_MULTI_TID) != 0) { 4528 /* 4529 * Only allow additional targets if 4530 * the initiator role is disabled. 4531 * The hardware cannot handle a re-select-in 4532 * on the initiator id during a re-select-out 4533 * on a different target id. 4534 */ 4535 if ((ahc->flags & AHC_INITIATORMODE) != 0) 4536 return (CAM_TID_INVALID); 4537 } else { 4538 /* 4539 * Only allow our target id to change 4540 * if the initiator role is not configured 4541 * and there are no enabled luns which 4542 * are attached to the currently registered 4543 * scsi id. 
4544 */ 4545 if ((ahc->flags & AHC_INITIATORMODE) != 0 4546 || ahc->enabled_luns > 0) 4547 return (CAM_TID_INVALID); 4548 } 4549 } 4550 4551 *tstate = ahc->enabled_targets[ccb->ccb_h.target_id]; 4552 *lstate = NULL; 4553 if (*tstate != NULL) 4554 *lstate = 4555 (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; 4556 } 4557 4558 if (notfound_failure != 0 && *lstate == NULL) 4559 return (CAM_PATH_INVALID); 4560 4561 return (CAM_REQ_CMP); 4562 } 4563 4564 static void 4565 ahc_action(struct cam_sim *sim, union ccb *ccb) 4566 { 4567 struct ahc_softc *ahc; 4568 struct tmode_lstate *lstate; 4569 u_int target_id; 4570 u_int our_id; 4571 int s; 4572 4573 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n")); 4574 4575 ahc = (struct ahc_softc *)cam_sim_softc(sim); 4576 4577 target_id = ccb->ccb_h.target_id; 4578 our_id = SIM_SCSI_ID(ahc, sim); 4579 4580 switch (ccb->ccb_h.func_code) { 4581 /* Common cases first */ 4582 case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ 4583 case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/ 4584 { 4585 struct tmode_tstate *tstate; 4586 cam_status status; 4587 4588 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, 4589 &lstate, TRUE); 4590 4591 if (status != CAM_REQ_CMP) { 4592 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { 4593 /* Response from the black hole device */ 4594 tstate = NULL; 4595 lstate = ahc->black_hole; 4596 } else { 4597 ccb->ccb_h.status = status; 4598 xpt_done(ccb); 4599 break; 4600 } 4601 } 4602 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 4603 int s; 4604 4605 s = splcam(); 4606 SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h, 4607 sim_links.sle); 4608 ccb->ccb_h.status = CAM_REQ_INPROG; 4609 if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0) 4610 ahc_run_tqinfifo(ahc, /*paused*/FALSE); 4611 splx(s); 4612 break; 4613 } 4614 4615 /* 4616 * The target_id represents the target we attempt to 4617 * select. In target mode, this is the initiator of 4618 * the original command. 
4619 */ 4620 our_id = target_id; 4621 target_id = ccb->csio.init_id; 4622 /* FALLTHROUGH */ 4623 } 4624 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 4625 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 4626 { 4627 struct scb *scb; 4628 struct hardware_scb *hscb; 4629 struct ahc_initiator_tinfo *tinfo; 4630 struct tmode_tstate *tstate; 4631 u_int16_t mask; 4632 4633 /* 4634 * get an scb to use. 4635 */ 4636 if ((scb = ahcgetscb(ahc)) == NULL) { 4637 int s; 4638 4639 s = splcam(); 4640 ahc->flags |= AHC_RESOURCE_SHORTAGE; 4641 splx(s); 4642 xpt_freeze_simq(ahc->sim, /*count*/1); 4643 ahcsetccbstatus(ccb, CAM_REQUEUE_REQ); 4644 xpt_done(ccb); 4645 return; 4646 } 4647 4648 hscb = scb->hscb; 4649 4650 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE, 4651 ("start scb(%p)\n", scb)); 4652 scb->ccb = ccb; 4653 /* 4654 * So we can find the SCB when an abort is requested 4655 */ 4656 ccb->ccb_h.ccb_scb_ptr = scb; 4657 ccb->ccb_h.ccb_ahc_ptr = ahc; 4658 4659 /* 4660 * Put all the arguments for the xfer in the scb 4661 */ 4662 hscb->tcl = ((target_id << 4) & 0xF0) 4663 | (SIM_IS_SCSIBUS_B(ahc, sim) ? 
SELBUSB : 0) 4664 | (ccb->ccb_h.target_lun & 0x07); 4665 4666 mask = SCB_TARGET_MASK(scb); 4667 tinfo = ahc_fetch_transinfo(ahc, SIM_CHANNEL(ahc, sim), our_id, 4668 target_id, &tstate); 4669 4670 hscb->scsirate = tinfo->scsirate; 4671 hscb->scsioffset = tinfo->current.offset; 4672 if ((tstate->ultraenb & mask) != 0) 4673 hscb->control |= ULTRAENB; 4674 4675 if ((tstate->discenable & mask) != 0 4676 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0) 4677 hscb->control |= DISCENB; 4678 4679 if (ccb->ccb_h.func_code == XPT_RESET_DEV) { 4680 hscb->cmdpointer = NULL; 4681 scb->flags |= SCB_DEVICE_RESET; 4682 hscb->control |= MK_MESSAGE; 4683 ahc_execute_scb(scb, NULL, 0, 0); 4684 } else { 4685 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { 4686 if (ahc->pending_device == lstate) { 4687 scb->flags |= SCB_TARGET_IMMEDIATE; 4688 ahc->pending_device = NULL; 4689 } 4690 hscb->control |= TARGET_SCB; 4691 hscb->cmdpointer = IDENTIFY_SEEN; 4692 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { 4693 hscb->cmdpointer |= SPHASE_PENDING; 4694 hscb->status = ccb->csio.scsi_status; 4695 } 4696 4697 /* Overloaded with tag ID */ 4698 hscb->cmdlen = ccb->csio.tag_id; 4699 /* 4700 * Overloaded with the value to place 4701 * in SCSIID for reselection. 
4702 */ 4703 hscb->cmdpointer |= 4704 (our_id|(hscb->tcl & 0xF0)) << 16; 4705 } 4706 if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) 4707 hscb->control |= ccb->csio.tag_action; 4708 4709 ahc_setup_data(ahc, &ccb->csio, scb); 4710 } 4711 break; 4712 } 4713 case XPT_NOTIFY_ACK: 4714 case XPT_IMMED_NOTIFY: 4715 { 4716 struct tmode_tstate *tstate; 4717 struct tmode_lstate *lstate; 4718 cam_status status; 4719 4720 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, 4721 &lstate, TRUE); 4722 4723 if (status != CAM_REQ_CMP) { 4724 ccb->ccb_h.status = status; 4725 xpt_done(ccb); 4726 break; 4727 } 4728 SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h, 4729 sim_links.sle); 4730 ccb->ccb_h.status = CAM_REQ_INPROG; 4731 ahc_send_lstate_events(ahc, lstate); 4732 break; 4733 } 4734 case XPT_EN_LUN: /* Enable LUN as a target */ 4735 ahc_handle_en_lun(ahc, sim, ccb); 4736 xpt_done(ccb); 4737 break; 4738 case XPT_ABORT: /* Abort the specified CCB */ 4739 { 4740 ahc_abort_ccb(ahc, sim, ccb); 4741 break; 4742 } 4743 case XPT_SET_TRAN_SETTINGS: 4744 { 4745 struct ahc_devinfo devinfo; 4746 struct ccb_trans_settings *cts; 4747 struct ahc_initiator_tinfo *tinfo; 4748 struct tmode_tstate *tstate; 4749 u_int16_t *discenable; 4750 u_int16_t *tagenable; 4751 u_int update_type; 4752 int s; 4753 4754 cts = &ccb->cts; 4755 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim), 4756 cts->ccb_h.target_id, 4757 cts->ccb_h.target_lun, 4758 SIM_CHANNEL(ahc, sim), 4759 ROLE_UNKNOWN); 4760 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, 4761 devinfo.our_scsiid, 4762 devinfo.target, &tstate); 4763 update_type = 0; 4764 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) { 4765 update_type |= AHC_TRANS_GOAL; 4766 discenable = &tstate->discenable; 4767 tagenable = &tstate->tagenable; 4768 } else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { 4769 update_type |= AHC_TRANS_USER; 4770 discenable = &ahc->user_discenable; 4771 tagenable = &ahc->user_tagenable; 4772 } else { 4773 ccb->ccb_h.status = 
CAM_REQ_INVALID; 4774 xpt_done(ccb); 4775 break; 4776 } 4777 4778 s = splcam(); 4779 4780 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { 4781 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) 4782 *discenable |= devinfo.target_mask; 4783 else 4784 *discenable &= ~devinfo.target_mask; 4785 } 4786 4787 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { 4788 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) 4789 *tagenable |= devinfo.target_mask; 4790 else 4791 *tagenable &= ~devinfo.target_mask; 4792 } 4793 4794 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { 4795 switch (cts->bus_width) { 4796 case MSG_EXT_WDTR_BUS_16_BIT: 4797 if ((ahc->features & AHC_WIDE) != 0) 4798 break; 4799 /* FALLTHROUGH to 8bit */ 4800 case MSG_EXT_WDTR_BUS_32_BIT: 4801 case MSG_EXT_WDTR_BUS_8_BIT: 4802 default: 4803 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 4804 break; 4805 } 4806 ahc_set_width(ahc, &devinfo, cts->ccb_h.path, 4807 cts->bus_width, update_type, 4808 /*paused*/FALSE); 4809 } 4810 4811 if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0) 4812 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) { 4813 struct ahc_syncrate *syncrate; 4814 u_int maxsync; 4815 4816 if ((ahc->features & AHC_ULTRA2) != 0) 4817 maxsync = AHC_SYNCRATE_ULTRA2; 4818 else if ((ahc->features & AHC_ULTRA) != 0) 4819 maxsync = AHC_SYNCRATE_ULTRA; 4820 else 4821 maxsync = AHC_SYNCRATE_FAST; 4822 4823 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) { 4824 if (update_type & AHC_TRANS_USER) 4825 cts->sync_offset = tinfo->user.offset; 4826 else 4827 cts->sync_offset = tinfo->goal.offset; 4828 } 4829 4830 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) { 4831 if (update_type & AHC_TRANS_USER) 4832 cts->sync_period = tinfo->user.period; 4833 else 4834 cts->sync_period = tinfo->goal.period; 4835 } 4836 4837 syncrate = ahc_find_syncrate(ahc, &cts->sync_period, 4838 maxsync); 4839 ahc_validate_offset(ahc, syncrate, &cts->sync_offset, 4840 MSG_EXT_WDTR_BUS_8_BIT); 4841 4842 /* We use a period of 0 to represent async */ 
4843 if (cts->sync_offset == 0) 4844 cts->sync_period = 0; 4845 4846 ahc_set_syncrate(ahc, &devinfo, cts->ccb_h.path, 4847 syncrate, cts->sync_period, 4848 cts->sync_offset, update_type, 4849 /*paused*/FALSE); 4850 } 4851 splx(s); 4852 ccb->ccb_h.status = CAM_REQ_CMP; 4853 xpt_done(ccb); 4854 break; 4855 } 4856 case XPT_GET_TRAN_SETTINGS: 4857 /* Get default/user set transfer settings for the target */ 4858 { 4859 struct ahc_devinfo devinfo; 4860 struct ccb_trans_settings *cts; 4861 struct ahc_initiator_tinfo *targ_info; 4862 struct tmode_tstate *tstate; 4863 struct ahc_transinfo *tinfo; 4864 int s; 4865 4866 cts = &ccb->cts; 4867 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim), 4868 cts->ccb_h.target_id, 4869 cts->ccb_h.target_lun, 4870 SIM_CHANNEL(ahc, sim), 4871 ROLE_UNKNOWN); 4872 targ_info = ahc_fetch_transinfo(ahc, devinfo.channel, 4873 devinfo.our_scsiid, 4874 devinfo.target, &tstate); 4875 4876 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) 4877 tinfo = &targ_info->current; 4878 else 4879 tinfo = &targ_info->user; 4880 4881 s = splcam(); 4882 4883 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 4884 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { 4885 if ((ahc->user_discenable & devinfo.target_mask) != 0) 4886 cts->flags |= CCB_TRANS_DISC_ENB; 4887 4888 if ((ahc->user_tagenable & devinfo.target_mask) != 0) 4889 cts->flags |= CCB_TRANS_TAG_ENB; 4890 } else { 4891 if ((tstate->discenable & devinfo.target_mask) != 0) 4892 cts->flags |= CCB_TRANS_DISC_ENB; 4893 4894 if ((tstate->tagenable & devinfo.target_mask) != 0) 4895 cts->flags |= CCB_TRANS_TAG_ENB; 4896 } 4897 4898 cts->sync_period = tinfo->period; 4899 cts->sync_offset = tinfo->offset; 4900 cts->bus_width = tinfo->width; 4901 4902 splx(s); 4903 4904 cts->valid = CCB_TRANS_SYNC_RATE_VALID 4905 | CCB_TRANS_SYNC_OFFSET_VALID 4906 | CCB_TRANS_BUS_WIDTH_VALID 4907 | CCB_TRANS_DISC_VALID 4908 | CCB_TRANS_TQ_VALID; 4909 4910 ccb->ccb_h.status = CAM_REQ_CMP; 4911 xpt_done(ccb); 4912 break; 
4913 } 4914 case XPT_CALC_GEOMETRY: 4915 { 4916 struct ccb_calc_geometry *ccg; 4917 u_int32_t size_mb; 4918 u_int32_t secs_per_cylinder; 4919 int extended; 4920 4921 ccg = &ccb->ccg; 4922 size_mb = ccg->volume_size 4923 / ((1024L * 1024L) / ccg->block_size); 4924 extended = SIM_IS_SCSIBUS_B(ahc, sim) 4925 ? ahc->flags & AHC_EXTENDED_TRANS_B 4926 : ahc->flags & AHC_EXTENDED_TRANS_A; 4927 4928 if (size_mb > 1024 && extended) { 4929 ccg->heads = 255; 4930 ccg->secs_per_track = 63; 4931 } else { 4932 ccg->heads = 64; 4933 ccg->secs_per_track = 32; 4934 } 4935 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 4936 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 4937 ccb->ccb_h.status = CAM_REQ_CMP; 4938 xpt_done(ccb); 4939 break; 4940 } 4941 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 4942 { 4943 int found; 4944 4945 s = splcam(); 4946 found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim), 4947 /*initiate reset*/TRUE); 4948 splx(s); 4949 if (bootverbose) { 4950 xpt_print_path(SIM_PATH(ahc, sim)); 4951 printf("SCSI bus reset delivered. " 4952 "%d SCBs aborted.\n", found); 4953 } 4954 ccb->ccb_h.status = CAM_REQ_CMP; 4955 xpt_done(ccb); 4956 break; 4957 } 4958 case XPT_TERM_IO: /* Terminate the I/O process */ 4959 /* XXX Implement */ 4960 ccb->ccb_h.status = CAM_REQ_INVALID; 4961 xpt_done(ccb); 4962 break; 4963 case XPT_PATH_INQ: /* Path routing inquiry */ 4964 { 4965 struct ccb_pathinq *cpi = &ccb->cpi; 4966 4967 cpi->version_num = 1; /* XXX??? */ 4968 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; 4969 if ((ahc->features & AHC_WIDE) != 0) 4970 cpi->hba_inquiry |= PI_WIDE_16; 4971 if ((ahc->flags & AHC_TARGETMODE) != 0) { 4972 cpi->target_sprt = PIT_PROCESSOR 4973 | PIT_DISCONNECT 4974 | PIT_TERM_IO; 4975 } else { 4976 cpi->target_sprt = 0; 4977 } 4978 cpi->hba_misc = (ahc->flags & AHC_INITIATORMODE) 4979 ? 0 : PIM_NOINITIATOR; 4980 cpi->hba_eng_cnt = 0; 4981 cpi->max_target = (ahc->features & AHC_WIDE) ? 
15 : 7; 4982 cpi->max_lun = 7; 4983 if (SIM_IS_SCSIBUS_B(ahc, sim)) { 4984 cpi->initiator_id = ahc->our_id_b; 4985 if ((ahc->flags & AHC_RESET_BUS_B) == 0) 4986 cpi->hba_misc |= PIM_NOBUSRESET; 4987 } else { 4988 cpi->initiator_id = ahc->our_id; 4989 if ((ahc->flags & AHC_RESET_BUS_A) == 0) 4990 cpi->hba_misc |= PIM_NOBUSRESET; 4991 } 4992 cpi->bus_id = cam_sim_bus(sim); 4993 cpi->base_transfer_speed = 3300; 4994 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 4995 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN); 4996 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 4997 cpi->unit_number = cam_sim_unit(sim); 4998 cpi->ccb_h.status = CAM_REQ_CMP; 4999 xpt_done(ccb); 5000 break; 5001 } 5002 default: 5003 ccb->ccb_h.status = CAM_REQ_INVALID; 5004 xpt_done(ccb); 5005 break; 5006 } 5007 } 5008 5009 static void 5010 ahc_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) 5011 { 5012 struct ahc_softc *ahc; 5013 struct cam_sim *sim; 5014 5015 sim = (struct cam_sim *)callback_arg; 5016 ahc = (struct ahc_softc *)cam_sim_softc(sim); 5017 switch (code) { 5018 case AC_LOST_DEVICE: 5019 { 5020 struct ahc_devinfo devinfo; 5021 int s; 5022 5023 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim), 5024 xpt_path_target_id(path), 5025 xpt_path_lun_id(path), 5026 SIM_CHANNEL(ahc, sim), 5027 ROLE_UNKNOWN); 5028 5029 /* 5030 * Revert to async/narrow transfers 5031 * for the next device. 
		 */
		s = splcam();
		ahc_set_width(ahc, &devinfo, path, MSG_EXT_WDTR_BUS_8_BIT,
			      AHC_TRANS_GOAL|AHC_TRANS_CUR,
			      /*paused*/FALSE);
		ahc_set_syncrate(ahc, &devinfo, path, /*syncrate*/NULL,
				 /*period*/0, /*offset*/0,
				 AHC_TRANS_GOAL|AHC_TRANS_CUR,
				 /*paused*/FALSE);
		splx(s);
		break;
	}
	default:
		break;
	}
}

/*
 * bus_dmamap_load() callback.  Copies the DMA segment list into the
 * SCB's S/G list, applies target mode and hardware-quirk fixups,
 * arms the CCB timeout, and queues the SCB to the sequencer (or,
 * for an immediate target mode command, kicks the sequencer
 * directly).  Also the common entry point for requests that need no
 * data mapping (nsegments == 0).
 */
static void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
		int error)
{
	struct	 scb *scb;
	union	 ccb *ccb;
	struct	 ahc_softc *ahc;
	int	 s;

	scb = (struct scb *)arg;
	ccb = scb->ccb;
	ahc = (struct ahc_softc *)ccb->ccb_h.ccb_ahc_ptr;

	if (error != 0) {
		/* The DMA load failed; fail the CCB and release the SCB */
		if (error == EFBIG)
			ahcsetccbstatus(scb->ccb, CAM_REQ_TOO_BIG);
		else
			ahcsetccbstatus(scb->ccb, CAM_REQ_CMP_ERR);
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
		ahcfreescb(ahc, scb);
		xpt_done(ccb);
		return;
	}
	if (nsegments != 0) {
		struct	  ahc_dma_seg *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nsegments;

		/* Copy the first SG into the data pointer area */
		scb->hscb->data = dm_segs->ds_addr;
		scb->hscb->datalen = dm_segs->ds_len;

		/* Copy the segments into our SG list */
		sg = scb->sg_list;
		while (dm_segs < end_seg) {
			sg->addr = dm_segs->ds_addr;
			sg->len = dm_segs->ds_len;
			sg++;
			dm_segs++;
		}

		/* Note where to find the SG entries in bus space */
		scb->hscb->SG_pointer = scb->sg_list_phys;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);

		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
			scb->hscb->cmdpointer |= DPHASE_PENDING;
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
				scb->hscb->cmdpointer |= (TARGET_DATA_IN << 8);

			/*
			 * If the transfer is of an odd length and in the
			 * "in" direction (scsi->HostBus), then it may
			 * trigger a bug in the 'WideODD' feature of
			 * non-Ultra2 chips.  Force the total data-length
			 * to be even by adding an extra, 1 byte, SG,
			 * element.  We do this even if we are not currently
			 * negotiated wide as negotiation could occur before
			 * this command is executed.
			 */
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN
			 && (ccb->csio.dxfer_len & 0x1) != 0
			 && (ahc->features & AHC_TARG_DMABUG) != 0) {

				nsegments++;
				if (nsegments > AHC_NSEG) {

					ahcsetccbstatus(scb->ccb,
							CAM_REQ_TOO_BIG);
					bus_dmamap_unload(ahc->buffer_dmat,
							  scb->dmamap);
					ahcfreescb(ahc, scb);
					xpt_done(ccb);
					return;
				}
				/* sg still points one past the last entry */
				sg->addr = ahc->dma_bug_buf;
				sg->len = 1;
			}
		}
	} else {
		/* No data phase for this request */
		scb->hscb->SG_pointer = 0;
		scb->hscb->data = 0;
		scb->hscb->datalen = 0;
	}

	scb->sg_count = scb->hscb->SG_count = nsegments;

	s = splcam();

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ahc_ccb_status(ccb) != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
		ahcfreescb(ahc, scb);
		xpt_done(ccb);
		splx(s);
		return;
	}

	/* Busy this tcl if we are untagged */
	if ((scb->hscb->control & TAG_ENB) == 0)
		ahc_busy_tcl(ahc, scb);

	LIST_INSERT_HEAD(&ahc->pending_ccbs, &ccb->ccb_h,
			 sim_links.le);

	scb->flags |= SCB_ACTIVE;
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		/* CAM_TIME_DEFAULT maps to a 5 second timeout */
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
			ccb->ccb_h.timeout = 5 * 1000;
		ccb->ccb_h.timeout_ch =
		    timeout(ahc_timeout, (caddr_t)scb,
			    (ccb->ccb_h.timeout * hz) / 1000);
	}

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
#if 0
		printf("Continueing Immediate Command %d:%d\n",
		       ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
#endif
		/*
		 * The sequencer is waiting on this SCB; hand it the
		 * tag directly rather than going through the qinfifo.
		 */
		pause_sequencer(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, SCB_TAG, scb->hscb->tag);
		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
		unpause_sequencer(ahc);
	} else {

		ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;

		if ((ahc->features & AHC_QUEUE_REGS) != 0) {
			/* Hardware queue registers need no pause */
			ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
		} else {
			pause_sequencer(ahc);
			ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
			unpause_sequencer(ahc);
		}
	}

	splx(s);
}

/*
 * Polled-mode entry point: service the controller's interrupt
 * condition directly.
 */
static void
ahc_poll(struct cam_sim *sim)
{
	ahc_intr(cam_sim_softc(sim));
}

/*
 * Prepare the CDB and data buffers for a SCSI I/O or continue
 * target I/O CCB.  Copies or points the hardware SCB at the CDB,
 * then maps the data (virtual buffer, physical buffer, or caller
 * supplied S/G list) and hands off to ahc_execute_scb(), possibly
 * asynchronously via the bus_dma callback.
 */
static void
ahc_setup_data(struct ahc_softc *ahc, struct ccb_scsiio *csio,
	       struct scb *scb)
{
	struct hardware_scb *hscb;
	struct ccb_hdr *ccb_h;

	hscb = scb->hscb;
	ccb_h = &csio->ccb_h;

	if (ccb_h->func_code == XPT_SCSI_IO) {
		hscb->cmdlen = csio->cdb_len;
		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
			if ((ccb_h->flags & CAM_CDB_PHYS) == 0)
				if (hscb->cmdlen <= 16) {
					/*
					 * Copy the virtual CDB into the
					 * SCB's embedded storage.
					 */
					memcpy(hscb->cmdstore,
					       csio->cdb_io.cdb_ptr,
					       hscb->cmdlen);
					hscb->cmdpointer =
					    hscb->cmdstore_busaddr;
				} else {
					ahcsetccbstatus(scb->ccb,
							CAM_REQ_INVALID);
					xpt_done(scb->ccb);
					ahcfreescb(ahc, scb);
					return;
				}
			else
				/* Physical CDB address supplied directly */
				hscb->cmdpointer =
				    ((intptr_t)csio->cdb_io.cdb_ptr) & 0xffffffff;
		} else {
			/*
			 * CCB CDB Data Storage area is only 16 bytes
			 * so no additional testing is required
			 */
			memcpy(hscb->cmdstore, csio->cdb_io.cdb_bytes,
			       hscb->cmdlen);
			hscb->cmdpointer = hscb->cmdstore_busaddr;
		}
	}

	/* Only use S/G if there is a transfer */
	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
			/* We've been given a pointer to a single buffer */
			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
				int s;
				int error;

				s = splsoftvm();
				error = bus_dmamap_load(ahc->buffer_dmat,
							scb->dmamap,
							csio->data_ptr,
							csio->dxfer_len,
							ahc_execute_scb,
							scb, /*flags*/0);
				if (error == EINPROGRESS) {
					/*
					 * So as to maintain ordering,
					 * freeze the controller queue
					 * until our mapping is
					 * returned.
					 */
					xpt_freeze_simq(ahc->sim,
							/*count*/1);
					scb->ccb->ccb_h.status |=
					    CAM_RELEASE_SIMQ;
				}
				splx(s);
			} else {
				struct bus_dma_segment seg;

				/* Pointer to physical buffer */
				if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE)
					panic("ahc_setup_data - Transfer size "
					      "larger than can device max");

				seg.ds_addr = (bus_addr_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				ahc_execute_scb(scb, &seg, 1, 0);
			}
		} else {
			struct bus_dma_segment *segs;

			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
				panic("ahc_setup_data - Physical segment "
				      "pointers unsupported");

			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
				panic("ahc_setup_data - Virtual segment "
				      "addresses unsupported");

			/* Just use the segments provided */
			segs = (struct bus_dma_segment *)csio->data_ptr;
			ahc_execute_scb(scb, segs, csio->sglist_cnt, 0);
		}
	} else {
		/* No data transfer: skip S/G entirely */
		ahc_execute_scb(scb, NULL, 0, 0);
	}
}

/*
 * Requeue every command in the qinfifo destined for the device
 * named by 'path', effectively freezing that device's queue.
 */
static void
ahc_freeze_devq(struct ahc_softc *ahc, struct cam_path *path)
{
	int	target;
	char	channel;
	int	lun;

	target = xpt_path_target_id(path);
	lun = xpt_path_lun_id(path);
	channel = xpt_path_sim(path)->bus_id == 0 ?
	    'A' : 'B';

	ahc_search_qinfifo(ahc, target, channel, lun,
			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);
}

/*
 * Grow the pool of kernel SCBs by one page worth of S/G lists.
 * Allocates DMA-safe S/G storage, then initializes as many SCBs as
 * fit (bounded by AHC_SCB_MAX) and pushes them onto the free list.
 * Allocation failures are silent; callers simply see no new SCBs.
 */
static void
ahcallocscbs(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;
	struct scb *next_scb;
	struct sg_map_node *sg_map;
	bus_addr_t physaddr;
	struct ahc_dma_seg *segs;
	int newcount;
	int i;

	scb_data = ahc->scb_data;
	if (scb_data->numscbs >= AHC_SCB_MAX)
		/* Can't allocate any more */
		return;

	next_scb = &scb_data->scbarray[scb_data->numscbs];

	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

	if (sg_map == NULL)
		return;

	/* Allocate S/G space for the next batch of SCBS */
	if (bus_dmamem_alloc(scb_data->sg_dmat, (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		free(sg_map, M_DEVBUF);
		return;
	}

	SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);

	bus_dmamap_load(scb_data->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
			PAGE_SIZE, ahcdmamapcb, &sg_map->sg_physaddr,
			/*flags*/0);

	segs = sg_map->sg_vaddr;
	physaddr = sg_map->sg_physaddr;

	/* One page of S/G memory yields this many full S/G lists */
	newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
	for (i = 0; scb_data->numscbs < AHC_SCB_MAX && i < newcount; i++) {
		int error;

		next_scb->sg_list = segs;
		/*
		 * The sequencer always starts with the second entry.
		 * The first entry is embedded in the scb.
		 */
		next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
		next_scb->flags = SCB_FREE;
		error = bus_dmamap_create(ahc->buffer_dmat, /*flags*/0,
					  &next_scb->dmamap);
		if (error != 0)
			break;
		next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
		next_scb->hscb->tag = ahc->scb_data->numscbs;
		next_scb->hscb->cmdstore_busaddr =
		    ahc_hscb_busaddr(ahc, next_scb->hscb->tag)
		  + offsetof(struct hardware_scb, cmdstore);
		SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, next_scb, links);
		segs += AHC_NSEG;
		physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
		next_scb++;
		ahc->scb_data->numscbs++;
	}
}

#ifdef AHC_DUMP_SEQ
/*
 * Debug aid: read back and print the sequencer program currently
 * loaded in SEQRAM, one 32-bit instruction per line.
 */
static void
ahc_dumpseq(struct ahc_softc* ahc)
{
	int i;
	int max_prog;

	/* Program RAM size varies by chip generation */
	if ((ahc->chip & AHC_BUS_MASK) < AHC_PCI)
		max_prog = 448;
	else if ((ahc->features & AHC_ULTRA2) != 0)
		max_prog = 768;
	else
		max_prog = 512;

	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);
	for (i = 0; i < max_prog; i++) {
		u_int8_t ins_bytes[4];

		ahc_insb(ahc, SEQRAM, ins_bytes, 4);
		printf("0x%08x\n", ins_bytes[0] << 24
				 | ins_bytes[1] << 16
				 | ins_bytes[2] << 8
				 | ins_bytes[3]);
	}
}
#endif

/*
 * Download the sequencer firmware into the chip's program RAM,
 * skipping instructions belonging to conditional patches that do
 * not apply to this controller, then restart the sequencer.
 */
static void
ahc_loadseq(struct ahc_softc *ahc)
{
	struct patch *cur_patch;
	int i;
	int downloaded;
	int skip_addr;
	u_int8_t download_consts[4];

	/* Setup downloadable constant table */
#if 0
	/* No downloaded constants are currently defined. */
	download_consts[TMODE_NUMCMDS] = ahc->num_targetcmds;
#endif

	cur_patch = patches;
	downloaded = 0;
	skip_addr = 0;
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);

	for (i = 0; i < sizeof(seqprog)/4; i++) {
		if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
			/*
			 * Don't download this instruction as it
			 * is in a patch that was removed.
			 */
			continue;
		}
		ahc_download_instr(ahc, i, download_consts);
		downloaded++;
	}
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
	restart_sequencer(ahc);

	if (bootverbose)
		printf(" %d instructions downloaded\n", downloaded);
}

/*
 * Decide whether the sequencer instruction at start_instr should be
 * downloaded.  Walks the patch table from *start_patch, evaluating
 * each patch's predicate; rejected patches set *skip_addr to the
 * first instruction past the skipped region.  Returns 0 when the
 * instruction is being skipped, 1 when it should be downloaded.
 * *start_patch is advanced so successive calls scan forward.
 */
static int
ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
		int start_instr, int *skip_addr)
{
	struct	patch *cur_patch;
	struct	patch *last_patch;
	int	num_patches;

	num_patches = sizeof(patches)/sizeof(struct patch);
	last_patch = &patches[num_patches];
	cur_patch = *start_patch;

	while (cur_patch < last_patch && start_instr == cur_patch->begin) {

		if (cur_patch->patch_func(ahc) == 0) {

			/* Start rejecting code */
			*skip_addr = start_instr + cur_patch->skip_instr;
			cur_patch += cur_patch->skip_patch;
		} else {
			/* Accepted this patch.  Advance to the next
			 * one and wait for our instruction pointer to
			 * hit this point.
			 */
			cur_patch++;
		}
	}

	*start_patch = cur_patch;
	if (start_instr < *skip_addr)
		/* Still skipping */
		return (0);

	return (1);
}

/*
 * Download one sequencer instruction, relocating branch targets to
 * account for removed (patched out) instructions and applying the
 * format required by this chip generation: Ultra2 parts get an odd
 * parity bit computed over the instruction word, while older parts
 * get the instruction re-packed into their compressed encoding.
 */
static void
ahc_download_instr(struct ahc_softc *ahc, int instrptr, u_int8_t *dconsts)
{
	union	ins_formats instr;
	struct	ins_format1 *fmt1_ins;
	struct	ins_format3 *fmt3_ins;
	u_int	opcode;

	/* Structure copy */
	instr = *(union ins_formats*)&seqprog[instrptr * 4];

	fmt1_ins = &instr.format1;
	fmt3_ins = NULL;

	/* Pull the opcode */
	opcode = instr.format1.opcode;
	switch (opcode) {
	case AIC_OP_JMP:
	case AIC_OP_JC:
	case AIC_OP_JNC:
	case AIC_OP_CALL:
	case AIC_OP_JNE:
	case AIC_OP_JNZ:
	case AIC_OP_JE:
	case AIC_OP_JZ:
	{
		struct patch *cur_patch;
		int address_offset;
		u_int address;
		int skip_addr;
		int i;

		fmt3_ins = &instr.format3;
		address_offset = 0;
		address = fmt3_ins->address;
		cur_patch = patches;
		skip_addr = 0;

		/*
		 * Count how many instructions before the branch target
		 * were removed by patches; the target shifts down by
		 * that amount.
		 */
		for (i = 0; i < address;) {

			ahc_check_patch(ahc, &cur_patch, i, &skip_addr);

			if (skip_addr > i) {
				int end_addr;

				end_addr = MIN(address, skip_addr);
				address_offset += end_addr - i;
				i = skip_addr;
			} else {
				i++;
			}
		}
		address -= address_offset;
		fmt3_ins->address = address;
		/* FALLTHROUGH */
	}
	case AIC_OP_OR:
	case AIC_OP_AND:
	case AIC_OP_XOR:
	case AIC_OP_ADD:
	case AIC_OP_ADC:
	case AIC_OP_BMOV:
		/*
		 * The parity bit doubles as a "downloaded constant"
		 * marker in the assembled image; substitute the real
		 * constant value here.
		 */
		if (fmt1_ins->parity != 0) {
			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
		}
		fmt1_ins->parity = 0;
		/* FALLTHROUGH */
	case AIC_OP_ROL:
		if ((ahc->features & AHC_ULTRA2) != 0) {
			int i, count;

			/* Calculate odd parity for the instruction */
			for (i = 0, count = 0; i < 31; i++) {
				u_int32_t mask;

				mask = 0x01 << i;
				if
				    ((instr.integer & mask) != 0)
					count++;
			}
			if ((count & 0x01) == 0)
				instr.format1.parity = 1;
		} else {
			/* Compress the instruction for older sequencers */
			if (fmt3_ins != NULL) {
				instr.integer =
					fmt3_ins->immediate
				      | (fmt3_ins->source << 8)
				      | (fmt3_ins->address << 16)
				      | (fmt3_ins->opcode << 25);
			} else {
				instr.integer =
					fmt1_ins->immediate
				      | (fmt1_ins->source << 8)
				      | (fmt1_ins->destination << 16)
				      | (fmt1_ins->ret << 24)
				      | (fmt1_ins->opcode << 25);
			}
		}
		ahc_outsb(ahc, SEQRAM, instr.bytes, 4);
		break;
	default:
		panic("Unknown opcode encountered in seq program");
		break;
	}
}

/*
 * Mark an SCB as the recovery SCB for error handling.  Freezes the
 * SIM queue so no new commands arrive, and cancels the timeouts of
 * all pending SCBs; they are rescheduled once recovery completes.
 */
static void
ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) {

	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
		struct ccb_hdr *ccbh;

		scb->flags |= SCB_RECOVERY_SCB;

		/*
		 * Take all queued, but not sent SCBs out of the equation.
		 * Also ensure that no new CCBs are queued to us while we
		 * try to fix this problem.
		 */
		if ((scb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
			xpt_freeze_simq(ahc->sim, /*count*/1);
			scb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		}

		/*
		 * Go through all of our pending SCBs and remove
		 * any scheduled timeouts for them.  We will reschedule
		 * them after we've successfully fixed this problem.
		 */
		ccbh = ahc->pending_ccbs.lh_first;
		while (ccbh != NULL) {
			struct scb *pending_scb;

			pending_scb = (struct scb *)ccbh->ccb_scb_ptr;
			untimeout(ahc_timeout, pending_scb, ccbh->timeout_ch);
			ccbh = ccbh->sim_links.le.le_next;
		}
	}
}

/*
 * Command timeout handler.  Pauses the sequencer, snapshots the bus
 * state for diagnostics, and then escalates: a first timeout
 * attempts a bus device reset (or defers to the SCB actually
 * holding the bus), while a timeout on an SCB already marked for
 * reset/abort forces a full channel reset.
 */
static void
ahc_timeout(void *arg)
{
	struct	scb *scb;
	struct	ahc_softc *ahc;
	int	s, found;
	u_int	last_phase;
	int	target;
	int	lun;
	int	i;
	char	channel;

	scb = (struct scb *)arg;
	ahc = (struct ahc_softc *)scb->ccb->ccb_h.ccb_ahc_ptr;

	s = splcam();

	/*
	 * Ensure that the card doesn't do anything
	 * behind our back.  Also make sure that we
	 * didn't "just" miss an interrupt that would
	 * affect this timeout.
	 */
	do {
		ahc_intr(ahc);
		pause_sequencer(ahc);
	} while (ahc_inb(ahc, INTSTAT) & INT_PEND);

	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("Timedout SCB handled by another timeout\n");
		unpause_sequencer(ahc);
		splx(s);
		return;
	}

	target = SCB_TARGET(scb);
	channel = SCB_CHANNEL(scb);
	lun = SCB_LUN(scb);

	xpt_print_path(scb->ccb->ccb_h.path);
	printf("SCB 0x%x - timed out ", scb->hscb->tag);
	/*
	 * Take a snapshot of the bus state and print out
	 * some information so we can track down driver bugs.
	 */
	last_phase = ahc_inb(ahc, LASTPHASE);

	for (i = 0; i < num_phases; i++) {
		if (last_phase == phase_table[i].phase)
			break;
	}
	printf("%s", phase_table[i].phasemsg);

	printf(", SEQADDR == 0x%x\n",
	       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));

#if 0
	printf("SSTAT1 == 0x%x\n", ahc_inb(ahc, SSTAT1));
	printf("SSTAT3 == 0x%x\n", ahc_inb(ahc, SSTAT3));
	printf("SCSIPHASE == 0x%x\n", ahc_inb(ahc, SCSIPHASE));
	printf("SCSIRATE == 0x%x\n", ahc_inb(ahc, SCSIRATE));
	printf("SCSIOFFSET == 0x%x\n", ahc_inb(ahc, SCSIOFFSET));
	printf("SEQ_FLAGS == 0x%x\n", ahc_inb(ahc, SEQ_FLAGS));
	printf("SCB_DATAPTR == 0x%x\n", ahc_inb(ahc, SCB_DATAPTR)
				      | ahc_inb(ahc, SCB_DATAPTR + 1) << 8
				      | ahc_inb(ahc, SCB_DATAPTR + 2) << 16
				      | ahc_inb(ahc, SCB_DATAPTR + 3) << 24);
	printf("SCB_DATACNT == 0x%x\n", ahc_inb(ahc, SCB_DATACNT)
				      | ahc_inb(ahc, SCB_DATACNT + 1) << 8
				      | ahc_inb(ahc, SCB_DATACNT + 2) << 16);
	printf("SCB_SGCOUNT == 0x%x\n", ahc_inb(ahc, SCB_SGCOUNT));
	printf("CCSCBCTL == 0x%x\n", ahc_inb(ahc, CCSCBCTL));
	printf("CCSCBCNT == 0x%x\n", ahc_inb(ahc, CCSCBCNT));
	printf("DFCNTRL == 0x%x\n", ahc_inb(ahc, DFCNTRL));
	printf("DFSTATUS == 0x%x\n", ahc_inb(ahc, DFSTATUS));
	printf("CCHCNT == 0x%x\n", ahc_inb(ahc, CCHCNT));
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			printf("sg[%d] - Addr 0x%x : Length %d\n",
			       i,
			       scb->sg_list[i].addr,
			       scb->sg_list[i].len);
		}
	}
#endif
	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
		/*
		 * Been down this road before.
		 * Do a full bus reset.
		 */
bus_reset:
		ahcsetccbstatus(scb->ccb, CAM_CMD_TIMEOUT);
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahc_name(ahc), channel, found);
	} else {
		/*
		 * If we are a target, transition to bus free and report
		 * the timeout.
		 *
		 * The target/initiator that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths).
		 * If the bus is idle and we are acting as the initiator
		 * for this request, queue a BDR message to the timed out
		 * target.  Otherwise, if the timed out transaction is
		 * active:
		 *   Initiator transaction:
		 *	Stuff the message buffer with a BDR message and assert
		 *	ATN in the hopes that the target will let go of the bus
		 *	and go to the mesgout phase.  If this fails, we'll
		 *	get another timeout 2 seconds later which will attempt
		 *	a bus reset.
		 *
		 *   Target transaction:
		 *	Transition to BUS FREE and report the error.
		 *	It's good to be the target!
		 */
		u_int active_scb_index;

		active_scb_index = ahc_inb(ahc, SCB_TAG);

		if (last_phase != P_BUSFREE
		 && (active_scb_index < ahc->scb_data->numscbs)) {
			struct scb *active_scb;

			/*
			 * If the active SCB is not from our device,
			 * assume that another device is hogging the bus
			 * and wait for its timeout to expire before
			 * taking additional action.
			 */
			active_scb = &ahc->scb_data->scbarray[active_scb_index];
			if (active_scb->hscb->tcl != scb->hscb->tcl) {
				struct	ccb_hdr *ccbh;
				u_int	newtimeout;

				xpt_print_path(scb->ccb->ccb_h.path);
				printf("Other SCB Timeout%s",
				       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
				       ?
" again\n" : "\n"); 5782 scb->flags |= SCB_OTHERTCL_TIMEOUT; 5783 newtimeout = MAX(active_scb->ccb->ccb_h.timeout, 5784 scb->ccb->ccb_h.timeout); 5785 ccbh = &scb->ccb->ccb_h; 5786 scb->ccb->ccb_h.timeout_ch = 5787 timeout(ahc_timeout, scb, 5788 (newtimeout * hz) / 1000); 5789 splx(s); 5790 return; 5791 } 5792 5793 /* It's us */ 5794 if ((scb->hscb->control & TARGET_SCB) != 0) { 5795 5796 /* 5797 * Send back any queued up transactions 5798 * and properly record the error condition. 5799 */ 5800 ahc_freeze_devq(ahc, scb->ccb->ccb_h.path); 5801 ahcsetccbstatus(scb->ccb, CAM_CMD_TIMEOUT); 5802 ahc_freeze_ccb(scb->ccb); 5803 ahc_done(ahc, scb); 5804 5805 /* Will clear us from the bus */ 5806 restart_sequencer(ahc); 5807 return; 5808 } 5809 5810 ahc_set_recoveryscb(ahc, active_scb); 5811 ahc_outb(ahc, MSG_OUT, MSG_BUS_DEV_RESET); 5812 ahc_outb(ahc, SCSISIGO, last_phase|ATNO); 5813 xpt_print_path(active_scb->ccb->ccb_h.path); 5814 printf("BDR message in message buffer\n"); 5815 active_scb->flags |= SCB_DEVICE_RESET; 5816 active_scb->ccb->ccb_h.timeout_ch = 5817 timeout(ahc_timeout, (caddr_t)active_scb, 2 * hz); 5818 unpause_sequencer(ahc); 5819 } else { 5820 int disconnected; 5821 5822 /* XXX Shouldn't panic. Just punt instead */ 5823 if ((scb->hscb->control & TARGET_SCB) != 0) 5824 panic("Timed-out target SCB but bus idle"); 5825 5826 if (last_phase != P_BUSFREE 5827 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) { 5828 /* XXX What happened to the SCB? */ 5829 /* Hung target selection. Goto busfree */ 5830 printf("%s: Hung target selection\n", 5831 ahc_name(ahc)); 5832 restart_sequencer(ahc); 5833 return; 5834 } 5835 5836 if (ahc_search_qinfifo(ahc, target, channel, lun, 5837 scb->hscb->tag, ROLE_INITIATOR, 5838 /*status*/0, SEARCH_COUNT) > 0) { 5839 disconnected = FALSE; 5840 } else { 5841 disconnected = TRUE; 5842 } 5843 5844 if (disconnected) { 5845 u_int active_scb; 5846 5847 ahc_set_recoveryscb(ahc, scb); 5848 /* 5849 * Simply set the MK_MESSAGE control bit. 
5850 */ 5851 scb->hscb->control |= MK_MESSAGE; 5852 scb->flags |= SCB_QUEUED_MSG 5853 | SCB_DEVICE_RESET; 5854 5855 /* 5856 * Mark the cached copy of this SCB in the 5857 * disconnected list too, so that a reconnect 5858 * at this point causes a BDR or abort. 5859 */ 5860 active_scb = ahc_inb(ahc, SCBPTR); 5861 if (ahc_search_disc_list(ahc, target, 5862 channel, lun, 5863 scb->hscb->tag, 5864 /*stop_on_first*/TRUE, 5865 /*remove*/FALSE, 5866 /*save_state*/FALSE)) { 5867 u_int scb_control; 5868 5869 scb_control = ahc_inb(ahc, SCB_CONTROL); 5870 scb_control |= MK_MESSAGE; 5871 ahc_outb(ahc, SCB_CONTROL, scb_control); 5872 } 5873 ahc_outb(ahc, SCBPTR, active_scb); 5874 ahc_index_busy_tcl(ahc, scb->hscb->tcl, 5875 /*unbusy*/TRUE); 5876 5877 /* 5878 * Actually re-queue this SCB in case we can 5879 * select the device before it reconnects. 5880 * Clear out any entries in the QINFIFO first 5881 * so we are the next SCB for this target 5882 * to run. 5883 */ 5884 ahc_search_qinfifo(ahc, SCB_TARGET(scb), 5885 channel, SCB_LUN(scb), 5886 SCB_LIST_NULL, 5887 ROLE_INITIATOR, 5888 CAM_REQUEUE_REQ, 5889 SEARCH_COMPLETE); 5890 xpt_print_path(scb->ccb->ccb_h.path); 5891 printf("Queuing a BDR SCB\n"); 5892 ahc->qinfifo[ahc->qinfifonext++] = 5893 scb->hscb->tag; 5894 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 5895 ahc_outb(ahc, HNSCB_QOFF, 5896 ahc->qinfifonext); 5897 } else { 5898 ahc_outb(ahc, KERNEL_QINPOS, 5899 ahc->qinfifonext); 5900 } 5901 scb->ccb->ccb_h.timeout_ch = 5902 timeout(ahc_timeout, (caddr_t)scb, 2 * hz); 5903 unpause_sequencer(ahc); 5904 } else { 5905 /* Go "immediatly" to the bus reset */ 5906 /* This shouldn't happen */ 5907 ahc_set_recoveryscb(ahc, scb); 5908 xpt_print_path(scb->ccb->ccb_h.path); 5909 printf("SCB %d: Immediate reset. 
" 5910 "Flags = 0x%x\n", scb->hscb->tag, 5911 scb->flags); 5912 goto bus_reset; 5913 } 5914 } 5915 } 5916 splx(s); 5917 } 5918 5919 static int 5920 ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel, 5921 int lun, u_int tag, role_t role, u_int32_t status, 5922 ahc_search_action action) 5923 { 5924 struct scb *scbp; 5925 u_int8_t qinpos; 5926 u_int8_t qintail; 5927 int found; 5928 5929 qinpos = ahc_inb(ahc, QINPOS); 5930 qintail = ahc->qinfifonext; 5931 found = 0; 5932 5933 /* 5934 * Start with an empty queue. Entries that are not chosen 5935 * for removal will be re-added to the queue as we go. 5936 */ 5937 ahc->qinfifonext = qinpos; 5938 5939 while (qinpos != qintail) { 5940 scbp = &ahc->scb_data->scbarray[ahc->qinfifo[qinpos]]; 5941 if (ahc_match_scb(scbp, target, channel, lun, tag, role)) { 5942 /* 5943 * We found an scb that needs to be removed. 5944 */ 5945 switch (action) { 5946 case SEARCH_COMPLETE: 5947 if (ahc_ccb_status(scbp->ccb) == CAM_REQ_INPROG) 5948 ahcsetccbstatus(scbp->ccb, status); 5949 ahc_freeze_ccb(scbp->ccb); 5950 ahc_done(ahc, scbp); 5951 break; 5952 case SEARCH_COUNT: 5953 ahc->qinfifo[ahc->qinfifonext++] = 5954 scbp->hscb->tag; 5955 break; 5956 case SEARCH_REMOVE: 5957 break; 5958 } 5959 found++; 5960 } else { 5961 ahc->qinfifo[ahc->qinfifonext++] = scbp->hscb->tag; 5962 } 5963 qinpos++; 5964 } 5965 5966 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 5967 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); 5968 } else { 5969 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); 5970 } 5971 5972 return (found); 5973 } 5974 5975 5976 static void 5977 ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) 5978 { 5979 union ccb *abort_ccb; 5980 5981 abort_ccb = ccb->cab.abort_ccb; 5982 switch (abort_ccb->ccb_h.func_code) { 5983 case XPT_ACCEPT_TARGET_IO: 5984 case XPT_IMMED_NOTIFY: 5985 case XPT_CONT_TARGET_IO: 5986 { 5987 struct tmode_tstate *tstate; 5988 struct tmode_lstate *lstate; 5989 struct ccb_hdr_slist *list; 5990 
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			break;
		}

		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
			list = &lstate->accept_tios;
		else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
			list = &lstate->immed_notifies;
		else
			list = NULL;

		if (list != NULL) {
			struct ccb_hdr *curelm;
			int found;

			/*
			 * Singly-linked list removal:  special-case the
			 * head, otherwise walk until the element whose
			 * successor is the CCB to abort.
			 */
			curelm = SLIST_FIRST(list);
			found = 0;
			if (curelm == &abort_ccb->ccb_h) {
				found = 1;
				SLIST_REMOVE_HEAD(list, sim_links.sle);
			} else {
				while (curelm != NULL) {
					struct ccb_hdr *nextelm;

					nextelm =
					    SLIST_NEXT(curelm, sim_links.sle);

					if (nextelm == &abort_ccb->ccb_h) {
						found = 1;
						SLIST_NEXT(curelm,
							   sim_links.sle) =
						    SLIST_NEXT(nextelm,
							       sim_links.sle);
						break;
					}
					curelm = nextelm;
				}
			}

			if (found) {
				abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
				xpt_done(abort_ccb);
				ccb->ccb_h.status = CAM_REQ_CMP;
			} else {
				printf("Not found\n");
				ccb->ccb_h.status = CAM_PATH_INVALID;
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case XPT_SCSI_IO:
		/* XXX Fully implement the hard ones */
		ccb->ccb_h.status = CAM_UA_ABORT;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}

/*
 * Abort all SCBs that match the given description (target/channel/lun/tag),
 * setting their status to the passed in status if the status has not already
 * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
 * is paused before it is called.
 */
static int
ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
	       int lun, u_int tag, role_t role, u_int32_t status)
{
	struct scb *scbp;
	u_int active_scb;
	int i;
	int found;

	/* restore this when we're done */
	active_scb = ahc_inb(ahc, SCBPTR);

	/* First flush any matches still sitting in the QINFIFO. */
	found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	/*
	 * Search waiting for selection list.
	 */
	{
		u_int8_t next, prev;

		next = ahc_inb(ahc, WAITING_SCBH);  /* Start at head of list. */
		prev = SCB_LIST_NULL;

		while (next != SCB_LIST_NULL) {
			u_int8_t scb_index;

			ahc_outb(ahc, SCBPTR, next);
			scb_index = ahc_inb(ahc, SCB_TAG);
			if (scb_index >= ahc->scb_data->numscbs) {
				panic("Waiting List inconsistency. "
				      "SCB index == %d, yet numscbs == %d.",
				      scb_index, ahc->scb_data->numscbs);
			}
			scbp = &ahc->scb_data->scbarray[scb_index];
			if (ahc_match_scb(scbp, target, channel,
					  lun, SCB_LIST_NULL, role)) {

				next = ahc_abort_wscb(ahc, next, prev);
			} else {

				prev = next;
				next = ahc_inb(ahc, SCB_NEXT);
			}
		}
	}
	/*
	 * Go through the disconnected list and remove any entries we
	 * have queued for completion, 0'ing their control byte too.
	 * We save the active SCB and restore it ourselves, so there
	 * is no reason for this search to restore it too.
	 */
	ahc_search_disc_list(ahc, target, channel, lun, tag,
			     /*stop_on_first*/FALSE, /*remove*/TRUE,
			     /*save_state*/FALSE);

	/*
	 * Go through the hardware SCB array looking for commands that
	 * were active but not on any list.
	 */
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		u_int scbid;

		ahc_outb(ahc, SCBPTR, i);
		scbid = ahc_inb(ahc, SCB_TAG);
		scbp = &ahc->scb_data->scbarray[scbid];
		if (scbid < ahc->scb_data->numscbs
		 && ahc_match_scb(scbp, target, channel, lun, tag, role))
			ahc_add_curscb_to_free_list(ahc);
	}

	/*
	 * Go through the pending CCB list and look for
	 * commands for this target that are still active.
	 * These are other tagged commands that were
	 * disconnected when the reset occurred.
	 */
	{
		struct ccb_hdr *ccb_h;

		ccb_h = ahc->pending_ccbs.lh_first;
		while (ccb_h != NULL) {
			scbp = (struct scb *)ccb_h->ccb_scb_ptr;
			/* Advance before ahc_done() unlinks the entry. */
			ccb_h = ccb_h->sim_links.le.le_next;
			if (ahc_match_scb(scbp, target, channel,
					  lun, tag, role)) {
				if (ahc_ccb_status(scbp->ccb) == CAM_REQ_INPROG)
					ahcsetccbstatus(scbp->ccb, status);
				ahc_freeze_ccb(scbp->ccb);
				ahc_done(ahc, scbp);
				found++;
			}
		}
	}
	ahc_outb(ahc, SCBPTR, active_scb);
	return found;
}

/*
 * Walk the hardware disconnected list for SCBs matching
 * target/channel/lun/tag.  Matches are counted; with 'remove' set they
 * are unlinked and freed, and with 'stop_on_first' the walk ends at the
 * first match (leaving SCBPTR at that entry when save_state is FALSE).
 * Returns the match count.  Assumes the sequencer is paused.
 */
static int
ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
		     int lun, u_int tag, int stop_on_first, int remove,
		     int save_state)
{
	struct scb *scbp;
	u_int next;
	u_int prev;
	u_int count;
	u_int active_scb;

	count = 0;
	next = ahc_inb(ahc, DISCONNECTED_SCBH);
	prev = SCB_LIST_NULL;

	if (save_state) {
		/* restore this when we're done */
		active_scb = ahc_inb(ahc, SCBPTR);
	} else
		/* Silence compiler */
		active_scb = SCB_LIST_NULL;

	while (next != SCB_LIST_NULL) {
		u_int scb_index;

		ahc_outb(ahc, SCBPTR, next);
		scb_index = ahc_inb(ahc, SCB_TAG);
		if (scb_index >= ahc->scb_data->numscbs) {
			panic("Disconnected List inconsistency. "
			      "SCB index == %d, yet numscbs == %d.",
			      scb_index, ahc->scb_data->numscbs);
		}
		scbp = &ahc->scb_data->scbarray[scb_index];
		if (ahc_match_scb(scbp, target, channel, lun,
				  tag, ROLE_INITIATOR)) {
			count++;
			if (remove) {
				next =
				    ahc_rem_scb_from_disc_list(ahc, prev, next);
			} else {
				prev = next;
				next = ahc_inb(ahc, SCB_NEXT);
			}
			if (stop_on_first)
				break;
		} else {
			prev = next;
			next = ahc_inb(ahc, SCB_NEXT);
		}
	}
	if (save_state)
		ahc_outb(ahc, SCBPTR, active_scb);
	return (count);
}

/*
 * Unlink the hardware SCB at 'scbptr' from the disconnected list,
 * clear its control byte, return it to the free list, and return the
 * slot that followed it.  Leaves SCBPTR pointing at prev (or the
 * removed slot if it was the list head).
 */
static u_int
ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
{
	u_int next;

	ahc_outb(ahc, SCBPTR, scbptr);
	next = ahc_inb(ahc, SCB_NEXT);

	ahc_outb(ahc, SCB_CONTROL, 0);

	ahc_add_curscb_to_free_list(ahc);

	if (prev != SCB_LIST_NULL) {
		ahc_outb(ahc, SCBPTR, prev);
		ahc_outb(ahc, SCB_NEXT, next);
	} else
		ahc_outb(ahc, DISCONNECTED_SCBH, next);

	return (next);
}

/*
 * Push the hardware SCB currently selected by SCBPTR onto the
 * sequencer's free list.
 */
static void
ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
{
	/* Invalidate the tag so that ahc_find_scb doesn't think it's active */
	ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);

	ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
	ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
}

/*
 * Manipulate the waiting for selection list and return the
 * scb that follows the one that we remove.
 */
static u_int
ahc_abort_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
{
	u_int curscb, next;

	/*
	 * Select the SCB we want to abort and
	 * pull the next pointer out of it.
	 */
	curscb = ahc_inb(ahc, SCBPTR);
	ahc_outb(ahc, SCBPTR, scbpos);
	next = ahc_inb(ahc, SCB_NEXT);

	/* Clear the necessary fields */
	ahc_outb(ahc, SCB_CONTROL, 0);

	ahc_add_curscb_to_free_list(ahc);

	/* update the waiting list */
	if (prev == SCB_LIST_NULL) {
		/* First in the list */
		ahc_outb(ahc, WAITING_SCBH, next);

		/*
		 * Ensure we aren't attempting to perform
		 * selection for this entry.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else {
		/*
		 * Select the scb that pointed to us
		 * and update its next pointer.
		 */
		ahc_outb(ahc, SCBPTR, prev);
		ahc_outb(ahc, SCB_NEXT, next);
	}

	/*
	 * Point us back at the original scb position.
	 */
	ahc_outb(ahc, SCBPTR, curscb);
	return next;
}

/*
 * Acknowledge and clear all pending SCSI interrupt status so stale
 * conditions don't re-trigger once the sequencer resumes.
 */
static void
ahc_clear_intstat(struct ahc_softc *ahc)
{
	/* Clear any interrupt conditions this may have caused */
	ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
	ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
				|CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
				CLRREQINIT);
	ahc_outb(ahc, CLRINT, CLRSCSIINT);
}

/*
 * Pulse SCSIRSTO to reset the currently selected bus.  Reset
 * interrupts are masked for the duration so we don't take our own
 * reset as an external event.
 */
static void
ahc_reset_current_bus(struct ahc_softc *ahc)
{
	u_int8_t scsiseq;

	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
	scsiseq = ahc_inb(ahc, SCSISEQ);
	ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
	DELAY(AHC_BUSRESET_DELAY);
	/* Turn off the bus reset */
	ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);

	ahc_clear_intstat(ahc);

	/* Re-enable reset interrupts */
	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
}

/*
 * Reset the named channel ('A' or 'B'), optionally driving the reset
 * signal ourselves, abort every transaction pending on it, notify
 * target mode peripherals and the XPT, and drop back to async/narrow
 * transfer negotiation.  Returns the number of SCBs aborted.
 */
static int
ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
{
	struct cam_path *path;
	u_int initiator, target, max_scsiid;
	u_int sblkctl;
	u_int our_id;
	int found;
	int
restart_needed;
	char cur_channel;

	ahc->pending_device = NULL;

	pause_sequencer(ahc);

	/*
	 * Run our command complete fifos to ensure that we perform
	 * completion processing on any commands that 'completed'
	 * before the reset occurred.
	 */
	ahc_run_qoutfifo(ahc);
	if ((ahc->flags & AHC_TARGETMODE) != 0) {
		ahc_run_tqinfifo(ahc, /*paused*/TRUE);
	}

	/*
	 * Reset the bus if we are initiating this reset
	 */
	sblkctl = ahc_inb(ahc, SBLKCTL);
	cur_channel = 'A';
	if ((ahc->features & AHC_TWIN) != 0
	 && ((sblkctl & SELBUSB) != 0))
		cur_channel = 'B';
	if (cur_channel != channel) {
		/* Case 1: Command for another bus is active
		 * Stealthily reset the other bus without
		 * upsetting the current bus.
		 */
		ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
		ahc_outb(ahc, SCSISEQ,
			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
		if (initiate_reset)
			ahc_reset_current_bus(ahc);
		ahc_clear_intstat(ahc);
		/* Switch back to the bus we were on. */
		ahc_outb(ahc, SBLKCTL, sblkctl);
		restart_needed = FALSE;
	} else {
		/* Case 2: A command from this bus is active or we're idle */
		ahc_clear_msg_state(ahc);
		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
		ahc_outb(ahc, SCSISEQ,
			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
		if (initiate_reset)
			ahc_reset_current_bus(ahc);
		ahc_clear_intstat(ahc);

		/*
		 * Since we are going to restart the sequencer, avoid
		 * a race in the sequencer that could cause corruption
		 * of our Q pointers by starting over from index 1.
		 */
		ahc->qoutfifonext = 0;
		if ((ahc->features & AHC_QUEUE_REGS) != 0)
			ahc_outb(ahc, SDSCB_QOFF, 0);
		else
			ahc_outb(ahc, QOUTPOS, 0);
		if ((ahc->flags & AHC_TARGETMODE) != 0) {
			ahc->tqinfifonext = 1;
			ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
			ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
			if ((ahc->features & AHC_HS_MAILBOX) != 0) {
				u_int hs_mailbox;

				hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
				hs_mailbox &= ~HOST_TQINPOS;
				ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
			}
		}
		restart_needed = TRUE;
	}

	/*
	 * Clean up all the state information for the
	 * pending transactions on this bus.
	 */
	found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);
	if (channel == 'B') {
		path = ahc->path_b;
		our_id = ahc->our_id_b;
	} else {
		path = ahc->path;
		our_id = ahc->our_id;
	}

	max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;

	/*
	 * Send an immediate notify ccb to all target mode peripheral
	 * drivers affected by this action.
	 */
	for (target = 0; target <= max_scsiid; target++) {
		struct tmode_tstate* tstate;
		u_int lun;

		tstate = ahc->enabled_targets[target];
		if (tstate == NULL)
			continue;
		for (lun = 0; lun <= 7; lun++) {
			struct tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
					       EVENT_TYPE_BUS_RESET, /*arg*/0);
			ahc_send_lstate_events(ahc, lstate);
		}
	}

	/* Notify the XPT that a bus reset occurred */
	xpt_async(AC_BUS_RESET, path, NULL);

	/*
	 * Revert to async/narrow transfers until we renegotiate.
	 */
	for (target = 0; target <= max_scsiid; target++) {

		if (ahc->enabled_targets[target] == NULL)
			continue;
		for (initiator = 0; initiator <= max_scsiid; initiator++) {
			struct ahc_devinfo devinfo;

			ahc_compile_devinfo(&devinfo, target, initiator,
					    CAM_LUN_WILDCARD,
					    channel, ROLE_UNKNOWN);
			ahc_set_width(ahc, &devinfo, path,
				      MSG_EXT_WDTR_BUS_8_BIT,
				      AHC_TRANS_CUR, /*paused*/TRUE);
			ahc_set_syncrate(ahc, &devinfo, path,
					 /*syncrate*/NULL, /*period*/0,
					 /*offset*/0, AHC_TRANS_CUR,
					 /*paused*/TRUE);
		}
	}

	if (restart_needed)
		restart_sequencer(ahc);
	else
		unpause_sequencer(ahc);
	return found;
}

/*
 * Decide whether 'scb' matches the target/channel/lun/tag/role
 * description used by the various abort/search routines.  Wildcards
 * (ALL_CHANNELS, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, SCB_LIST_NULL)
 * match anything; with ROLE_UNKNOWN the tag/role check is skipped.
 */
static int
ahc_match_scb(struct scb *scb, int target, char channel,
	      int lun, u_int tag, role_t role)
{
	int targ = SCB_TARGET(scb);
	char chan = SCB_CHANNEL(scb);
	int slun = SCB_LUN(scb);
	int match;

	match = ((chan == channel) || (channel == ALL_CHANNELS));
	if (match != 0)
		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
	if (match != 0)
		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
	if (match != 0) {
		int group;

		group = XPT_FC_GROUP(scb->ccb->ccb_h.func_code);
		if (role == ROLE_INITIATOR) {
			match = (group == XPT_FC_GROUP_COMMON)
			     && ((tag == scb->hscb->tag)
			      || (tag == SCB_LIST_NULL));
		} else if (role == ROLE_TARGET) {
			match = (group == XPT_FC_GROUP_TMODE)
			     && ((tag == scb->ccb->csio.tag_id)
			      || (tag == SCB_LIST_NULL));
		}
	}

	return match;
}

/*
 * Append a 5-byte extended SDTR (synchronous transfer negotiation)
 * message to the outgoing message buffer.
 */
static void
ahc_construct_sdtr(struct ahc_softc *ahc, u_int period, u_int offset)
{
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR;
	ahc->msgout_buf[ahc->msgout_index++] = period;
ahc->msgout_buf[ahc->msgout_index++] = offset;
	ahc->msgout_len += 5;
}

/*
 * Append a 4-byte extended WDTR (wide transfer negotiation) message
 * to the outgoing message buffer.
 */
static void
ahc_construct_wdtr(struct ahc_softc *ahc, u_int bus_width)
{
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR;
	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
	ahc->msgout_len += 4;
}

/*
 * Compute the data (or sense) residual for a completed SCB from the
 * hardware SCB's residual byte count plus the lengths of any S/G
 * segments that were never transferred, and store it in the CCB.
 */
static void
ahc_calc_residual(struct scb *scb)
{
	struct hardware_scb *hscb;

	hscb = scb->hscb;

	/*
	 * If the disconnected flag is still set, this is bogus
	 * residual information left over from a sequencer
	 * paging/pageout, so ignore this case.
	 */
	if ((scb->hscb->control & DISCONNECTED) == 0) {
		u_int32_t resid;
		int resid_sgs;
		int sg;

		/*
		 * Remainder of the SG where the transfer
		 * stopped.  24-bit little-endian count.
		 */
		resid = (hscb->residual_data_count[2] << 16)
		      | (hscb->residual_data_count[1] << 8)
		      | (hscb->residual_data_count[0]);

		/*
		 * Add up the contents of all residual
		 * SG segments that are after the SG where
		 * the transfer stopped.
		 */
		resid_sgs = scb->hscb->residual_SG_count - 1/*current*/;
		sg = scb->sg_count - resid_sgs;
		while (resid_sgs > 0) {

			resid += scb->sg_list[sg].len;
			sg++;
			resid_sgs--;
		}
		if ((scb->flags & SCB_SENSE) == 0) {

			scb->ccb->csio.resid = resid;
		} else {

			scb->ccb->csio.sense_resid = resid;
		}

#ifdef AHC_DEBUG
		if (ahc_debug & AHC_SHOWMISC) {
			xpt_print_path(scb->ccb->ccb_h.path);
			printf("Handled Residual of %d bytes\n", resid);
		}
#endif
	}

	/*
	 * Clean out the residual information in this SCB for its
	 * next consumer.
	 */
	hscb->residual_SG_count = 0;
}

/*
 * Propagate freshly negotiated transfer parameters (sync rate, offset,
 * ultra enable) into every pending host SCB and into every hardware
 * SCB slot on the card, so in-flight commands use the new settings.
 * NOTE(review): assumes the sequencer is paused by the caller since
 * SCBPTR is manipulated directly - confirm at call sites.
 */
static void
ahc_update_pending_syncrates(struct ahc_softc *ahc)
{
	struct ccb_hdr *ccbh;
	int pending_ccb_count;
	int i;
	u_int saved_scbptr;

	/*
	 * Traverse the pending SCB list and ensure that all of the
	 * SCBs there have the proper settings.
	 */
	ccbh = LIST_FIRST(&ahc->pending_ccbs);
	pending_ccb_count = 0;
	while (ccbh != NULL) {
		struct ahc_devinfo devinfo;
		union ccb *ccb;
		struct scb *pending_scb;
		struct hardware_scb *pending_hscb;
		struct ahc_initiator_tinfo *tinfo;
		struct tmode_tstate *tstate;
		u_int our_id, remote_id;

		ccb = (union ccb*)ccbh;
		pending_scb = (struct scb *)ccbh->ccb_scb_ptr;
		pending_hscb = pending_scb->hscb;
		/* Target-mode CTIOs reverse the our/remote id roles. */
		if (ccbh->func_code == XPT_CONT_TARGET_IO) {
			our_id = ccb->ccb_h.target_id;
			remote_id = ccb->ctio.init_id;
		} else {
			our_id = SCB_IS_SCSIBUS_B(pending_scb)
			       ? ahc->our_id_b : ahc->our_id;
			remote_id = ccb->ccb_h.target_id;
		}
		ahc_compile_devinfo(&devinfo, our_id, remote_id,
				    SCB_LUN(pending_scb),
				    SCB_CHANNEL(pending_scb),
				    ROLE_UNKNOWN);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    our_id, remote_id, &tstate);
		pending_hscb->control &= ~ULTRAENB;
		if ((tstate->ultraenb & devinfo.target_mask) != 0)
			pending_hscb->control |= ULTRAENB;
		pending_hscb->scsirate = tinfo->scsirate;
		pending_hscb->scsioffset = tinfo->current.offset;
		pending_ccb_count++;
		ccbh = LIST_NEXT(ccbh, sim_links.le);
	}

	if (pending_ccb_count == 0)
		return;

	saved_scbptr = ahc_inb(ahc, SCBPTR);
	/* Ensure that the hscbs down on the card match the new information */
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		u_int scb_tag;

		ahc_outb(ahc, SCBPTR, i);
		scb_tag = ahc_inb(ahc, SCB_TAG);
		if (scb_tag != SCB_LIST_NULL) {
			struct ahc_devinfo devinfo;
			union ccb *ccb;
			struct scb *pending_scb;
			struct hardware_scb *pending_hscb;
			struct ahc_initiator_tinfo *tinfo;
			struct tmode_tstate *tstate;
			u_int our_id, remote_id;
			u_int control;

			pending_scb = &ahc->scb_data->scbarray[scb_tag];
			if (pending_scb->flags == SCB_FREE)
				continue;
			pending_hscb = pending_scb->hscb;
			ccb = pending_scb->ccb;
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				our_id = ccb->ccb_h.target_id;
				remote_id = ccb->ctio.init_id;
			} else {
				our_id = SCB_IS_SCSIBUS_B(pending_scb)
				       ? ahc->our_id_b : ahc->our_id;
				remote_id = ccb->ccb_h.target_id;
			}
			ahc_compile_devinfo(&devinfo, our_id, remote_id,
					    SCB_LUN(pending_scb),
					    SCB_CHANNEL(pending_scb),
					    ROLE_UNKNOWN);
			tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
						    our_id, remote_id, &tstate);
			control = ahc_inb(ahc, SCB_CONTROL);
			control &= ~ULTRAENB;
			if ((tstate->ultraenb & devinfo.target_mask) != 0)
				control |= ULTRAENB;
			ahc_outb(ahc, SCB_CONTROL, control);
			ahc_outb(ahc, SCB_SCSIRATE, tinfo->scsirate);
			ahc_outb(ahc, SCB_SCSIOFFSET, tinfo->current.offset);
		}
	}
	ahc_outb(ahc, SCBPTR, saved_scbptr);
}

#if UNUSED
/*
 * Hex-dump a received target command, eight bytes per line, for
 * debugging target mode.
 */
static void
ahc_dump_targcmd(struct target_cmd *cmd)
{
	u_int8_t *byte;
	u_int8_t *last_byte;
	int i;

	byte = &cmd->initiator_channel;
	/* Debugging info for received commands */
	last_byte = &cmd[1].initiator_channel;

	i = 0;
	while (byte < last_byte) {
		if (i == 0)
			printf("\t");
		printf("%#x", *byte++);
		i++;
		if (i == 8) {
			printf("\n");
			i = 0;
		} else {
			printf(", ");
		}
	}
}
#endif

/*
 * Shutdown hook:  quiesce the controller while preserving the
 * termination settings in SXFRCTL1 so a multi-initiator bus is not
 * disturbed.
 */
static void
ahc_shutdown(void *arg, int howto)
{
	struct ahc_softc *ahc;
	int i;
	u_int sxfrctl1_a, sxfrctl1_b;

	ahc = (struct ahc_softc *)arg;

	pause_sequencer(ahc);
	/*
	 * Preserve the value of the SXFRCTL1 register for all channels.
	 * It contains settings that affect termination and we don't want
	 * to disturb the integrity of the bus during shutdown in case
	 * we are in a multi-initiator setup.
	 */
	sxfrctl1_b = 0;
	if ((ahc->features & AHC_TWIN) != 0) {
		u_int sblkctl;

		/* Momentarily select channel B to read its SXFRCTL1. */
		sblkctl = ahc_inb(ahc, SBLKCTL);
		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
		sxfrctl1_b = ahc_inb(ahc, SXFRCTL1);
		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
	}

	sxfrctl1_a = ahc_inb(ahc, SXFRCTL1);

	/* This will reset most registers to 0, but not all */
	ahc_reset(ahc);

	/* Restore the preserved termination settings post-reset. */
	if ((ahc->features & AHC_TWIN) != 0) {
		u_int sblkctl;

		sblkctl = ahc_inb(ahc, SBLKCTL);
		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
		ahc_outb(ahc, SXFRCTL1, sxfrctl1_b);
		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
	}
	ahc_outb(ahc, SXFRCTL1, sxfrctl1_a);

	ahc_outb(ahc, SCSISEQ, 0);
	ahc_outb(ahc, SXFRCTL0, 0);
	ahc_outb(ahc, DSPCISTATUS, 0);

	for (i = TARG_SCSIRATE; i < HA_274_BIOSCTRL; i++)
		ahc_outb(ahc, i, 0);
}

/*
 * Add a target mode event to this lun's queue.  The event buffer is a
 * fixed-size ring (event_r_idx/event_w_idx); bus resets flush all
 * earlier events, and the oldest event is dropped (with a console
 * warning) when the ring is full.
 */
static void
ahc_queue_lstate_event(struct ahc_softc *ahc, struct tmode_lstate *lstate,
		       u_int initiator_id, u_int event_type, u_int event_arg)
{
	struct ahc_tmode_event *event;
	int pending;

	xpt_freeze_devq(lstate->path, /*count*/1);
	if (lstate->event_w_idx >= lstate->event_r_idx)
		pending = lstate->event_w_idx - lstate->event_r_idx;
	else
		pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
			- (lstate->event_r_idx - lstate->event_w_idx);

	if (event_type == EVENT_TYPE_BUS_RESET
	 || event_type == MSG_BUS_DEV_RESET) {
		/*
		 * Any earlier events are irrelevant, so reset our buffer.
		 * This has the effect of allowing us to deal with reset
		 * floods (an external device holding down the reset line)
		 * without losing the event that is really interesting.
		 */
		lstate->event_r_idx = 0;
		lstate->event_w_idx = 0;
		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
	}

	if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
		/* Ring full:  drop the oldest event to make room. */
		xpt_print_path(lstate->path);
		printf("immediate event %x:%x lost\n",
		       lstate->event_buffer[lstate->event_r_idx].event_type,
		       lstate->event_buffer[lstate->event_r_idx].event_arg);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
	}

	event = &lstate->event_buffer[lstate->event_w_idx];
	event->initiator_id = initiator_id;
	event->event_type = event_type;
	event->event_arg = event_arg;
	lstate->event_w_idx++;
	if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
		lstate->event_w_idx = 0;
}

/*
 * Send any target mode events queued up waiting
 * for immediate notify resources.
 */
static void
ahc_send_lstate_events(struct ahc_softc *ahc, struct tmode_lstate *lstate)
{
	struct ccb_hdr *ccbh;
	struct ccb_immed_notify *inot;

	/* Drain events while both an event and an INOT CCB are available. */
	while (lstate->event_r_idx != lstate->event_w_idx
	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
		struct ahc_tmode_event *event;

		event = &lstate->event_buffer[lstate->event_r_idx];
		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
		inot = (struct ccb_immed_notify *)ccbh;
		switch (event->event_type) {
		case EVENT_TYPE_BUS_RESET:
			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
			break;
		default:
			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
			inot->message_args[0] = event->event_type;
			inot->message_args[1] = event->event_arg;
			break;
		}
		inot->initiator_id = event->initiator_id;
		inot->sense_len = 0;
		xpt_done((union ccb *)inot);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
	}
}