/*
 * Generic driver for the aic7xxx based Adaptec SCSI controllers
 * Product specific probe and attach routines can be found in:
 * i386/eisa/ahc_eisa.c	27/284X and aic7770 motherboard controllers
 * pci/ahc_pci.c	3985, 3980, 3940, 2940, aic7895, aic7890,
 *			aic7880, aic7870, aic7860, and aic7850 controllers
 *
 * Copyright (c) 1994, 1995, 1996, 1997, 1998, 1999 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * the GNU Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * A few notes on features of the driver.
 *
 * SCB paging takes advantage of the fact that devices stay disconnected
 * from the bus a relatively long time and that while they're disconnected,
 * having the SCBs for these transactions down on the host adapter is of
 * little use.  Instead of leaving this idle SCB down on the card we copy
 * it back up into kernel memory and reuse the SCB slot on the card to
 * schedule another transaction.  This can be a real payoff when doing random
 * I/O to tagged queueing devices since there are more transactions active at
 * once for the device to sort for optimal seek reduction.  The algorithm goes
 * like this...
 *
 * The sequencer maintains two lists of its hardware SCBs.  The first is the
 * singly linked free list which tracks all SCBs that are not currently in
 * use.  The second is the doubly linked disconnected list which holds the
 * SCBs of transactions that are in the disconnected state sorted most
 * recently disconnected first.  When the kernel queues a transaction to
 * the card, a hardware SCB to "house" this transaction is retrieved from
 * either of these two lists.  If the SCB came from the disconnected list,
 * a check is made to see if any data transfer or SCB linking (more on linking
 * in a bit) information has been changed since it was copied from the host
 * and, if so, the SCB is DMAed back up to the host before the slot can be
 * reused.  Once a hardware SCB has been obtained, the SCB is DMAed from the
 * host.
 * Before any work can begin on this SCB, the sequencer must ensure that
 * either the SCB is for a tagged transaction or the target is not already
 * working on another non-tagged transaction.  If a conflict arises in the
 * non-tagged case, the sequencer finds the SCB for the active transaction
 * and sets the SCB_LINKED field in that SCB to this next SCB to execute.
 * To facilitate finding active non-tagged SCBs, the last four bytes of up
 * to the first four hardware SCBs serve as a storage area for the currently
 * active SCB ID for each target.
 *
 * When a device reconnects, a search is made of the hardware SCBs to find
 * the SCB for this transaction.  If the search fails, a hardware SCB is
 * pulled from either the free or disconnected SCB list and the proper
 * SCB is DMAed from the host.  If the MK_MESSAGE control bit is set
 * in the control byte of the SCB while it was disconnected, the sequencer
 * will assert ATN and attempt to issue a message to the host.
 *
 * When a command completes, a check for non-zero status and residuals is
 * made.  If either of these conditions exists, the SCB is DMAed back up to
 * the host so that it can interpret this information.  Additionally, in the
 * case of bad status, the sequencer generates a special interrupt and pauses
 * itself.  This allows the host to set up a request sense command if it
 * chooses for this target synchronously with the error so that sense
 * information isn't lost.
 *
 */

#include <opt_aic7xxx.h>

#include <pci.h>
#include <stddef.h>	/* For offsetof */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <sys/buf.h>
#include <sys/proc.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#if NPCI > 0
#include <machine/bus_memio.h>
#endif
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/aic7xxx/aic7xxx.h>
#include <dev/aic7xxx/sequencer.h>

#include <aic7xxx_reg.h>
#include <aic7xxx_seq.h>

#include <sys/kernel.h>

#ifndef AHC_TMODE_ENABLE
#define AHC_TMODE_ENABLE 0
#endif

#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#define ALL_CHANNELS '\0'
#define ALL_TARGETS_MASK 0xFFFF
#define INITIATOR_WILDCARD	(~0)

#define	SIM_IS_SCSIBUS_B(ahc, sim)	\
	((sim) == ahc->sim_b)
#define	SIM_CHANNEL(ahc, sim)	\
	(((sim) == ahc->sim_b) ? 'B' : 'A')
#define	SIM_SCSI_ID(ahc, sim)	\
	(((sim) == ahc->sim_b) ? ahc->our_id_b : ahc->our_id)
#define	SIM_PATH(ahc, sim)	\
	(((sim) == ahc->sim_b) ? ahc->path_b : ahc->path)
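
/*
 * The SCB_* and TCL_* macros below decode the packed transaction address
 * kept in hscb->tcl.  Judging only from the masks and shifts used here
 * (TID >> 4, SELBUSB, LID), the byte appears to be laid out as:
 *
 *	bits 7-4  target ID	(TID)
 *	bit  3    channel	(SELBUSB, channel 'B' when set)
 *	bits 2-0  lun		(LID)
 *
 * The authoritative mask values live in aic7xxx_reg.h; the sketch above is
 * only an illustration inferred from these macros.
 */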
#define	SCB_IS_SCSIBUS_B(scb)	\
	(((scb)->hscb->tcl & SELBUSB) != 0)
#define	SCB_TARGET(scb)	\
	(((scb)->hscb->tcl & TID) >> 4)
#define	SCB_CHANNEL(scb)	\
	(SCB_IS_SCSIBUS_B(scb) ? 'B' : 'A')
#define	SCB_LUN(scb)	\
	((scb)->hscb->tcl & LID)
#define	SCB_TARGET_OFFSET(scb)	\
	(SCB_TARGET(scb) + (SCB_IS_SCSIBUS_B(scb) ? 8 : 0))
#define	SCB_TARGET_MASK(scb)	\
	(0x01 << (SCB_TARGET_OFFSET(scb)))
#define	TCL_CHANNEL(ahc, tcl)	\
	((((ahc)->features & AHC_TWIN) && ((tcl) & SELBUSB)) ? 'B' : 'A')
#define	TCL_SCSI_ID(ahc, tcl)	\
	(TCL_CHANNEL((ahc), (tcl)) == 'B' ? (ahc)->our_id_b : (ahc)->our_id)
#define	TCL_TARGET(tcl) (((tcl) & TID) >> TCL_TARGET_SHIFT)
#define	TCL_LUN(tcl) ((tcl) & LID)

#define ccb_scb_ptr spriv_ptr0
#define ccb_ahc_ptr spriv_ptr1

typedef enum {
	ROLE_UNKNOWN,
	ROLE_INITIATOR,
	ROLE_TARGET
} role_t;

struct ahc_devinfo {
	int	  our_scsiid;
	int	  target_offset;
	u_int16_t target_mask;
	u_int8_t  target;
	u_int8_t  lun;
	char	  channel;
	role_t	  role;		/*
				 * Only guaranteed to be correct if not
				 * in the busfree state.
				 */
};

typedef enum {
	SEARCH_COMPLETE,
	SEARCH_COUNT,
	SEARCH_REMOVE
} ahc_search_action;

#ifdef AHC_DEBUG
static int     ahc_debug = AHC_DEBUG;
#endif

#if NPCI > 0
void ahc_pci_intr(struct ahc_softc *ahc);
#endif

static int	ahcinitscbdata(struct ahc_softc *ahc);
static void	ahcfiniscbdata(struct ahc_softc *ahc);

static bus_dmamap_callback_t	ahcdmamapcb;

#if UNUSED
static void	ahc_dump_targcmd(struct target_cmd *cmd);
#endif
static void	ahc_shutdown(void *arg, int howto);
static cam_status
		ahc_find_tmode_devs(struct ahc_softc *ahc,
				    struct cam_sim *sim, union ccb *ccb,
				    struct tmode_tstate **tstate,
				    struct tmode_lstate **lstate,
				    int notfound_failure);
static void	ahc_action(struct cam_sim *sim, union ccb *ccb);
static void	ahc_async(void *callback_arg, u_int32_t code,
			  struct cam_path *path, void *arg);
static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments, int error);
static void	ahc_poll(struct cam_sim *sim);
static void	ahc_setup_data(struct ahc_softc *ahc,
			       struct ccb_scsiio *csio, struct scb *scb);
static void	ahc_freeze_devq(struct ahc_softc *ahc, struct cam_path *path);
static void	ahcallocscbs(struct ahc_softc *ahc);
static void	ahc_scb_devinfo(struct ahc_softc *ahc,
				struct ahc_devinfo *devinfo,
				struct scb *scb);
static void	ahc_fetch_devinfo(struct ahc_softc *ahc,
				  struct ahc_devinfo *devinfo);
static void	ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id,
				    u_int target, u_int lun, char channel,
				    role_t role);
static u_int	ahc_abort_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev);
static void	ahc_done(struct ahc_softc *ahc, struct scb *scbp);
static struct tmode_tstate *
		ahc_alloc_tstate(struct ahc_softc *ahc,
				 u_int scsi_id, char channel);
static void	ahc_free_tstate(struct ahc_softc *ahc,
				u_int scsi_id, char channel, int force);
static void	ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim,
				  union ccb *ccb);
static int	ahc_handle_target_cmd(struct ahc_softc *ahc,
				      struct target_cmd *cmd);
static void	ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat);
static void	ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat);
static void	ahc_build_transfer_msg(struct ahc_softc *ahc,
				       struct ahc_devinfo *devinfo);
static void	ahc_setup_initiator_msgout(struct ahc_softc *ahc,
					   struct ahc_devinfo *devinfo,
					   struct scb *scb);
static void	ahc_setup_target_msgin(struct ahc_softc *ahc,
				       struct ahc_devinfo *devinfo);
static int	ahc_handle_msg_reject(struct ahc_softc *ahc,
				      struct ahc_devinfo *devinfo);
static void	ahc_clear_msg_state(struct ahc_softc *ahc);
static void	ahc_handle_message_phase(struct ahc_softc *ahc,
					 struct cam_path *path);
static int	ahc_sent_msg(struct ahc_softc *ahc, u_int msgtype, int full);
typedef enum {
	MSGLOOP_IN_PROG,
	MSGLOOP_MSGCOMPLETE,
	MSGLOOP_TERMINATED
} msg_loop_stat;
static int	ahc_parse_msg(struct ahc_softc *ahc, struct cam_path *path,
			      struct ahc_devinfo *devinfo);
static void	ahc_handle_ign_wide_residue(struct ahc_softc *ahc,
					    struct ahc_devinfo *devinfo);
static void	ahc_handle_devreset(struct ahc_softc *ahc,
				    struct ahc_devinfo *devinfo,
				    cam_status status, ac_code acode,
				    char *message,
				    int verbose_level);
#ifdef AHC_DUMP_SEQ
static void	ahc_dumpseq(struct ahc_softc *ahc);
#endif
static void	ahc_loadseq(struct ahc_softc *ahc);
static int	ahc_check_patch(struct ahc_softc *ahc,
				struct patch **start_patch,
				int start_instr, int *skip_addr);
static void	ahc_download_instr(struct ahc_softc *ahc,
				   int instrptr, u_int8_t *dconsts);
static int	ahc_match_scb(struct scb *scb, int target, char channel,
			      int lun, u_int tag, role_t role);
#ifdef AHC_DEBUG
static void	ahc_print_scb(struct scb *scb);
#endif
static int	ahc_search_qinfifo(struct ahc_softc *ahc, int target,
				   char channel, int lun, u_int tag,
				   role_t role, u_int32_t status,
				   ahc_search_action action);
static void	ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim,
			      union ccb *ccb);
static int	ahc_reset_channel(struct ahc_softc *ahc, char channel,
				  int initiate_reset);
static int	ahc_abort_scbs(struct ahc_softc *ahc, int target,
			       char channel, int lun, u_int tag, role_t role,
			       u_int32_t status);
static int	ahc_search_disc_list(struct ahc_softc *ahc, int target,
				     char channel, int lun, u_int tag);
static u_int	ahc_rem_scb_from_disc_list(struct ahc_softc *ahc,
					   u_int prev, u_int scbptr);
static void	ahc_add_curscb_to_free_list(struct ahc_softc *ahc);
static void	ahc_clear_intstat(struct ahc_softc *ahc);
static void	ahc_reset_current_bus(struct ahc_softc *ahc);
static struct ahc_syncrate *
		ahc_devlimited_syncrate(struct ahc_softc *ahc, u_int *period);
static struct ahc_syncrate *
		ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
				  u_int maxsync);
static u_int	ahc_find_period(struct ahc_softc *ahc, u_int scsirate,
				u_int maxsync);
static void	ahc_validate_offset(struct ahc_softc *ahc,
				    struct ahc_syncrate *syncrate,
				    u_int *offset, int wide);
static void	ahc_update_target_msg_request(struct ahc_softc *ahc,
					      struct ahc_devinfo *devinfo,
					      struct ahc_initiator_tinfo *tinfo,
					      int force, int paused);
static int	ahc_create_path(struct ahc_softc *ahc,
				struct ahc_devinfo *devinfo,
				struct cam_path **path);
static void	ahc_set_syncrate(struct ahc_softc *ahc,
				 struct ahc_devinfo *devinfo,
				 struct cam_path *path,
				 struct ahc_syncrate *syncrate,
				 u_int period, u_int offset, u_int type,
				 int paused);
static void	ahc_set_width(struct ahc_softc *ahc,
			      struct ahc_devinfo *devinfo,
			      struct cam_path *path, u_int width, u_int type,
			      int paused);
static void	ahc_set_tags(struct ahc_softc *ahc,
			     struct ahc_devinfo *devinfo,
			     int enable);
static void	ahc_construct_sdtr(struct ahc_softc *ahc,
				   u_int period, u_int offset);

static void	ahc_construct_wdtr(struct ahc_softc *ahc, u_int bus_width);

static void	ahc_calc_residual(struct scb *scb);

static void	ahc_update_pending_syncrates(struct ahc_softc *ahc);

static void	ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);

static timeout_t ahc_timeout;
static void	ahc_queue_lstate_event(struct ahc_softc *ahc,
				       struct tmode_lstate *lstate,
				       u_int initiator_id, u_int event_type,
				       u_int event_arg);
static void	ahc_send_lstate_events(struct ahc_softc *ahc,
				       struct tmode_lstate *lstate);
static __inline int  sequencer_paused(struct ahc_softc *ahc);
static __inline void pause_sequencer(struct ahc_softc *ahc);
static __inline void unpause_sequencer(struct ahc_softc *ahc,
				       int unpause_always);
static __inline void restart_sequencer(struct ahc_softc *ahc);
static __inline u_int ahc_index_busy_tcl(struct ahc_softc *ahc,
					 u_int tcl, int unbusy);

static __inline void	   ahc_busy_tcl(struct ahc_softc *ahc, struct scb *scb);

static __inline void	   ahc_freeze_ccb(union ccb* ccb);
static __inline cam_status ahc_ccb_status(union ccb* ccb);
static __inline void	   ahcsetccbstatus(union ccb* ccb,
					   cam_status status);
static void		   ahc_run_tqinfifo(struct ahc_softc *ahc);
static void		   ahc_run_qoutfifo(struct ahc_softc *ahc);

static __inline struct ahc_initiator_tinfo *
			   ahc_fetch_transinfo(struct ahc_softc *ahc,
					       char channel,
					       u_int our_id, u_int target,
					       struct tmode_tstate **tstate);
static void		   ahcfreescb(struct ahc_softc *ahc, struct scb *scb);
static __inline struct scb *ahcgetscb(struct ahc_softc *ahc);

static __inline u_int32_t
ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
{
	return (ahc->scb_data->hscb_busaddr
		+ (sizeof(struct hardware_scb) * index));
}

#define AHC_BUSRESET_DELAY	25	/* Reset delay in us */

static __inline int
sequencer_paused(struct ahc_softc *ahc)
{
	return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
}

static __inline void
pause_sequencer(struct ahc_softc *ahc)
{
	ahc_outb(ahc, HCNTRL, ahc->pause);

	/*
	 * Since the sequencer can disable pausing in a critical section, we
	 * must loop until it actually stops.
	 */
	while (sequencer_paused(ahc) == 0)
		;
}
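
/*
 * The usual way these helpers are used elsewhere in this file (for example
 * in ahc_run_tqinfifo()) is: pause the sequencer, touch registers or SCB
 * space that the sequencer also owns, then unpause.  A minimal sketch,
 * assuming "ahc" points to a valid softc:
 *
 *	pause_sequencer(ahc);
 *	ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
 *	unpause_sequencer(ahc, FALSE);
 *
 * Passing FALSE for unpause_always leaves the sequencer paused when an
 * interrupt condition (SCSIINT, SEQINT or BRKADRINT) is still pending; see
 * unpause_sequencer() below.
 */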

static __inline void
unpause_sequencer(struct ahc_softc *ahc, int unpause_always)
{
	if (unpause_always
	 || (ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
		ahc_outb(ahc, HCNTRL, ahc->unpause);
}

/*
 * Restart the sequencer program from address zero
 */
static __inline void
restart_sequencer(struct ahc_softc *ahc)
{
	pause_sequencer(ahc);
	ahc_outb(ahc, SEQCTL, FASTMODE|SEQRESET);
	unpause_sequencer(ahc, /*unpause_always*/TRUE);
}

static __inline u_int
ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl, int unbusy)
{
	u_int scbid;

	scbid = ahc->untagged_scbs[tcl];
	if (unbusy)
		ahc->untagged_scbs[tcl] = SCB_LIST_NULL;

	return (scbid);
}

static __inline void
ahc_busy_tcl(struct ahc_softc *ahc, struct scb *scb)
{
	ahc->untagged_scbs[scb->hscb->tcl] = scb->hscb->tag;
}

static __inline void
ahc_freeze_ccb(union ccb* ccb)
{
	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
	}
}

static __inline cam_status
ahc_ccb_status(union ccb* ccb)
{
	return (ccb->ccb_h.status & CAM_STATUS_MASK);
}

static __inline void
ahcsetccbstatus(union ccb* ccb, cam_status status)
{
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= status;
}

static __inline struct ahc_initiator_tinfo *
ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
		    u_int remote_id, struct tmode_tstate **tstate)
{
	/*
	 * Transfer data structures are stored from the perspective
	 * of the target role.  Since the parameters for a connection
	 * in the initiator role to a given target are the same as
	 * when the roles are reversed, we pretend we are the target.
	 */
	if (channel == 'B')
		our_id += 8;
	*tstate = ahc->enabled_targets[our_id];
	return (&(*tstate)->transinfo[remote_id]);
}

static void
ahc_run_tqinfifo(struct ahc_softc *ahc)
{
	struct target_cmd *cmd;

	while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) {

		/*
		 * Only advance through the queue if we
		 * had the resources to process the command.
		 */
		if (ahc_handle_target_cmd(ahc, cmd) != 0)
			break;

		ahc->tqinfifonext++;
		cmd->cmd_valid = 0;

		/*
		 * Lazily update our position in the target mode incoming
		 * command queue as seen by the sequencer.
		 */
		if ((ahc->tqinfifonext & (TQINFIFO_UPDATE_CNT-1)) == 0) {
			pause_sequencer(ahc);
			ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
			unpause_sequencer(ahc, /*unpause_always*/FALSE);
		}
	}
}

static void
ahc_run_qoutfifo(struct ahc_softc *ahc)
{
	struct scb *scb;
	u_int  scb_index;

	while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) {
		scb_index = ahc->qoutfifo[ahc->qoutfifonext];
		ahc->qoutfifo[ahc->qoutfifonext++] = SCB_LIST_NULL;

		scb = &ahc->scb_data->scbarray[scb_index];
		if (scb_index >= ahc->scb_data->numscbs
		 || (scb->flags & SCB_ACTIVE) == 0) {
			printf("%s: WARNING no command for scb %d "
			       "(cmdcmplt)\nQOUTPOS = %d\n",
			       ahc_name(ahc), scb_index,
			       ahc->qoutfifonext - 1);
			continue;
		}

		/*
		 * Save off the residual
		 * if there is one.
		 */
		if (scb->hscb->residual_SG_count != 0)
			ahc_calc_residual(scb);
		else
			scb->ccb->csio.resid = 0;
		ahc_done(ahc, scb);
	}
}
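
/*
 * A note on the completion queue walked by ahc_run_qoutfifo() above: the
 * adapter posts the tag of each completed SCB into the next slot of
 * ahc->qoutfifo, while the host consumes entries at qoutfifonext and writes
 * SCB_LIST_NULL back into every slot it has processed.  A slot holding
 * SCB_LIST_NULL therefore means "no completion posted yet", which is why
 * the loop terminates on that value.
 */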

/*
 * An scb (and hence an scb entry on the board) is put onto the
 * free list.
 */
static void
ahcfreescb(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *hscb;
	int opri;

	hscb = scb->hscb;

	opri = splcam();

	if ((ahc->flags & AHC_RESOURCE_SHORTAGE) != 0
	 && (scb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
		scb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		ahc->flags &= ~AHC_RESOURCE_SHORTAGE;
	}

	/* Clean up for the next user */
	scb->flags = SCB_FREE;
	hscb->control = 0;
	hscb->status = 0;

	SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links);
	splx(opri);
}

/*
 * Get a free scb, either one already assigned to a hardware slot
 * on the adapter or one that will require an SCB to be paged out before
 * use.  If there are none, see if we can allocate a new SCB.  Otherwise
 * either return an error or sleep.
 */
static __inline struct scb *
ahcgetscb(struct ahc_softc *ahc)
{
	struct scb *scbp;
	int opri;

	opri = splcam();
	if ((scbp = SLIST_FIRST(&ahc->scb_data->free_scbs))) {
		SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links);
	} else {
		ahcallocscbs(ahc);
		scbp = SLIST_FIRST(&ahc->scb_data->free_scbs);
		if (scbp != NULL)
			SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links);
	}

	splx(opri);

	return (scbp);
}

char *
ahc_name(struct ahc_softc *ahc)
{
	static char name[10];

	snprintf(name, sizeof(name), "ahc%d", ahc->unit);
	return (name);
}

#ifdef AHC_DEBUG
static void
ahc_print_scb(struct scb *scb)
{
	struct hardware_scb *hscb = scb->hscb;

	printf("scb:%p control:0x%x tcl:0x%x cmdlen:%d cmdpointer:0x%lx\n",
	       scb,
	       hscb->control,
	       hscb->tcl,
	       hscb->cmdlen,
	       hscb->cmdpointer );
	printf("        datlen:%d data:0x%lx segs:0x%x segp:0x%lx\n",
	       hscb->datalen,
	       hscb->data,
	       hscb->SG_count,
	       hscb->SG_pointer);
	printf("	sg_addr:%lx sg_len:%ld\n",
	       scb->sg_list[0].addr,
	       scb->sg_list[0].len);
	printf("	cdb:%x %x %x %x %x %x %x %x %x %x %x %x\n",
	       hscb->cmdstore[0], hscb->cmdstore[1], hscb->cmdstore[2],
	       hscb->cmdstore[3], hscb->cmdstore[4], hscb->cmdstore[5],
	       hscb->cmdstore[6], hscb->cmdstore[7], hscb->cmdstore[8],
	       hscb->cmdstore[9], hscb->cmdstore[10], hscb->cmdstore[11]);
}
#endif

static struct {
	u_int8_t errno;
	char *errmesg;
} hard_error[] = {
	{ ILLHADDR,   "Illegal Host Access" },
	{ ILLSADDR,   "Illegal Sequencer Address referenced" },
	{ ILLOPCODE,  "Illegal Opcode in sequencer program" },
	{ SQPARERR,   "Sequencer Parity Error" },
	{ DPARERR,    "Data-path Parity Error" },
	{ MPARERR,    "Scratch or SCB Memory Parity Error" },
	{ PCIERRSTAT, "PCI Error detected" },
	{ CIOPARERR,  "CIOBUS Parity Error" },
};


/*
 * Valid SCSIRATE values.  (p. 3-17)
 * Provides a mapping of transfer periods in ns to the proper value to
 * stick in the SCSIRATE reg to use that transfer rate.
 */
#define AHC_SYNCRATE_ULTRA2	0
#define AHC_SYNCRATE_ULTRA	2
#define AHC_SYNCRATE_FAST	5
static struct ahc_syncrate ahc_syncrates[] = {
      /* ultra2    fast/ultra  period	rate */
	{ 0x13,      0x000,      10,	"40.0" },
	{ 0x14,      0x000,      11,	"33.0" },
	{ 0x15,      0x100,      12,	"20.0" },
	{ 0x16,      0x110,      15,	"16.0" },
	{ 0x17,      0x120,      18,	"13.4" },
	{ 0x18,      0x000,      25,	"10.0" },
	{ 0x19,      0x010,      31,	"8.0"  },
	{ 0x1a,      0x020,      37,	"6.67" },
	{ 0x1b,      0x030,      43,	"5.7"  },
	{ 0x10,      0x040,      50,	"5.0"  },
	{ 0x00,      0x050,      56,	"4.4"  },
	{ 0x00,      0x060,      62,	"4.0"  },
	{ 0x00,      0x070,      68,	"3.6"  },
	{ 0x00,      0x000,      0,	NULL   }
};

/*
 * Allocate a controller structure for a new device and initialize it.
 */
struct ahc_softc *
ahc_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id,
	  bus_dma_tag_t parent_dmat, ahc_chip chip, ahc_feature features,
	  ahc_flag flags, struct scb_data *scb_data)
{
	/*
	 * find unit and check we have that many defined
	 */
	struct ahc_softc *ahc;
	size_t alloc_size;

	/*
	 * Allocate a storage area for us
	 */
	if (scb_data == NULL)
		/*
		 * We are not sharing SCB space with another controller
		 * so allocate our own SCB data space.
		 */
		alloc_size = sizeof(struct full_ahc_softc);
	else
		alloc_size = sizeof(struct ahc_softc);
	ahc = malloc(alloc_size, M_DEVBUF, M_NOWAIT);
	if (!ahc) {
		device_printf(dev, "cannot malloc softc!\n");
		return NULL;
	}
	bzero(ahc, alloc_size);
	LIST_INIT(&ahc->pending_ccbs);
	ahc->device = dev;
	ahc->unit = device_get_unit(dev);
	ahc->regs_res_type = regs_type;
	ahc->regs_res_id = regs_id;
	ahc->regs = regs;
	ahc->tag = rman_get_bustag(regs);
	ahc->bsh = rman_get_bushandle(regs);
	ahc->parent_dmat = parent_dmat;
	ahc->chip = chip;
	ahc->features = features;
	ahc->flags = flags;
	if (scb_data == NULL) {
		struct full_ahc_softc* full_softc = (struct full_ahc_softc*)ahc;
		ahc->scb_data = &full_softc->scb_data_storage;
	} else
		ahc->scb_data = scb_data;

	ahc->unpause = (ahc_inb(ahc, HCNTRL) & IRQMS) | INTEN;
	/* The IRQMS bit is only valid on VL and EISA chips */
	if ((ahc->chip & AHC_PCI) != 0)
		ahc->unpause &= ~IRQMS;
	ahc->pause = ahc->unpause | PAUSE;
	return (ahc);
}

void
ahc_free(ahc)
	struct ahc_softc *ahc;
{
	ahcfiniscbdata(ahc);
	switch (ahc->init_level) {
	case 3:
		bus_dmamap_unload(ahc->shared_data_dmat,
				  ahc->shared_data_dmamap);
	case 2:
		bus_dmamem_free(ahc->shared_data_dmat, ahc->qoutfifo,
				ahc->shared_data_dmamap);
		bus_dmamap_destroy(ahc->shared_data_dmat,
				   ahc->shared_data_dmamap);
	case 1:
		bus_dma_tag_destroy(ahc->buffer_dmat);
		break;
	}

	if (ahc->regs != NULL)
		bus_release_resource(ahc->device, ahc->regs_res_type,
				     ahc->regs_res_id, ahc->regs);
	if (ahc->irq != NULL)
		bus_release_resource(ahc->device, ahc->irq_res_type,
				     0, ahc->irq);

	free(ahc, M_DEVBUF);
	return;
}

static int
ahcinitscbdata(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;
	int i;

	scb_data = ahc->scb_data;
	SLIST_INIT(&scb_data->free_scbs);
	SLIST_INIT(&scb_data->sg_maps);

	/* Allocate SCB resources */
	scb_data->scbarray =
	    (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX,
				 M_DEVBUF, M_NOWAIT);
	if (scb_data->scbarray == NULL)
		return (ENOMEM);
	bzero(scb_data->scbarray, sizeof(struct scb) * AHC_SCB_MAX);

	/* Determine the number of hardware SCBs and initialize them */

	scb_data->maxhscbs = ahc_probe_scbs(ahc);
	/* SCB 0 heads the free list */
	ahc_outb(ahc, FREE_SCBH, 0);
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		ahc_outb(ahc, SCBPTR, i);

		/* Clear the control byte. */
		ahc_outb(ahc, SCB_CONTROL, 0);

		/* Set the next pointer */
		ahc_outb(ahc, SCB_NEXT, i+1);

		/* Make the tag number invalid */
		ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
	}

	/* Make sure that the last SCB terminates the free list */
	ahc_outb(ahc, SCBPTR, i-1);
	ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);

	/* Ensure we clear the 0 SCB's control byte. */
	ahc_outb(ahc, SCBPTR, 0);
	ahc_outb(ahc, SCB_CONTROL, 0);

	scb_data->maxhscbs = i;

	if (ahc->scb_data->maxhscbs == 0)
		panic("%s: No SCB space found", ahc_name(ahc));

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 */

	/* DMA tag for our hardware scb structures */
	if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX * sizeof(struct hardware_scb),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocation for our ccbs */
	if (bus_dmamem_alloc(scb_data->hscb_dmat, (void **)&scb_data->hscbs,
			     BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them */
	bus_dmamap_load(scb_data->hscb_dmat, scb_data->hscb_dmamap,
			scb_data->hscbs,
			AHC_SCB_MAX * sizeof(struct hardware_scb),
			ahcdmamapcb, &scb_data->hscb_busaddr, /*flags*/0);

	scb_data->init_level++;

	/* DMA tag for our sense buffers */
	if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX * sizeof(struct scsi_sense_data),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sense_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocate them */
	if (bus_dmamem_alloc(scb_data->sense_dmat, (void **)&scb_data->sense,
			     BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them */
	bus_dmamap_load(scb_data->sense_dmat, scb_data->sense_dmamap,
			scb_data->sense,
			AHC_SCB_MAX * sizeof(struct scsi_sense_data),
			ahcdmamapcb, &scb_data->sense_busaddr, /*flags*/0);

	scb_data->init_level++;
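
	/*
	 * Each scb_data->init_level++ above and below records how far this
	 * initialization has progressed.  ahcfiniscbdata() switches on the
	 * same counter (cases 7 through 1, falling through) so that a
	 * failure here only unwinds the resources that were actually set up.
	 */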

	/*
	 * DMA tag for our S/G structures.  We allocate in page sized chunks.
	 */
	if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sg_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Perform initial CCB allocation */
	bzero(scb_data->hscbs, AHC_SCB_MAX * sizeof(struct hardware_scb));
	ahcallocscbs(ahc);

	if (scb_data->numscbs == 0) {
		printf("%s: ahc_init_scb_data - "
		       "Unable to allocate initial scbs\n",
		       ahc_name(ahc));
		goto error_exit;
	}

	/*
	 * Note that we were successful
	 */
	return 0;

error_exit:

	return ENOMEM;
}

static void
ahcfiniscbdata(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;

	scb_data = ahc->scb_data;

	switch (scb_data->init_level) {
	default:
	case 7:
	{
		struct sg_map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
			bus_dmamap_unload(scb_data->sg_dmat,
					  sg_map->sg_dmamap);
			bus_dmamem_free(scb_data->sg_dmat, sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		bus_dma_tag_destroy(scb_data->sg_dmat);
	}
	case 6:
		bus_dmamap_unload(scb_data->sense_dmat,
				  scb_data->sense_dmamap);
	case 5:
		bus_dmamem_free(scb_data->sense_dmat, scb_data->sense,
				scb_data->sense_dmamap);
		bus_dmamap_destroy(scb_data->sense_dmat,
				   scb_data->sense_dmamap);
	case 4:
		bus_dma_tag_destroy(scb_data->sense_dmat);
	case 3:
		bus_dmamap_unload(scb_data->hscb_dmat, scb_data->hscb_dmamap);
	case 2:
		bus_dmamem_free(scb_data->hscb_dmat, scb_data->hscbs,
				scb_data->hscb_dmamap);
		bus_dmamap_destroy(scb_data->hscb_dmat, scb_data->hscb_dmamap);
	case 1:
		bus_dma_tag_destroy(scb_data->hscb_dmat);
		break;
	}
	if (scb_data->scbarray != NULL)
		free(scb_data->scbarray, M_DEVBUF);
}

static void
ahcdmamapcb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	baddr = (bus_addr_t *)arg;
	*baddr = segs->ds_addr;
}

int
ahc_reset(struct ahc_softc *ahc)
{
	u_int	sblkctl;
	int	wait;

#ifdef AHC_DUMP_SEQ
	ahc_dumpseq(ahc);
#endif
	ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause);
	/*
	 * Ensure that the reset has finished
	 */
	wait = 1000;
	do {
		DELAY(1000);
	} while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK));

	if (wait == 0) {
		printf("%s: WARNING - Failed chip reset!  "
		       "Trying to initialize anyway.\n", ahc_name(ahc));
	}
	ahc_outb(ahc, HCNTRL, ahc->pause);

	/* Determine channel configuration */
	sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE);
	/* No Twin Channel PCI cards */
	if ((ahc->chip & AHC_PCI) != 0)
		sblkctl &= ~SELBUSB;
	switch (sblkctl) {
	case 0:
		/* Single Narrow Channel */
		break;
	case 2:
		/* Wide Channel */
		ahc->features |= AHC_WIDE;
		break;
	case 8:
		/* Twin Channel */
		ahc->features |= AHC_TWIN;
		break;
	default:
		printf(" Unsupported adapter type.  Ignoring\n");
		return(-1);
	}
	return (0);
}
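
/*
 * The AHC_SYNCRATE_ULTRA2/ULTRA/FAST constants defined above are starting
 * indices into ahc_syncrates[]: ahc_find_syncrate() begins its search at
 * ahc_syncrates[maxsync], so a controller limited to Fast SCSI never
 * considers the Ultra or Ultra2 entries.
 */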
Ignoring\n"); 1018 return(-1); 1019 } 1020 return (0); 1021 } 1022 1023 /* 1024 * Called when we have an active connection to a target on the bus, 1025 * this function finds the nearest syncrate to the input period limited 1026 * by the capabilities of the bus connectivity of the target. 1027 */ 1028 static struct ahc_syncrate * 1029 ahc_devlimited_syncrate(struct ahc_softc *ahc, u_int *period) { 1030 u_int maxsync; 1031 1032 if ((ahc->features & AHC_ULTRA2) != 0) { 1033 if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0 1034 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) { 1035 maxsync = AHC_SYNCRATE_ULTRA2; 1036 } else { 1037 maxsync = AHC_SYNCRATE_ULTRA; 1038 } 1039 } else if ((ahc->features & AHC_ULTRA) != 0) { 1040 maxsync = AHC_SYNCRATE_ULTRA; 1041 } else { 1042 maxsync = AHC_SYNCRATE_FAST; 1043 } 1044 return (ahc_find_syncrate(ahc, period, maxsync)); 1045 } 1046 1047 /* 1048 * Look up the valid period to SCSIRATE conversion in our table. 1049 * Return the period and offset that should be sent to the target 1050 * if this was the beginning of an SDTR. 1051 */ 1052 static struct ahc_syncrate * 1053 ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, u_int maxsync) 1054 { 1055 struct ahc_syncrate *syncrate; 1056 1057 syncrate = &ahc_syncrates[maxsync]; 1058 while ((syncrate->rate != NULL) 1059 && ((ahc->features & AHC_ULTRA2) == 0 1060 || (syncrate->sxfr_ultra2 != 0))) { 1061 1062 if (*period <= syncrate->period) { 1063 /* 1064 * When responding to a target that requests 1065 * sync, the requested rate may fall between 1066 * two rates that we can output, but still be 1067 * a rate that we can receive. Because of this, 1068 * we want to respond to the target with 1069 * the same rate that it sent to us even 1070 * if the period we use to send data to it 1071 * is lower. Only lower the response period 1072 * if we must. 1073 */ 1074 if (syncrate == &ahc_syncrates[maxsync]) { 1075 *period = syncrate->period; 1076 } 1077 break; 1078 } 1079 syncrate++; 1080 } 1081 1082 if ((*period == 0) 1083 || (syncrate->rate == NULL) 1084 || ((ahc->features & AHC_ULTRA2) != 0 1085 && (syncrate->sxfr_ultra2 == 0))) { 1086 /* Use asynchronous transfers. 
*/ 1087 *period = 0; 1088 syncrate = NULL; 1089 } 1090 return (syncrate); 1091 } 1092 1093 static u_int 1094 ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync) 1095 { 1096 struct ahc_syncrate *syncrate; 1097 1098 if ((ahc->features & AHC_ULTRA2) != 0) { 1099 scsirate &= SXFR_ULTRA2; 1100 } else { 1101 scsirate &= SXFR; 1102 } 1103 1104 syncrate = &ahc_syncrates[maxsync]; 1105 while (syncrate->rate != NULL) { 1106 1107 if ((ahc->features & AHC_ULTRA2) != 0) { 1108 if (syncrate->sxfr_ultra2 == 0) 1109 break; 1110 else if (scsirate == syncrate->sxfr_ultra2) 1111 return (syncrate->period); 1112 } else if (scsirate == (syncrate->sxfr & ~ULTRA_SXFR)) { 1113 return (syncrate->period); 1114 } 1115 syncrate++; 1116 } 1117 return (0); /* async */ 1118 } 1119 1120 static void 1121 ahc_validate_offset(struct ahc_softc *ahc, struct ahc_syncrate *syncrate, 1122 u_int *offset, int wide) 1123 { 1124 u_int maxoffset; 1125 1126 /* Limit offset to what we can do */ 1127 if (syncrate == NULL) { 1128 maxoffset = 0; 1129 } else if ((ahc->features & AHC_ULTRA2) != 0) { 1130 maxoffset = MAX_OFFSET_ULTRA2; 1131 } else { 1132 if (wide) 1133 maxoffset = MAX_OFFSET_16BIT; 1134 else 1135 maxoffset = MAX_OFFSET_8BIT; 1136 } 1137 *offset = MIN(*offset, maxoffset); 1138 } 1139 1140 static void 1141 ahc_update_target_msg_request(struct ahc_softc *ahc, 1142 struct ahc_devinfo *devinfo, 1143 struct ahc_initiator_tinfo *tinfo, 1144 int force, int paused) 1145 { 1146 u_int targ_msg_req_orig; 1147 1148 targ_msg_req_orig = ahc->targ_msg_req; 1149 if (tinfo->current.period != tinfo->goal.period 1150 || tinfo->current.width != tinfo->goal.width 1151 || tinfo->current.offset != tinfo->goal.offset 1152 || (force 1153 && (tinfo->goal.period != 0 1154 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT))) 1155 ahc->targ_msg_req |= devinfo->target_mask; 1156 else 1157 ahc->targ_msg_req &= ~devinfo->target_mask; 1158 1159 if (ahc->targ_msg_req != targ_msg_req_orig) { 1160 /* Update the message request bit for this target */ 1161 if ((ahc->features & AHC_HS_MAILBOX) != 0) { 1162 if (paused) { 1163 ahc_outb(ahc, TARGET_MSG_REQUEST, 1164 ahc->targ_msg_req & 0xFF); 1165 ahc_outb(ahc, TARGET_MSG_REQUEST + 1, 1166 (ahc->targ_msg_req >> 8) & 0xFF); 1167 } else { 1168 ahc_outb(ahc, HS_MAILBOX, 1169 0x01 << HOST_MAILBOX_SHIFT); 1170 } 1171 } else { 1172 if (!paused) 1173 pause_sequencer(ahc); 1174 1175 ahc_outb(ahc, TARGET_MSG_REQUEST, 1176 ahc->targ_msg_req & 0xFF); 1177 ahc_outb(ahc, TARGET_MSG_REQUEST + 1, 1178 (ahc->targ_msg_req >> 8) & 0xFF); 1179 1180 if (!paused) 1181 unpause_sequencer(ahc, /*unpause always*/FALSE); 1182 } 1183 } 1184 } 1185 1186 static int 1187 ahc_create_path(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1188 struct cam_path **path) 1189 { 1190 path_id_t path_id; 1191 1192 if (devinfo->channel == 'B') 1193 path_id = cam_sim_path(ahc->sim_b); 1194 else 1195 path_id = cam_sim_path(ahc->sim); 1196 1197 return (xpt_create_path(path, /*periph*/NULL, 1198 path_id, devinfo->target, 1199 devinfo->lun)); 1200 } 1201 1202 static void 1203 ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1204 struct cam_path *path, struct ahc_syncrate *syncrate, 1205 u_int period, u_int offset, u_int type, int paused) 1206 { 1207 struct ahc_initiator_tinfo *tinfo; 1208 struct tmode_tstate *tstate; 1209 u_int old_period; 1210 u_int old_offset; 1211 int active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; 1212 1213 if (syncrate == NULL) { 1214 period = 0; 1215 offset = 0; 1216 } 1217 1218 tinfo = 
ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 1219 devinfo->target, &tstate); 1220 old_period = tinfo->current.period; 1221 old_offset = tinfo->current.offset; 1222 1223 if ((type & AHC_TRANS_CUR) != 0 1224 && (old_period != period || old_offset != offset)) { 1225 struct cam_path *path2; 1226 u_int scsirate; 1227 1228 scsirate = tinfo->scsirate; 1229 if ((ahc->features & AHC_ULTRA2) != 0) { 1230 1231 scsirate &= ~SXFR_ULTRA2; 1232 1233 if (syncrate != NULL) { 1234 scsirate |= syncrate->sxfr_ultra2; 1235 } 1236 1237 if (active) 1238 ahc_outb(ahc, SCSIOFFSET, offset); 1239 } else { 1240 1241 scsirate &= ~(SXFR|SOFS); 1242 /* 1243 * Ensure Ultra mode is set properly for 1244 * this target. 1245 */ 1246 tstate->ultraenb &= ~devinfo->target_mask; 1247 if (syncrate != NULL) { 1248 if (syncrate->sxfr & ULTRA_SXFR) { 1249 tstate->ultraenb |= 1250 devinfo->target_mask; 1251 } 1252 scsirate |= syncrate->sxfr & SXFR; 1253 scsirate |= offset & SOFS; 1254 } 1255 if (active) { 1256 u_int sxfrctl0; 1257 1258 sxfrctl0 = ahc_inb(ahc, SXFRCTL0); 1259 sxfrctl0 &= ~FAST20; 1260 if (tstate->ultraenb & devinfo->target_mask) 1261 sxfrctl0 |= FAST20; 1262 ahc_outb(ahc, SXFRCTL0, sxfrctl0); 1263 } 1264 } 1265 if (active) 1266 ahc_outb(ahc, SCSIRATE, scsirate); 1267 1268 tinfo->scsirate = scsirate; 1269 tinfo->current.period = period; 1270 tinfo->current.offset = offset; 1271 1272 /* Update the syncrates in any pending scbs */ 1273 ahc_update_pending_syncrates(ahc); 1274 1275 /* 1276 * If possible, tell the SCSI layer about the 1277 * new transfer parameters. 1278 */ 1279 /* If possible, update the XPT's notion of our transfer rate */ 1280 path2 = NULL; 1281 if (path == NULL) { 1282 int error; 1283 1284 error = ahc_create_path(ahc, devinfo, &path2); 1285 if (error == CAM_REQ_CMP) 1286 path = path2; 1287 else 1288 path2 = NULL; 1289 } 1290 1291 if (path != NULL) { 1292 struct ccb_trans_settings neg; 1293 1294 neg.sync_period = period; 1295 neg.sync_offset = offset; 1296 neg.valid = CCB_TRANS_SYNC_RATE_VALID 1297 | CCB_TRANS_SYNC_OFFSET_VALID; 1298 xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1); 1299 xpt_async(AC_TRANSFER_NEG, path, &neg); 1300 } 1301 1302 if (path2 != NULL) 1303 xpt_free_path(path2); 1304 1305 if (bootverbose) { 1306 if (offset != 0) { 1307 printf("%s: target %d synchronous at %sMHz, " 1308 "offset = 0x%x\n", ahc_name(ahc), 1309 devinfo->target, syncrate->rate, offset); 1310 } else { 1311 printf("%s: target %d using " 1312 "asynchronous transfers\n", 1313 ahc_name(ahc), devinfo->target); 1314 } 1315 } 1316 } 1317 1318 if ((type & AHC_TRANS_GOAL) != 0) { 1319 tinfo->goal.period = period; 1320 tinfo->goal.offset = offset; 1321 } 1322 1323 if ((type & AHC_TRANS_USER) != 0) { 1324 tinfo->user.period = period; 1325 tinfo->user.offset = offset; 1326 } 1327 1328 ahc_update_target_msg_request(ahc, devinfo, tinfo, 1329 /*force*/FALSE, 1330 paused); 1331 } 1332 1333 static void 1334 ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1335 struct cam_path *path, u_int width, u_int type, int paused) 1336 { 1337 struct ahc_initiator_tinfo *tinfo; 1338 struct tmode_tstate *tstate; 1339 u_int oldwidth; 1340 int active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; 1341 1342 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 1343 devinfo->target, &tstate); 1344 oldwidth = tinfo->current.width; 1345 1346 if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) { 1347 struct cam_path *path2; 1348 u_int scsirate; 1349 1350 scsirate = tinfo->scsirate; 1351 scsirate &= 
~WIDEXFER; 1352 if (width == MSG_EXT_WDTR_BUS_16_BIT) 1353 scsirate |= WIDEXFER; 1354 1355 tinfo->scsirate = scsirate; 1356 1357 if (active) 1358 ahc_outb(ahc, SCSIRATE, scsirate); 1359 1360 tinfo->current.width = width; 1361 1362 /* If possible, update the XPT's notion of our transfer rate */ 1363 path2 = NULL; 1364 if (path == NULL) { 1365 int error; 1366 1367 error = ahc_create_path(ahc, devinfo, &path2); 1368 if (error == CAM_REQ_CMP) 1369 path = path2; 1370 else 1371 path2 = NULL; 1372 } 1373 1374 if (path != NULL) { 1375 struct ccb_trans_settings neg; 1376 1377 neg.bus_width = width; 1378 neg.valid = CCB_TRANS_BUS_WIDTH_VALID; 1379 xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1); 1380 xpt_async(AC_TRANSFER_NEG, path, &neg); 1381 } 1382 1383 if (path2 != NULL) 1384 xpt_free_path(path2); 1385 1386 if (bootverbose) { 1387 printf("%s: target %d using %dbit transfers\n", 1388 ahc_name(ahc), devinfo->target, 1389 8 * (0x01 << width)); 1390 } 1391 } 1392 if ((type & AHC_TRANS_GOAL) != 0) 1393 tinfo->goal.width = width; 1394 if ((type & AHC_TRANS_USER) != 0) 1395 tinfo->user.width = width; 1396 1397 ahc_update_target_msg_request(ahc, devinfo, tinfo, 1398 /*force*/FALSE, paused); 1399 } 1400 1401 static void 1402 ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, int enable) 1403 { 1404 struct ahc_initiator_tinfo *tinfo; 1405 struct tmode_tstate *tstate; 1406 1407 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 1408 devinfo->target, &tstate); 1409 1410 if (enable) 1411 tstate->tagenable |= devinfo->target_mask; 1412 else 1413 tstate->tagenable &= ~devinfo->target_mask; 1414 } 1415 1416 /* 1417 * Attach all the sub-devices we can find 1418 */ 1419 int 1420 ahc_attach(struct ahc_softc *ahc) 1421 { 1422 struct ccb_setasync csa; 1423 struct cam_devq *devq; 1424 int bus_id; 1425 int bus_id2; 1426 struct cam_sim *sim; 1427 struct cam_sim *sim2; 1428 struct cam_path *path; 1429 struct cam_path *path2; 1430 int count; 1431 int s; 1432 int error; 1433 1434 count = 0; 1435 sim = NULL; 1436 sim2 = NULL; 1437 1438 s = splcam(); 1439 /* Hook up our interrupt handler */ 1440 if ((error = bus_setup_intr(ahc->device, ahc->irq, INTR_TYPE_CAM, 1441 ahc_intr, ahc, &ahc->ih)) != 0) { 1442 device_printf(ahc->device, "bus_setup_intr() failed: %d\n", 1443 error); 1444 goto fail; 1445 } 1446 1447 /* 1448 * Attach secondary channel first if the user has 1449 * declared it the primary channel. 1450 */ 1451 if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) { 1452 bus_id = 1; 1453 bus_id2 = 0; 1454 } else { 1455 bus_id = 0; 1456 bus_id2 = 1; 1457 } 1458 1459 /* 1460 * Create the device queue for our SIM(s). 
1461 */ 1462 devq = cam_simq_alloc(AHC_SCB_MAX); 1463 if (devq == NULL) 1464 goto fail; 1465 1466 /* 1467 * Construct our first channel SIM entry 1468 */ 1469 sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc, ahc->unit, 1470 1, AHC_SCB_MAX, devq); 1471 if (sim == NULL) { 1472 cam_simq_free(devq); 1473 goto fail; 1474 } 1475 1476 if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) { 1477 cam_sim_free(sim, /*free_devq*/TRUE); 1478 sim = NULL; 1479 goto fail; 1480 } 1481 1482 if (xpt_create_path(&path, /*periph*/NULL, 1483 cam_sim_path(sim), CAM_TARGET_WILDCARD, 1484 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 1485 xpt_bus_deregister(cam_sim_path(sim)); 1486 cam_sim_free(sim, /*free_devq*/TRUE); 1487 sim = NULL; 1488 goto fail; 1489 } 1490 1491 xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5); 1492 csa.ccb_h.func_code = XPT_SASYNC_CB; 1493 csa.event_enable = AC_LOST_DEVICE; 1494 csa.callback = ahc_async; 1495 csa.callback_arg = sim; 1496 xpt_action((union ccb *)&csa); 1497 count++; 1498 1499 if (ahc->features & AHC_TWIN) { 1500 sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc", 1501 ahc, ahc->unit, 1, 1502 AHC_SCB_MAX, devq); 1503 1504 if (sim2 == NULL) { 1505 printf("ahc_attach: Unable to attach second " 1506 "bus due to resource shortage"); 1507 goto fail; 1508 } 1509 1510 if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) { 1511 printf("ahc_attach: Unable to attach second " 1512 "bus due to resource shortage"); 1513 /* 1514 * We do not want to destroy the device queue 1515 * because the first bus is using it. 1516 */ 1517 cam_sim_free(sim2, /*free_devq*/FALSE); 1518 goto fail; 1519 } 1520 1521 if (xpt_create_path(&path2, /*periph*/NULL, 1522 cam_sim_path(sim2), 1523 CAM_TARGET_WILDCARD, 1524 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 1525 xpt_bus_deregister(cam_sim_path(sim2)); 1526 cam_sim_free(sim2, /*free_devq*/FALSE); 1527 sim2 = NULL; 1528 goto fail; 1529 } 1530 xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5); 1531 csa.ccb_h.func_code = XPT_SASYNC_CB; 1532 csa.event_enable = AC_LOST_DEVICE; 1533 csa.callback = ahc_async; 1534 csa.callback_arg = sim2; 1535 xpt_action((union ccb *)&csa); 1536 count++; 1537 } 1538 1539 fail: 1540 if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) { 1541 ahc->sim_b = sim; 1542 ahc->path_b = path; 1543 ahc->sim = sim2; 1544 ahc->path = path2; 1545 } else { 1546 ahc->sim = sim; 1547 ahc->path = path; 1548 ahc->sim_b = sim2; 1549 ahc->path_b = path2; 1550 } 1551 splx(s); 1552 return (count); 1553 } 1554 1555 static void 1556 ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1557 struct scb *scb) 1558 { 1559 role_t role; 1560 int our_id; 1561 1562 if (scb->ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { 1563 our_id = scb->ccb->ccb_h.target_id; 1564 role = ROLE_TARGET; 1565 } else { 1566 our_id = SCB_CHANNEL(scb) == 'B' ? 
ahc->our_id_b : ahc->our_id; 1567 role = ROLE_INITIATOR; 1568 } 1569 ahc_compile_devinfo(devinfo, our_id, SCB_TARGET(scb), 1570 SCB_LUN(scb), SCB_CHANNEL(scb), role); 1571 } 1572 1573 static void 1574 ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 1575 { 1576 u_int saved_tcl; 1577 role_t role; 1578 int our_id; 1579 1580 if (ahc_inb(ahc, SSTAT0) & TARGET) 1581 role = ROLE_TARGET; 1582 else 1583 role = ROLE_INITIATOR; 1584 1585 if (role == ROLE_TARGET 1586 && (ahc->features & AHC_MULTI_TID) != 0 1587 && (ahc_inb(ahc, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) { 1588 /* We were selected, so pull our id from TARGIDIN */ 1589 our_id = ahc_inb(ahc, TARGIDIN) & OID; 1590 } else if ((ahc->features & AHC_ULTRA2) != 0) 1591 our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID; 1592 else 1593 our_id = ahc_inb(ahc, SCSIID) & OID; 1594 1595 saved_tcl = ahc_inb(ahc, SAVED_TCL); 1596 ahc_compile_devinfo(devinfo, our_id, TCL_TARGET(saved_tcl), 1597 TCL_LUN(saved_tcl), TCL_CHANNEL(ahc, saved_tcl), 1598 role); 1599 } 1600 1601 static void 1602 ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target, 1603 u_int lun, char channel, role_t role) 1604 { 1605 devinfo->our_scsiid = our_id; 1606 devinfo->target = target; 1607 devinfo->lun = lun; 1608 devinfo->target_offset = target; 1609 devinfo->channel = channel; 1610 devinfo->role = role; 1611 if (channel == 'B') 1612 devinfo->target_offset += 8; 1613 devinfo->target_mask = (0x01 << devinfo->target_offset); 1614 } 1615 1616 /* 1617 * Catch an interrupt from the adapter 1618 */ 1619 void 1620 ahc_intr(void *arg) 1621 { 1622 struct ahc_softc *ahc; 1623 u_int intstat; 1624 1625 ahc = (struct ahc_softc *)arg; 1626 1627 intstat = ahc_inb(ahc, INTSTAT); 1628 1629 /* 1630 * Any interrupts to process? 1631 */ 1632 #if NPCI > 0 1633 if ((intstat & INT_PEND) == 0) { 1634 if ((ahc->chip & AHC_PCI) != 0 1635 && (ahc->unsolicited_ints > 500)) { 1636 if ((ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0) 1637 ahc_pci_intr(ahc); 1638 ahc->unsolicited_ints = 0; 1639 } else { 1640 ahc->unsolicited_ints++; 1641 } 1642 return; 1643 } else { 1644 ahc->unsolicited_ints = 0; 1645 } 1646 #else 1647 if ((intstat & INT_PEND) == 0) 1648 return; 1649 #endif 1650 1651 if (intstat & CMDCMPLT) { 1652 ahc_outb(ahc, CLRINT, CLRCMDINT); 1653 ahc_run_qoutfifo(ahc); 1654 if ((ahc->flags & AHC_TARGETMODE) != 0) { 1655 ahc_run_tqinfifo(ahc); 1656 } 1657 } 1658 if (intstat & BRKADRINT) { 1659 /* 1660 * We upset the sequencer :-( 1661 * Lookup the error message 1662 */ 1663 int i, error, num_errors; 1664 1665 error = ahc_inb(ahc, ERROR); 1666 num_errors = sizeof(hard_error)/sizeof(hard_error[0]); 1667 for (i = 0; error != 1 && i < num_errors; i++) 1668 error >>= 1; 1669 panic("%s: brkadrint, %s at seqaddr = 0x%x\n", 1670 ahc_name(ahc), hard_error[i].errmesg, 1671 ahc_inb(ahc, SEQADDR0) | 1672 (ahc_inb(ahc, SEQADDR1) << 8)); 1673 1674 /* Tell everyone that this HBA is no longer availible */ 1675 ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS, 1676 CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, 1677 CAM_NO_HBA); 1678 } 1679 if (intstat & SEQINT) 1680 ahc_handle_seqint(ahc, intstat); 1681 1682 if (intstat & SCSIINT) 1683 ahc_handle_scsiint(ahc, intstat); 1684 } 1685 1686 static struct tmode_tstate * 1687 ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel) 1688 { 1689 struct tmode_tstate *master_tstate; 1690 struct tmode_tstate *tstate; 1691 int i, s; 1692 1693 master_tstate = ahc->enabled_targets[ahc->our_id]; 1694 if (channel == 'B') { 1695 scsi_id += 8; 1696 
master_tstate = ahc->enabled_targets[ahc->our_id_b + 8]; 1697 } 1698 if (ahc->enabled_targets[scsi_id] != NULL 1699 && ahc->enabled_targets[scsi_id] != master_tstate) 1700 panic("%s: ahc_alloc_tstate - Target already allocated", 1701 ahc_name(ahc)); 1702 tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT); 1703 if (tstate == NULL) 1704 return (NULL); 1705 1706 /* 1707 * If we have allocated a master tstate, copy user settings from 1708 * the master tstate (taken from SRAM or the EEPROM) for this 1709 * channel, but reset our current and goal settings to async/narrow 1710 * until an initiator talks to us. 1711 */ 1712 if (master_tstate != NULL) { 1713 bcopy(master_tstate, tstate, sizeof(*tstate)); 1714 bzero(tstate->enabled_luns, sizeof(tstate->enabled_luns)); 1715 tstate->ultraenb = 0; 1716 for (i = 0; i < 16; i++) { 1717 bzero(&tstate->transinfo[i].current, 1718 sizeof(tstate->transinfo[i].current)); 1719 bzero(&tstate->transinfo[i].goal, 1720 sizeof(tstate->transinfo[i].goal)); 1721 } 1722 } else 1723 bzero(tstate, sizeof(*tstate)); 1724 s = splcam(); 1725 ahc->enabled_targets[scsi_id] = tstate; 1726 splx(s); 1727 return (tstate); 1728 } 1729 1730 static void 1731 ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force) 1732 { 1733 struct tmode_tstate *tstate; 1734 1735 /* Don't clean up the entry for our initiator role */ 1736 if ((ahc->flags & AHC_INITIATORMODE) != 0 1737 && ((channel == 'B' && scsi_id == ahc->our_id_b) 1738 || (channel == 'A' && scsi_id == ahc->our_id)) 1739 && force == FALSE) 1740 return; 1741 1742 if (channel == 'B') 1743 scsi_id += 8; 1744 tstate = ahc->enabled_targets[scsi_id]; 1745 if (tstate != NULL) 1746 free(tstate, M_DEVBUF); 1747 ahc->enabled_targets[scsi_id] = NULL; 1748 } 1749 1750 static void 1751 ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) 1752 { 1753 struct tmode_tstate *tstate; 1754 struct tmode_lstate *lstate; 1755 struct ccb_en_lun *cel; 1756 cam_status status; 1757 int target; 1758 int lun; 1759 u_int target_mask; 1760 char channel; 1761 int s; 1762 1763 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate, 1764 /* notfound_failure*/FALSE); 1765 1766 if (status != CAM_REQ_CMP) { 1767 ccb->ccb_h.status = status; 1768 return; 1769 } 1770 1771 cel = &ccb->cel; 1772 target = ccb->ccb_h.target_id; 1773 lun = ccb->ccb_h.target_lun; 1774 channel = SIM_CHANNEL(ahc, sim); 1775 target_mask = 0x01 << target; 1776 if (channel == 'B') 1777 target_mask <<= 8; 1778 1779 if (cel->enable != 0) { 1780 u_int scsiseq; 1781 1782 /* Are we already enabled?? */ 1783 if (lstate != NULL) { 1784 xpt_print_path(ccb->ccb_h.path); 1785 printf("Lun already enabled\n"); 1786 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 1787 return; 1788 } 1789 1790 if (cel->grp6_len != 0 1791 || cel->grp7_len != 0) { 1792 /* 1793 * Don't (yet?) support vendor 1794 * specific commands. 1795 */ 1796 ccb->ccb_h.status = CAM_REQ_INVALID; 1797 printf("Non-zero Group Codes\n"); 1798 return; 1799 } 1800 1801 /* 1802 * Seems to be okay. 1803 * Setup our data structures. 
1804 */ 1805 if (target != CAM_TARGET_WILDCARD && tstate == NULL) { 1806 tstate = ahc_alloc_tstate(ahc, target, channel); 1807 if (tstate == NULL) { 1808 xpt_print_path(ccb->ccb_h.path); 1809 printf("Couldn't allocate tstate\n"); 1810 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 1811 return; 1812 } 1813 } 1814 lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT); 1815 if (lstate == NULL) { 1816 xpt_print_path(ccb->ccb_h.path); 1817 printf("Couldn't allocate lstate\n"); 1818 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 1819 return; 1820 } 1821 bzero(lstate, sizeof(*lstate)); 1822 status = xpt_create_path(&lstate->path, /*periph*/NULL, 1823 xpt_path_path_id(ccb->ccb_h.path), 1824 xpt_path_target_id(ccb->ccb_h.path), 1825 xpt_path_lun_id(ccb->ccb_h.path)); 1826 if (status != CAM_REQ_CMP) { 1827 free(lstate, M_DEVBUF); 1828 xpt_print_path(ccb->ccb_h.path); 1829 printf("Couldn't allocate path\n"); 1830 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 1831 return; 1832 } 1833 SLIST_INIT(&lstate->accept_tios); 1834 SLIST_INIT(&lstate->immed_notifies); 1835 s = splcam(); 1836 pause_sequencer(ahc); 1837 if (target != CAM_TARGET_WILDCARD) { 1838 tstate->enabled_luns[lun] = lstate; 1839 ahc->enabled_luns++; 1840 1841 if ((ahc->features & AHC_MULTI_TID) != 0) { 1842 u_int16_t targid_mask; 1843 1844 targid_mask = ahc_inb(ahc, TARGID) 1845 | (ahc_inb(ahc, TARGID + 1) << 8); 1846 1847 targid_mask |= target_mask; 1848 ahc_outb(ahc, TARGID, targid_mask); 1849 ahc_outb(ahc, TARGID+1, (targid_mask >> 8)); 1850 } else { 1851 int our_id; 1852 char channel; 1853 1854 channel = SIM_CHANNEL(ahc, sim); 1855 our_id = SIM_SCSI_ID(ahc, sim); 1856 1857 /* 1858 * This can only happen if selections 1859 * are not enabled 1860 */ 1861 if (target != our_id) { 1862 u_int sblkctl; 1863 char cur_channel; 1864 int swap; 1865 1866 sblkctl = ahc_inb(ahc, SBLKCTL); 1867 cur_channel = (sblkctl & SELBUSB) 1868 ? 
'B' : 'A'; 1869 if ((ahc->features & AHC_TWIN) == 0) 1870 cur_channel = 'A'; 1871 swap = cur_channel != channel; 1872 if (channel == 'A') 1873 ahc->our_id = target; 1874 else 1875 ahc->our_id_b = target; 1876 1877 if (swap) 1878 ahc_outb(ahc, SBLKCTL, 1879 sblkctl ^ SELBUSB); 1880 1881 ahc_outb(ahc, SCSIID, target); 1882 1883 if (swap) 1884 ahc_outb(ahc, SBLKCTL, sblkctl); 1885 } 1886 } 1887 } else 1888 ahc->black_hole = lstate; 1889 /* Allow select-in operations */ 1890 if (ahc->black_hole != NULL && ahc->enabled_luns > 0) { 1891 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); 1892 scsiseq |= ENSELI; 1893 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); 1894 scsiseq = ahc_inb(ahc, SCSISEQ); 1895 scsiseq |= ENSELI; 1896 ahc_outb(ahc, SCSISEQ, scsiseq); 1897 } 1898 unpause_sequencer(ahc, /*always?*/FALSE); 1899 splx(s); 1900 ccb->ccb_h.status = CAM_REQ_CMP; 1901 xpt_print_path(ccb->ccb_h.path); 1902 printf("Lun now enabled for target mode\n"); 1903 } else { 1904 struct ccb_hdr *elm; 1905 1906 if (lstate == NULL) { 1907 ccb->ccb_h.status = CAM_LUN_INVALID; 1908 return; 1909 } 1910 1911 s = splcam(); 1912 ccb->ccb_h.status = CAM_REQ_CMP; 1913 LIST_FOREACH(elm, &ahc->pending_ccbs, sim_links.le) { 1914 if (elm->func_code == XPT_CONT_TARGET_IO 1915 && !xpt_path_comp(elm->path, ccb->ccb_h.path)){ 1916 printf("CTIO pending\n"); 1917 ccb->ccb_h.status = CAM_REQ_INVALID; 1918 splx(s); 1919 return; 1920 } 1921 } 1922 1923 if (SLIST_FIRST(&lstate->accept_tios) != NULL) { 1924 printf("ATIOs pending\n"); 1925 ccb->ccb_h.status = CAM_REQ_INVALID; 1926 } 1927 1928 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { 1929 printf("INOTs pending\n"); 1930 ccb->ccb_h.status = CAM_REQ_INVALID; 1931 } 1932 1933 if (ccb->ccb_h.status == CAM_REQ_CMP) { 1934 int i, empty; 1935 1936 xpt_print_path(ccb->ccb_h.path); 1937 printf("Target mode disabled\n"); 1938 xpt_free_path(lstate->path); 1939 free(lstate, M_DEVBUF); 1940 1941 pause_sequencer(ahc); 1942 /* Can we clean up the target too? */ 1943 if (target != CAM_TARGET_WILDCARD) { 1944 tstate->enabled_luns[lun] = NULL; 1945 ahc->enabled_luns--; 1946 for (empty = 1, i = 0; i < 8; i++) 1947 if (tstate->enabled_luns[i] != NULL) { 1948 empty = 0; 1949 break; 1950 } 1951 1952 if (empty) { 1953 ahc_free_tstate(ahc, target, channel, 1954 /*force*/FALSE); 1955 if (ahc->features & AHC_MULTI_TID) { 1956 u_int16_t targid_mask; 1957 1958 targid_mask = 1959 ahc_inb(ahc, TARGID) 1960 | (ahc_inb(ahc, TARGID + 1) 1961 << 8); 1962 1963 targid_mask &= ~target_mask; 1964 ahc_outb(ahc, TARGID, 1965 targid_mask); 1966 ahc_outb(ahc, TARGID+1, 1967 (targid_mask >> 8)); 1968 } 1969 } 1970 } else { 1971 1972 ahc->black_hole = NULL; 1973 1974 /* 1975 * We can't allow selections without 1976 * our black hole device. 
1977 */ 1978 empty = TRUE; 1979 } 1980 if (ahc->enabled_luns == 0) { 1981 /* Disallow select-in */ 1982 u_int scsiseq; 1983 1984 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); 1985 scsiseq &= ~ENSELI; 1986 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); 1987 scsiseq = ahc_inb(ahc, SCSISEQ); 1988 scsiseq &= ~ENSELI; 1989 ahc_outb(ahc, SCSISEQ, scsiseq); 1990 } 1991 unpause_sequencer(ahc, /*always?*/FALSE); 1992 } 1993 splx(s); 1994 } 1995 } 1996 1997 static int 1998 ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd) 1999 { 2000 struct tmode_tstate *tstate; 2001 struct tmode_lstate *lstate; 2002 struct ccb_accept_tio *atio; 2003 u_int8_t *byte; 2004 int initiator; 2005 int target; 2006 int lun; 2007 2008 initiator = cmd->initiator_channel >> 4; 2009 target = cmd->targ_id; 2010 lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); 2011 2012 byte = cmd->bytes; 2013 tstate = ahc->enabled_targets[target]; 2014 lstate = NULL; 2015 if (tstate != NULL && lun < 8) 2016 lstate = tstate->enabled_luns[lun]; 2017 2018 /* 2019 * Commands for disabled luns go to the black hole driver. 2020 */ 2021 if (lstate == NULL) { 2022 lstate = ahc->black_hole; 2023 atio = 2024 (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); 2025 } else { 2026 atio = 2027 (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); 2028 } 2029 if (atio == NULL) { 2030 ahc->flags |= AHC_TQINFIFO_BLOCKED; 2031 printf("No ATIOs for incoming command\n"); 2032 /* 2033 * Wait for more ATIOs from the peripheral driver for this lun. 2034 */ 2035 return (1); 2036 } else 2037 ahc->flags &= ~AHC_TQINFIFO_BLOCKED; 2038 SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); 2039 2040 if (lstate == ahc->black_hole) { 2041 /* Fill in the wildcards */ 2042 atio->ccb_h.target_id = target; 2043 atio->ccb_h.target_lun = lun; 2044 } 2045 2046 /* 2047 * Package it up and send it off to 2048 * whomever has this lun enabled. 2049 */ 2050 atio->init_id = initiator; 2051 if (byte[0] != 0xFF) { 2052 /* Tag was included */ 2053 atio->tag_action = *byte++; 2054 atio->tag_id = *byte++; 2055 atio->ccb_h.flags = CAM_TAG_ACTION_VALID; 2056 } else { 2057 byte++; 2058 atio->ccb_h.flags = 0; 2059 } 2060 2061 /* Okay. Now determine the cdb size based on the command code */ 2062 switch (*byte >> CMD_GROUP_CODE_SHIFT) { 2063 case 0: 2064 atio->cdb_len = 6; 2065 break; 2066 case 1: 2067 case 2: 2068 atio->cdb_len = 10; 2069 break; 2070 case 4: 2071 atio->cdb_len = 16; 2072 break; 2073 case 5: 2074 atio->cdb_len = 12; 2075 break; 2076 case 3: 2077 default: 2078 /* Only copy the opcode. */ 2079 atio->cdb_len = 1; 2080 printf("Reserved or VU command code type encountered\n"); 2081 break; 2082 } 2083 bcopy(byte, atio->cdb_io.cdb_bytes, atio->cdb_len); 2084 2085 atio->ccb_h.status |= CAM_CDB_RECVD; 2086 2087 if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { 2088 /* 2089 * We weren't allowed to disconnect. 2090 * We're hanging on the bus until a 2091 * continue target I/O comes in response 2092 * to this accept tio. 2093 */ 2094 #if 0 2095 printf("Received Immediate Command %d:%d:%d - %p\n", 2096 initiator, target, lun, ahc->pending_device); 2097 #endif 2098 ahc->pending_device = lstate; 2099 } 2100 xpt_done((union ccb*)atio); 2101 return (0); 2102 } 2103 2104 static void 2105 ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat) 2106 { 2107 struct scb *scb; 2108 struct ahc_devinfo devinfo; 2109 2110 ahc_fetch_devinfo(ahc, &devinfo); 2111 2112 /* 2113 * Clear the upper byte that holds SEQINT status 2114 * codes and clear the SEQINT bit. 
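 * The sequencer pauses itself when it posts a SEQINT, so it is safe to
 * acknowledge the interrupt here first; the cause remains available in
 * our copy of intstat and is decoded by the switch below.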
We will unpause 2115 * the sequencer, if appropriate, after servicing 2116 * the request. 2117 */ 2118 ahc_outb(ahc, CLRINT, CLRSEQINT); 2119 switch (intstat & SEQINT_MASK) { 2120 case NO_MATCH: 2121 { 2122 /* Ensure we don't leave the selection hardware on */ 2123 ahc_outb(ahc, SCSISEQ, 2124 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 2125 2126 printf("%s:%c:%d: no active SCB for reconnecting " 2127 "target - issuing BUS DEVICE RESET\n", 2128 ahc_name(ahc), devinfo.channel, devinfo.target); 2129 printf("SAVED_TCL == 0x%x, ARG_1 == 0x%x, SEQ_FLAGS == 0x%x\n", 2130 ahc_inb(ahc, SAVED_TCL), ahc_inb(ahc, ARG_1), 2131 ahc_inb(ahc, SEQ_FLAGS)); 2132 ahc->msgout_buf[0] = MSG_BUS_DEV_RESET; 2133 ahc->msgout_len = 1; 2134 ahc->msgout_index = 0; 2135 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2136 ahc_outb(ahc, MSG_OUT, HOST_MSG); 2137 ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, LASTPHASE) | ATNO); 2138 break; 2139 } 2140 case UPDATE_TMSG_REQ: 2141 ahc_outb(ahc, TARGET_MSG_REQUEST, ahc->targ_msg_req & 0xFF); 2142 ahc_outb(ahc, TARGET_MSG_REQUEST + 1, 2143 (ahc->targ_msg_req >> 8) & 0xFF); 2144 ahc_outb(ahc, HS_MAILBOX, 0); 2145 break; 2146 case SEND_REJECT: 2147 { 2148 u_int rejbyte = ahc_inb(ahc, ACCUM); 2149 printf("%s:%c:%d: Warning - unknown message received from " 2150 "target (0x%x). Rejecting\n", 2151 ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte); 2152 break; 2153 } 2154 case NO_IDENT: 2155 { 2156 /* 2157 * The reconnecting target either did not send an identify 2158 * message, or did, but we didn't find and SCB to match and 2159 * before it could respond to our ATN/abort, it hit a dataphase. 2160 * The only safe thing to do is to blow it away with a bus 2161 * reset. 2162 */ 2163 int found; 2164 2165 printf("%s:%c:%d: Target did not send an IDENTIFY message. " 2166 "LASTPHASE = 0x%x, SAVED_TCL == 0x%x\n", 2167 ahc_name(ahc), devinfo.channel, devinfo.target, 2168 ahc_inb(ahc, LASTPHASE), ahc_inb(ahc, SAVED_TCL)); 2169 found = ahc_reset_channel(ahc, devinfo.channel, 2170 /*initiate reset*/TRUE); 2171 printf("%s: Issued Channel %c Bus Reset. " 2172 "%d SCBs aborted\n", ahc_name(ahc), devinfo.channel, 2173 found); 2174 return; 2175 } 2176 case BAD_PHASE: 2177 if (ahc_inb(ahc, LASTPHASE) == P_BUSFREE) { 2178 printf("%s:%c:%d: Missed busfree.\n", ahc_name(ahc), 2179 devinfo.channel, devinfo.target); 2180 restart_sequencer(ahc); 2181 return; 2182 } else { 2183 printf("%s:%c:%d: unknown scsi bus phase. Attempting " 2184 "to continue\n", ahc_name(ahc), devinfo.channel, 2185 devinfo.target); 2186 } 2187 break; 2188 case BAD_STATUS: 2189 { 2190 u_int scb_index; 2191 struct hardware_scb *hscb; 2192 struct ccb_scsiio *csio; 2193 /* 2194 * The sequencer will notify us when a command 2195 * has an error that would be of interest to 2196 * the kernel. This allows us to leave the sequencer 2197 * running in the common case of command completes 2198 * without error. The sequencer will already have 2199 * dma'd the SCB back up to us, so we can reference 2200 * the in kernel copy directly. 2201 */ 2202 scb_index = ahc_inb(ahc, SCB_TAG); 2203 scb = &ahc->scb_data->scbarray[scb_index]; 2204 2205 /* 2206 * Set the default return value to 0 (don't 2207 * send sense). The sense code will change 2208 * this if needed. 
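 * RETURN_1 is the scratch location through which we hand a disposition
 * back to the sequencer: it stays 0 for a plain command complete and is
 * set to SEND_SENSE further down once a request sense command has been
 * built into the SCB.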
2209 */ 2210 ahc_outb(ahc, RETURN_1, 0); 2211 if (!(scb_index < ahc->scb_data->numscbs 2212 && (scb->flags & SCB_ACTIVE) != 0)) { 2213 printf("%s:%c:%d: ahc_intr - referenced scb " 2214 "not valid during seqint 0x%x scb(%d)\n", 2215 ahc_name(ahc), devinfo.channel, 2216 devinfo.target, intstat, scb_index); 2217 goto unpause; 2218 } 2219 2220 hscb = scb->hscb; 2221 2222 /* Don't want to clobber the original sense code */ 2223 if ((scb->flags & SCB_SENSE) != 0) { 2224 /* 2225 * Clear the SCB_SENSE flag and have 2226 * the sequencer do a normal command 2227 * complete. 2228 */ 2229 scb->flags &= ~SCB_SENSE; 2230 ahcsetccbstatus(scb->ccb, CAM_AUTOSENSE_FAIL); 2231 break; 2232 } 2233 ahcsetccbstatus(scb->ccb, CAM_SCSI_STATUS_ERROR); 2234 /* Freeze the queue until the client sees the error. */ 2235 ahc_freeze_devq(ahc, scb->ccb->ccb_h.path); 2236 ahc_freeze_ccb(scb->ccb); 2237 csio = &scb->ccb->csio; 2238 csio->scsi_status = hscb->status; 2239 switch (hscb->status) { 2240 case SCSI_STATUS_OK: 2241 printf("%s: Interrupted for status of 0???\n", 2242 ahc_name(ahc)); 2243 break; 2244 case SCSI_STATUS_CMD_TERMINATED: 2245 case SCSI_STATUS_CHECK_COND: 2246 #ifdef AHC_DEBUG 2247 if (ahc_debug & AHC_SHOWSENSE) { 2248 xpt_print_path(csio->ccb_h.path); 2249 printf("SCB %d: requests Check Status\n", 2250 scb->hscb->tag); 2251 } 2252 #endif 2253 2254 if ((csio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) { 2255 struct ahc_dma_seg *sg; 2256 struct scsi_sense *sc; 2257 struct ahc_initiator_tinfo *tinfo; 2258 struct tmode_tstate *tstate; 2259 2260 sg = scb->sg_list; 2261 sc = (struct scsi_sense *)(&hscb->cmdstore); 2262 /* 2263 * Save off the residual if there is one. 2264 */ 2265 if (hscb->residual_SG_count != 0) 2266 ahc_calc_residual(scb); 2267 else 2268 scb->ccb->csio.resid = 0; 2269 2270 #ifdef AHC_DEBUG 2271 if (ahc_debug & AHC_SHOWSENSE) { 2272 xpt_print_path(csio->ccb_h.path); 2273 printf("Sending Sense\n"); 2274 } 2275 #endif 2276 sg->addr = ahc->scb_data->sense_busaddr 2277 + (hscb->tag*sizeof(struct scsi_sense_data)); 2278 sg->len = MIN(sizeof(struct scsi_sense_data), 2279 csio->sense_len); 2280 2281 sc->opcode = REQUEST_SENSE; 2282 sc->byte2 = SCB_LUN(scb) << 5; 2283 sc->unused[0] = 0; 2284 sc->unused[1] = 0; 2285 sc->length = sg->len; 2286 sc->control = 0; 2287 2288 /* 2289 * Would be nice to preserve DISCENB here, 2290 * but due to the way we page SCBs, we can't. 2291 */ 2292 hscb->control = 0; 2293 2294 /* 2295 * This request sense could be because the 2296 * device lost power or in some other 2297 * way has lost our transfer negotiations. 2298 * Renegotiate if appropriate. 2299 */ 2300 tinfo = ahc_fetch_transinfo(ahc, 2301 devinfo.channel, 2302 devinfo.our_scsiid, 2303 devinfo.target, 2304 &tstate); 2305 ahc_update_target_msg_request(ahc, &devinfo, 2306 tinfo, 2307 /*force*/TRUE, 2308 /*paused*/TRUE); 2309 hscb->status = 0; 2310 hscb->SG_count = 1; 2311 hscb->SG_pointer = scb->sg_list_phys; 2312 hscb->data = sg->addr; 2313 hscb->datalen = sg->len; 2314 hscb->cmdpointer = hscb->cmdstore_busaddr; 2315 hscb->cmdlen = sizeof(*sc); 2316 scb->sg_count = hscb->SG_count; 2317 scb->flags |= SCB_SENSE; 2318 /* 2319 * Ensure the target is busy since this 2320 * will be an untagged request. 2321 */ 2322 ahc_busy_tcl(ahc, scb); 2323 ahc_outb(ahc, RETURN_1, SEND_SENSE); 2324 2325 /* 2326 * Ensure we have enough time to actually 2327 * retrieve the sense.
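 * The CCB's original timeout may be close to expiring by the time the
 * check condition arrives, so the pending timeout is cancelled and
 * re-armed for a fresh five seconds to cover retrieval of the sense data.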
2328 */ 2329 untimeout(ahc_timeout, (caddr_t)scb, 2330 scb->ccb->ccb_h.timeout_ch); 2331 scb->ccb->ccb_h.timeout_ch = 2332 timeout(ahc_timeout, (caddr_t)scb, 5 * hz); 2333 } 2334 break; 2335 case SCSI_STATUS_BUSY: 2336 case SCSI_STATUS_QUEUE_FULL: 2337 /* 2338 * Requeue any transactions that haven't been 2339 * sent yet. 2340 */ 2341 ahc_freeze_devq(ahc, scb->ccb->ccb_h.path); 2342 ahc_freeze_ccb(scb->ccb); 2343 break; 2344 } 2345 break; 2346 } 2347 case TRACE_POINT: 2348 { 2349 printf("SSTAT2 = 0x%x DFCNTRL = 0x%x\n", ahc_inb(ahc, SSTAT2), 2350 ahc_inb(ahc, DFCNTRL)); 2351 printf("SSTAT3 = 0x%x DSTATUS = 0x%x\n", ahc_inb(ahc, SSTAT3), 2352 ahc_inb(ahc, DFSTATUS)); 2353 printf("SSTAT0 = 0x%x, SCB_DATACNT = 0x%x\n", 2354 ahc_inb(ahc, SSTAT0), 2355 ahc_inb(ahc, SCB_DATACNT)); 2356 break; 2357 } 2358 case HOST_MSG_LOOP: 2359 { 2360 /* 2361 * The sequencer has encountered a message phase 2362 * that requires host assistance for completion. 2363 * While handling the message phase(s), we will be 2364 * notified by the sequencer after each byte is 2365 * transfered so we can track bus phases. 2366 * 2367 * If this is the first time we've seen a HOST_MSG_LOOP, 2368 * initialize the state of the host message loop. 2369 */ 2370 if (ahc->msg_type == MSG_TYPE_NONE) { 2371 u_int bus_phase; 2372 2373 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 2374 if (bus_phase != P_MESGIN 2375 && bus_phase != P_MESGOUT) { 2376 printf("ahc_intr: HOST_MSG_LOOP bad " 2377 "phase 0x%x\n", 2378 bus_phase); 2379 /* 2380 * Probably transitioned to bus free before 2381 * we got here. Just punt the message. 2382 */ 2383 ahc_clear_intstat(ahc); 2384 restart_sequencer(ahc); 2385 } 2386 2387 if (devinfo.role == ROLE_INITIATOR) { 2388 struct scb *scb; 2389 u_int scb_index; 2390 2391 scb_index = ahc_inb(ahc, SCB_TAG); 2392 scb = &ahc->scb_data->scbarray[scb_index]; 2393 2394 if (bus_phase == P_MESGOUT) 2395 ahc_setup_initiator_msgout(ahc, 2396 &devinfo, 2397 scb); 2398 else { 2399 ahc->msg_type = 2400 MSG_TYPE_INITIATOR_MSGIN; 2401 ahc->msgin_index = 0; 2402 } 2403 } else { 2404 if (bus_phase == P_MESGOUT) { 2405 ahc->msg_type = 2406 MSG_TYPE_TARGET_MSGOUT; 2407 ahc->msgin_index = 0; 2408 } else 2409 /* XXX Ever executed??? */ 2410 ahc_setup_target_msgin(ahc, &devinfo); 2411 } 2412 } 2413 2414 /* Pass a NULL path so that handlers generate their own */ 2415 ahc_handle_message_phase(ahc, /*path*/NULL); 2416 break; 2417 } 2418 case DATA_OVERRUN: 2419 { 2420 /* 2421 * When the sequencer detects an overrun, it 2422 * places the controller in "BITBUCKET" mode 2423 * and allows the target to complete its transfer. 2424 * Unfortunately, none of the counters get updated 2425 * when the controller is in this mode, so we have 2426 * no way of knowing how large the overrun was. 2427 */ 2428 u_int scbindex = ahc_inb(ahc, SCB_TAG); 2429 u_int lastphase = ahc_inb(ahc, LASTPHASE); 2430 int i; 2431 2432 scb = &ahc->scb_data->scbarray[scbindex]; 2433 xpt_print_path(scb->ccb->ccb_h.path); 2434 printf("data overrun detected in %s phase." 2435 " Tag == 0x%x.\n", 2436 lastphase == P_DATAIN ? "Data-In" : "Data-Out", 2437 scb->hscb->tag); 2438 xpt_print_path(scb->ccb->ccb_h.path); 2439 printf("%s seen Data Phase. Length = %d. NumSGs = %d.\n", 2440 ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? 
"Have" : "Haven't", 2441 scb->ccb->csio.dxfer_len, scb->sg_count); 2442 if (scb->sg_count > 0) { 2443 for (i = 0; i < scb->sg_count - 1; i++) { 2444 printf("sg[%d] - Addr 0x%x : Length %d\n", 2445 i, 2446 scb->sg_list[i].addr, 2447 scb->sg_list[i].len); 2448 } 2449 } 2450 /* 2451 * Set this and it will take affect when the 2452 * target does a command complete. 2453 */ 2454 ahc_freeze_devq(ahc, scb->ccb->ccb_h.path); 2455 ahcsetccbstatus(scb->ccb, CAM_DATA_RUN_ERR); 2456 ahc_freeze_ccb(scb->ccb); 2457 break; 2458 } 2459 case TRACEPOINT: 2460 { 2461 printf("TRACEPOINT: RETURN_2 = %d\n", ahc_inb(ahc, RETURN_2)); 2462 #if 0 2463 printf("SSTAT1 == 0x%x\n", ahc_inb(ahc, SSTAT1)); 2464 printf("SSTAT0 == 0x%x\n", ahc_inb(ahc, SSTAT0)); 2465 printf(", SCSISIGI == 0x%x\n", ahc_inb(ahc, SCSISIGI)); 2466 printf("TRACEPOINT: CCHCNT = %d, SG_COUNT = %d\n", 2467 ahc_inb(ahc, CCHCNT), ahc_inb(ahc, SG_COUNT)); 2468 printf("TRACEPOINT: SCB_TAG = %d\n", ahc_inb(ahc, SCB_TAG)); 2469 printf("TRACEPOINT1: CCHADDR = %d, CCHCNT = %d, SCBPTR = %d\n", 2470 ahc_inb(ahc, CCHADDR) 2471 | (ahc_inb(ahc, CCHADDR+1) << 8) 2472 | (ahc_inb(ahc, CCHADDR+2) << 16) 2473 | (ahc_inb(ahc, CCHADDR+3) << 24), 2474 ahc_inb(ahc, CCHCNT) 2475 | (ahc_inb(ahc, CCHCNT+1) << 8) 2476 | (ahc_inb(ahc, CCHCNT+2) << 16), 2477 ahc_inb(ahc, SCBPTR)); 2478 printf("TRACEPOINT: WAITING_SCBH = %d\n", ahc_inb(ahc, WAITING_SCBH)); 2479 printf("TRACEPOINT: SCB_TAG = %d\n", ahc_inb(ahc, SCB_TAG)); 2480 #endif 2481 break; 2482 } 2483 #if NOT_YET 2484 /* XXX Fill these in later */ 2485 case MESG_BUFFER_BUSY: 2486 break; 2487 case MSGIN_PHASEMIS: 2488 break; 2489 #endif 2490 default: 2491 printf("ahc_intr: seqint, " 2492 "intstat == 0x%x, scsisigi = 0x%x\n", 2493 intstat, ahc_inb(ahc, SCSISIGI)); 2494 break; 2495 } 2496 2497 unpause: 2498 /* 2499 * The sequencer is paused immediately on 2500 * a SEQINT, so we should restart it when 2501 * we're done. 2502 */ 2503 unpause_sequencer(ahc, /*unpause_always*/TRUE); 2504 } 2505 2506 static void 2507 ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) 2508 { 2509 u_int scb_index; 2510 u_int status; 2511 struct scb *scb; 2512 char cur_channel; 2513 char intr_channel; 2514 2515 if ((ahc->features & AHC_TWIN) != 0 2516 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0)) 2517 cur_channel = 'B'; 2518 else 2519 cur_channel = 'A'; 2520 intr_channel = cur_channel; 2521 2522 status = ahc_inb(ahc, SSTAT1); 2523 if (status == 0) { 2524 if ((ahc->features & AHC_TWIN) != 0) { 2525 /* Try the other channel */ 2526 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 2527 status = ahc_inb(ahc, SSTAT1); 2528 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 2529 intr_channel = (cur_channel == 'A') ? 'B' : 'A'; 2530 } 2531 if (status == 0) { 2532 printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc)); 2533 return; 2534 } 2535 } 2536 2537 scb_index = ahc_inb(ahc, SCB_TAG); 2538 if (scb_index < ahc->scb_data->numscbs) { 2539 scb = &ahc->scb_data->scbarray[scb_index]; 2540 if ((scb->flags & SCB_ACTIVE) == 0) 2541 scb = NULL; 2542 } else 2543 scb = NULL; 2544 2545 if ((status & SCSIRSTI) != 0) { 2546 printf("%s: Someone reset channel %c\n", 2547 ahc_name(ahc), intr_channel); 2548 ahc_reset_channel(ahc, intr_channel, /* Initiate Reset */FALSE); 2549 } else if ((status & BUSFREE) != 0 && (status & SELTO) == 0) { 2550 /* 2551 * First look at what phase we were last in. 2552 * If its message out, chances are pretty good 2553 * that the busfree was in response to one of 2554 * our abort requests. 
2555 */ 2556 u_int lastphase = ahc_inb(ahc, LASTPHASE); 2557 u_int saved_tcl = ahc_inb(ahc, SAVED_TCL); 2558 u_int target = TCL_TARGET(saved_tcl); 2559 u_int initiator_role_id = TCL_SCSI_ID(ahc, saved_tcl); 2560 char channel = TCL_CHANNEL(ahc, saved_tcl); 2561 int printerror = 1; 2562 2563 ahc_outb(ahc, SCSISEQ, 2564 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 2565 if (lastphase == P_MESGOUT) { 2566 u_int message; 2567 u_int tag; 2568 2569 message = ahc->msgout_buf[ahc->msgout_index - 1]; 2570 tag = SCB_LIST_NULL; 2571 switch (message) { 2572 case MSG_ABORT_TAG: 2573 tag = scb->hscb->tag; 2574 /* FALLTRHOUGH */ 2575 case MSG_ABORT: 2576 xpt_print_path(scb->ccb->ccb_h.path); 2577 printf("SCB %d - Abort %s Completed.\n", 2578 scb->hscb->tag, tag == SCB_LIST_NULL ? 2579 "" : "Tag"); 2580 ahc_abort_scbs(ahc, target, channel, 2581 TCL_LUN(saved_tcl), tag, 2582 ROLE_INITIATOR, 2583 CAM_REQ_ABORTED); 2584 printerror = 0; 2585 break; 2586 case MSG_BUS_DEV_RESET: 2587 { 2588 struct ahc_devinfo devinfo; 2589 2590 /* 2591 * Don't mark the user's request for this BDR 2592 * as completing with CAM_BDR_SENT. CAM3 2593 * specifies CAM_REQ_CMP. 2594 */ 2595 if (scb != NULL 2596 && scb->ccb->ccb_h.func_code == XPT_RESET_DEV 2597 && ahc_match_scb(scb, target, channel, 2598 TCL_LUN(saved_tcl), 2599 ROLE_INITIATOR, 2600 SCB_LIST_NULL)) { 2601 ahcsetccbstatus(scb->ccb, CAM_REQ_CMP); 2602 } 2603 ahc_compile_devinfo(&devinfo, 2604 initiator_role_id, 2605 target, 2606 TCL_LUN(saved_tcl), 2607 channel, 2608 ROLE_INITIATOR); 2609 ahc_handle_devreset(ahc, &devinfo, 2610 CAM_BDR_SENT, AC_SENT_BDR, 2611 "Bus Device Reset", 2612 /*verbose_level*/0); 2613 printerror = 0; 2614 break; 2615 } 2616 default: 2617 break; 2618 } 2619 } 2620 if (printerror != 0) { 2621 if (scb != NULL) { 2622 u_int tag; 2623 2624 if ((scb->hscb->control & TAG_ENB) != 0) 2625 tag = scb->hscb->tag; 2626 else 2627 tag = SCB_LIST_NULL; 2628 ahc_abort_scbs(ahc, target, channel, 2629 SCB_LUN(scb), tag, 2630 ROLE_INITIATOR, 2631 CAM_UNEXP_BUSFREE); 2632 } else { 2633 ahc_abort_scbs(ahc, target, channel, 2634 CAM_LUN_WILDCARD, SCB_LIST_NULL, 2635 ROLE_INITIATOR, 2636 CAM_UNEXP_BUSFREE); 2637 printf("%s: ", ahc_name(ahc)); 2638 } 2639 printf("Unexpected busfree. 
LASTPHASE == 0x%x\n" 2640 "SEQADDR == 0x%x\n", 2641 lastphase, ahc_inb(ahc, SEQADDR0) 2642 | (ahc_inb(ahc, SEQADDR1) << 8)); 2643 } 2644 ahc_clear_msg_state(ahc); 2645 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 2646 ahc_outb(ahc, CLRSINT1, CLRBUSFREE); 2647 ahc_outb(ahc, CLRINT, CLRSCSIINT); 2648 restart_sequencer(ahc); 2649 } else if ((status & SELTO) != 0) { 2650 u_int scbptr; 2651 2652 scbptr = ahc_inb(ahc, WAITING_SCBH); 2653 ahc_outb(ahc, SCBPTR, scbptr); 2654 scb_index = ahc_inb(ahc, SCB_TAG); 2655 2656 if (scb_index < ahc->scb_data->numscbs) { 2657 scb = &ahc->scb_data->scbarray[scb_index]; 2658 if ((scb->flags & SCB_ACTIVE) == 0) 2659 scb = NULL; 2660 } else 2661 scb = NULL; 2662 2663 if (scb == NULL) { 2664 printf("%s: ahc_intr - referenced scb not " 2665 "valid during SELTO scb(%d, %d)\n", 2666 ahc_name(ahc), scbptr, scb_index); 2667 } else { 2668 struct ahc_devinfo devinfo; 2669 2670 ahc_scb_devinfo(ahc, &devinfo, scb); 2671 ahc_handle_devreset(ahc, &devinfo, CAM_SEL_TIMEOUT, 2672 /*ac_code*/0, "Selection Timeout", 2673 /*verbose_level*/2); 2674 } 2675 /* Stop the selection */ 2676 ahc_outb(ahc, SCSISEQ, 0); 2677 2678 /* No more pending messages */ 2679 ahc_clear_msg_state(ahc); 2680 2681 /* 2682 * Although the driver does not care about the 2683 * 'Selection in Progress' status bit, the busy 2684 * LED does. SELINGO is only cleared by a sucessful 2685 * selection, so we must manually clear it to ensure 2686 * the LED turns off just incase no future successful 2687 * selections occur (e.g. no devices on the bus). 2688 */ 2689 ahc_outb(ahc, CLRSINT0, CLRSELINGO); 2690 2691 /* Clear interrupt state */ 2692 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE); 2693 ahc_outb(ahc, CLRINT, CLRSCSIINT); 2694 restart_sequencer(ahc); 2695 } else if (scb == NULL) { 2696 printf("%s: ahc_intr - referenced scb not " 2697 "valid during scsiint 0x%x scb(%d)\n" 2698 "SIMODE0 = 0x%x, SIMODE1 = 0x%x, SSTAT0 = 0x%x\n" 2699 "SEQADDR = 0x%x\n", ahc_name(ahc), 2700 status, scb_index, ahc_inb(ahc, SIMODE0), 2701 ahc_inb(ahc, SIMODE1), ahc_inb(ahc, SSTAT0), 2702 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); 2703 ahc_outb(ahc, CLRSINT1, status); 2704 ahc_outb(ahc, CLRINT, CLRSCSIINT); 2705 unpause_sequencer(ahc, /*unpause_always*/TRUE); 2706 scb = NULL; 2707 } else if ((status & SCSIPERR) != 0) { 2708 /* 2709 * Determine the bus phase and 2710 * queue an appropriate message 2711 */ 2712 char *phase; 2713 u_int mesg_out = MSG_NOOP; 2714 u_int lastphase = ahc_inb(ahc, LASTPHASE); 2715 2716 xpt_print_path(scb->ccb->ccb_h.path); 2717 2718 switch (lastphase) { 2719 case P_DATAOUT: 2720 phase = "Data-Out"; 2721 break; 2722 case P_DATAIN: 2723 phase = "Data-In"; 2724 mesg_out = MSG_INITIATOR_DET_ERR; 2725 break; 2726 case P_COMMAND: 2727 phase = "Command"; 2728 break; 2729 case P_MESGOUT: 2730 phase = "Message-Out"; 2731 break; 2732 case P_STATUS: 2733 phase = "Status"; 2734 mesg_out = MSG_INITIATOR_DET_ERR; 2735 break; 2736 case P_MESGIN: 2737 phase = "Message-In"; 2738 mesg_out = MSG_PARITY_ERROR; 2739 break; 2740 default: 2741 phase = "unknown"; 2742 break; 2743 } 2744 printf("parity error during %s phase.\n", phase); 2745 2746 printf("SEQADDR == 0x%x\n", ahc_inb(ahc, SEQADDR0) 2747 | (ahc_inb(ahc, SEQADDR1) << 8)); 2748 2749 printf("SCSIRATE == 0x%x\n", ahc_inb(ahc, SCSIRATE)); 2750 2751 /* 2752 * We've set the hardware to assert ATN if we 2753 * get a parity error on "in" phases, so all we 2754 * need to do is stuff the message buffer with 2755 * the appropriate message. 
"In" phases have set 2756 * mesg_out to something other than MSG_NOP. 2757 */ 2758 if (mesg_out != MSG_NOOP) { 2759 if (ahc->msg_type != MSG_TYPE_NONE) 2760 ahc->send_msg_perror = TRUE; 2761 else 2762 ahc_outb(ahc, MSG_OUT, mesg_out); 2763 } 2764 ahc_outb(ahc, CLRSINT1, CLRSCSIPERR); 2765 ahc_outb(ahc, CLRINT, CLRSCSIINT); 2766 unpause_sequencer(ahc, /*unpause_always*/TRUE); 2767 } else { 2768 xpt_print_path(scb->ccb->ccb_h.path); 2769 printf("Unknown SCSIINT. Status = 0x%x\n", status); 2770 ahc_outb(ahc, CLRSINT1, status); 2771 ahc_outb(ahc, CLRINT, CLRSCSIINT); 2772 unpause_sequencer(ahc, /*unpause_always*/TRUE); 2773 } 2774 } 2775 2776 static void 2777 ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2778 { 2779 /* 2780 * We need to initiate transfer negotiations. 2781 * If our current and goal settings are identical, 2782 * we want to renegotiate due to a check condition. 2783 */ 2784 struct ahc_initiator_tinfo *tinfo; 2785 struct tmode_tstate *tstate; 2786 int dowide; 2787 int dosync; 2788 2789 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 2790 devinfo->target, &tstate); 2791 dowide = tinfo->current.width != tinfo->goal.width; 2792 dosync = tinfo->current.period != tinfo->goal.period; 2793 2794 if (!dowide && !dosync) { 2795 dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT; 2796 dosync = tinfo->goal.period != 0; 2797 } 2798 2799 if (dowide) { 2800 ahc_construct_wdtr(ahc, tinfo->goal.width); 2801 } else if (dosync) { 2802 struct ahc_syncrate *rate; 2803 u_int period; 2804 u_int offset; 2805 2806 period = tinfo->goal.period; 2807 rate = ahc_devlimited_syncrate(ahc, &period); 2808 offset = tinfo->goal.offset; 2809 ahc_validate_offset(ahc, rate, &offset, 2810 tinfo->current.width); 2811 ahc_construct_sdtr(ahc, period, offset); 2812 } else { 2813 panic("ahc_intr: AWAITING_MSG for negotiation, " 2814 "but no negotiation needed\n"); 2815 } 2816 } 2817 2818 static void 2819 ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2820 struct scb *scb) 2821 { 2822 /* 2823 * To facilitate adding multiple messages together, 2824 * each routine should increment the index and len 2825 * variables instead of setting them explicitly. 
2826 */ 2827 ahc->msgout_index = 0; 2828 ahc->msgout_len = 0; 2829 2830 if ((scb->flags & SCB_DEVICE_RESET) == 0 2831 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) { 2832 u_int identify_msg; 2833 2834 identify_msg = MSG_IDENTIFYFLAG | SCB_LUN(scb); 2835 if ((scb->hscb->control & DISCENB) != 0) 2836 identify_msg |= MSG_IDENTIFY_DISCFLAG; 2837 ahc->msgout_buf[ahc->msgout_index++] = identify_msg; 2838 ahc->msgout_len++; 2839 2840 if ((scb->hscb->control & TAG_ENB) != 0) { 2841 ahc->msgout_buf[ahc->msgout_index++] = 2842 scb->ccb->csio.tag_action; 2843 ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag; 2844 ahc->msgout_len += 2; 2845 } 2846 } 2847 2848 if (scb->flags & SCB_DEVICE_RESET) { 2849 ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET; 2850 ahc->msgout_len++; 2851 xpt_print_path(scb->ccb->ccb_h.path); 2852 printf("Bus Device Reset Message Sent\n"); 2853 } else if (scb->flags & SCB_ABORT) { 2854 if ((scb->hscb->control & TAG_ENB) != 0) 2855 ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG; 2856 else 2857 ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT; 2858 ahc->msgout_len++; 2859 xpt_print_path(scb->ccb->ccb_h.path); 2860 printf("Abort Message Sent\n"); 2861 } else if ((ahc->targ_msg_req & devinfo->target_mask) != 0) { 2862 ahc_build_transfer_msg(ahc, devinfo); 2863 } else { 2864 printf("ahc_intr: AWAITING_MSG for an SCB that " 2865 "does not have a waiting message"); 2866 panic("SCB = %d, SCB Control = %x, MSG_OUT = %x " 2867 "SCB flags = %x", scb->hscb->tag, scb->hscb->control, 2868 ahc_inb(ahc, MSG_OUT), scb->flags); 2869 } 2870 2871 /* 2872 * Clear the MK_MESSAGE flag from the SCB so we aren't 2873 * asked to send this message again. 2874 */ 2875 ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE); 2876 ahc->msgout_index = 0; 2877 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2878 } 2879 2880 static void 2881 ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2882 { 2883 /* 2884 * To facilitate adding multiple messages together, 2885 * each routine should increment the index and len 2886 * variables instead of setting them explicitly. 2887 */ 2888 ahc->msgout_index = 0; 2889 ahc->msgout_len = 0; 2890 2891 if ((ahc->targ_msg_req & devinfo->target_mask) != 0) 2892 ahc_build_transfer_msg(ahc, devinfo); 2893 else 2894 panic("ahc_intr: AWAITING target message with no message"); 2895 2896 ahc->msgout_index = 0; 2897 ahc->msg_type = MSG_TYPE_TARGET_MSGIN; 2898 } 2899 2900 static int 2901 ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2902 { 2903 /* 2904 * What we care about here is if we had an 2905 * outstanding SDTR or WDTR message for this 2906 * target. If we did, this is a signal that 2907 * the target is refusing negotiation. 2908 */ 2909 struct scb *scb; 2910 u_int scb_index; 2911 u_int last_msg; 2912 int response = 0; 2913 2914 scb_index = ahc_inb(ahc, SCB_TAG); 2915 scb = &ahc->scb_data->scbarray[scb_index]; 2916 2917 /* Might be necessary */ 2918 last_msg = ahc_inb(ahc, LAST_MSG); 2919 2920 if (ahc_sent_msg(ahc, MSG_EXT_WDTR, /*full*/FALSE)) { 2921 struct ahc_initiator_tinfo *tinfo; 2922 struct tmode_tstate *tstate; 2923 2924 /* note 8bit xfers */ 2925 printf("%s:%c:%d: refuses WIDE negotiation. Using " 2926 "8bit transfers\n", ahc_name(ahc), 2927 devinfo->channel, devinfo->target); 2928 ahc_set_width(ahc, devinfo, scb->ccb->ccb_h.path, 2929 MSG_EXT_WDTR_BUS_8_BIT, 2930 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 2931 /*paused*/TRUE); 2932 /* 2933 * No need to clear the sync rate. 
If the target 2934 * did not accept the command, our syncrate is 2935 * unaffected. If the target started the negotiation, 2936 * but rejected our response, we already cleared the 2937 * sync rate before sending our WDTR. 2938 */ 2939 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, 2940 devinfo->our_scsiid, 2941 devinfo->target, &tstate); 2942 if (tinfo->goal.period) { 2943 u_int period; 2944 2945 /* Start the sync negotiation */ 2946 period = tinfo->goal.period; 2947 ahc_devlimited_syncrate(ahc, &period); 2948 ahc->msgout_index = 0; 2949 ahc->msgout_len = 0; 2950 ahc_construct_sdtr(ahc, period, tinfo->goal.offset); 2951 ahc->msgout_index = 0; 2952 response = 1; 2953 } 2954 } else if (ahc_sent_msg(ahc, MSG_EXT_SDTR, /*full*/FALSE)) { 2955 /* note asynch xfers and clear flag */ 2956 ahc_set_syncrate(ahc, devinfo, scb->ccb->ccb_h.path, 2957 /*syncrate*/NULL, /*period*/0, 2958 /*offset*/0, 2959 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 2960 /*paused*/TRUE); 2961 printf("%s:%c:%d: refuses synchronous negotiation. " 2962 "Using asynchronous transfers\n", 2963 ahc_name(ahc), 2964 devinfo->channel, devinfo->target); 2965 } else if ((scb->hscb->control & MSG_SIMPLE_Q_TAG) != 0) { 2966 struct ccb_trans_settings neg; 2967 2968 printf("%s:%c:%d: refuses tagged commands. Performing " 2969 "non-tagged I/O\n", ahc_name(ahc), 2970 devinfo->channel, devinfo->target); 2971 2972 ahc_set_tags(ahc, devinfo, FALSE); 2973 neg.flags = 0; 2974 neg.valid = CCB_TRANS_TQ_VALID; 2975 xpt_setup_ccb(&neg.ccb_h, scb->ccb->ccb_h.path, /*priority*/1); 2976 xpt_async(AC_TRANSFER_NEG, scb->ccb->ccb_h.path, &neg); 2977 2978 /* 2979 * Resend the identify for this CCB as the target 2980 * may believe that the selection is invalid otherwise. 2981 */ 2982 ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) 2983 & ~MSG_SIMPLE_Q_TAG); 2984 scb->hscb->control &= ~MSG_SIMPLE_Q_TAG; 2985 scb->ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 2986 ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG); 2987 ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, SCSISIGO) | ATNO); 2988 2989 /* 2990 * Requeue all tagged commands for this target 2991 * currently in our posession so they can be 2992 * converted to untagged commands. 2993 */ 2994 ahc_search_qinfifo(ahc, SCB_TARGET(scb), SCB_CHANNEL(scb), 2995 SCB_LUN(scb), /*tag*/SCB_LIST_NULL, 2996 ROLE_INITIATOR, CAM_REQUEUE_REQ, 2997 SEARCH_COMPLETE); 2998 } else { 2999 /* 3000 * Otherwise, we ignore it. 
3001 */ 3002 printf("%s:%c:%d: Message reject for %x -- ignored\n", 3003 ahc_name(ahc), devinfo->channel, devinfo->target, 3004 last_msg); 3005 } 3006 return (response); 3007 } 3008 3009 static void 3010 ahc_clear_msg_state(struct ahc_softc *ahc) 3011 { 3012 ahc->msgout_len = 0; 3013 ahc->msgin_index = 0; 3014 ahc->msg_type = MSG_TYPE_NONE; 3015 ahc_outb(ahc, MSG_OUT, MSG_NOOP); 3016 } 3017 3018 static void 3019 ahc_handle_message_phase(struct ahc_softc *ahc, struct cam_path *path) 3020 { 3021 struct ahc_devinfo devinfo; 3022 u_int bus_phase; 3023 int end_session; 3024 3025 ahc_fetch_devinfo(ahc, &devinfo); 3026 end_session = FALSE; 3027 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 3028 3029 reswitch: 3030 switch (ahc->msg_type) { 3031 case MSG_TYPE_INITIATOR_MSGOUT: 3032 { 3033 int lastbyte; 3034 int phasemis; 3035 int msgdone; 3036 3037 if (ahc->msgout_len == 0) 3038 panic("REQINIT interrupt with no active message"); 3039 3040 phasemis = bus_phase != P_MESGOUT; 3041 if (phasemis) { 3042 if (bus_phase == P_MESGIN) { 3043 /* 3044 * Change gears and see if 3045 * this messages is of interest to 3046 * us or should be passed back to 3047 * the sequencer. 3048 */ 3049 ahc_outb(ahc, CLRSINT1, CLRATNO); 3050 ahc->send_msg_perror = FALSE; 3051 ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN; 3052 ahc->msgin_index = 0; 3053 goto reswitch; 3054 } 3055 end_session = TRUE; 3056 break; 3057 } 3058 3059 if (ahc->send_msg_perror) { 3060 ahc_outb(ahc, CLRSINT1, CLRATNO); 3061 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 3062 ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR); 3063 break; 3064 } 3065 3066 msgdone = ahc->msgout_index == ahc->msgout_len; 3067 if (msgdone) { 3068 /* 3069 * The target has requested a retry. 3070 * Re-assert ATN, reset our message index to 3071 * 0, and try again. 3072 */ 3073 ahc->msgout_index = 0; 3074 ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, SCSISIGO) | ATNO); 3075 } 3076 3077 lastbyte = ahc->msgout_index == (ahc->msgout_len - 1); 3078 if (lastbyte) { 3079 /* Last byte is signified by dropping ATN */ 3080 ahc_outb(ahc, CLRSINT1, CLRATNO); 3081 } 3082 3083 /* 3084 * Clear our interrupt status and present 3085 * the next byte on the bus. 3086 */ 3087 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 3088 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); 3089 break; 3090 } 3091 case MSG_TYPE_INITIATOR_MSGIN: 3092 { 3093 int phasemis; 3094 int message_done; 3095 3096 phasemis = bus_phase != P_MESGIN; 3097 3098 if (phasemis) { 3099 ahc->msgin_index = 0; 3100 if (bus_phase == P_MESGOUT 3101 && (ahc->send_msg_perror == TRUE 3102 || (ahc->msgout_len != 0 3103 && ahc->msgout_index == 0))) { 3104 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 3105 goto reswitch; 3106 } 3107 end_session = TRUE; 3108 break; 3109 } 3110 3111 /* Pull the byte in without acking it */ 3112 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL); 3113 3114 message_done = ahc_parse_msg(ahc, path, &devinfo); 3115 3116 if (message_done) { 3117 /* 3118 * Clear our incoming message buffer in case there 3119 * is another message following this one. 3120 */ 3121 ahc->msgin_index = 0; 3122 3123 /* 3124 * If this message illicited a response, 3125 * assert ATN so the target takes us to the 3126 * message out phase. 
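 * ahc_parse_msg() signals a reply (a message reject, or an SDTR/WDTR
 * response) by leaving it in msgout_buf with a non-zero msgout_len;
 * raising ATN before this last byte is ACKed is what tells the target
 * we have something to send.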
3127 */ 3128 if (ahc->msgout_len != 0) 3129 ahc_outb(ahc, SCSISIGO, 3130 ahc_inb(ahc, SCSISIGO) | ATNO); 3131 } else 3132 ahc->msgin_index++; 3133 3134 /* Ack the byte */ 3135 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 3136 ahc_inb(ahc, SCSIDATL); 3137 break; 3138 } 3139 case MSG_TYPE_TARGET_MSGIN: 3140 { 3141 int msgdone; 3142 int msgout_request; 3143 3144 if (ahc->msgout_len == 0) 3145 panic("Target MSGIN with no active message"); 3146 3147 /* 3148 * If we interrupted a mesgout session, the initiator 3149 * will not know this until our first REQ. So, we 3150 * only honor mesgout requests after we've sent our 3151 * first byte. 3152 */ 3153 if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0 3154 && ahc->msgout_index > 0) 3155 msgout_request = TRUE; 3156 else 3157 msgout_request = FALSE; 3158 3159 if (msgout_request) { 3160 3161 /* 3162 * Change gears and see if 3163 * this messages is of interest to 3164 * us or should be passed back to 3165 * the sequencer. 3166 */ 3167 ahc->msg_type = MSG_TYPE_TARGET_MSGOUT; 3168 ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO); 3169 ahc->msgin_index = 0; 3170 /* Dummy read to REQ for first byte */ 3171 ahc_inb(ahc, SCSIDATL); 3172 ahc_outb(ahc, SXFRCTL0, 3173 ahc_inb(ahc, SXFRCTL0) | SPIOEN); 3174 break; 3175 } 3176 3177 msgdone = ahc->msgout_index == ahc->msgout_len; 3178 if (msgdone) { 3179 ahc_outb(ahc, SXFRCTL0, 3180 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); 3181 end_session = TRUE; 3182 break; 3183 } 3184 3185 /* 3186 * Present the next byte on the bus. 3187 */ 3188 ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); 3189 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); 3190 break; 3191 } 3192 case MSG_TYPE_TARGET_MSGOUT: 3193 { 3194 int lastbyte; 3195 int msgdone; 3196 3197 /* 3198 * The initiator signals that this is 3199 * the last byte by dropping ATN. 3200 */ 3201 lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0; 3202 3203 /* 3204 * Read the latched byte, but turn off SPIOEN first 3205 * so that we don't inadvertantly cause a REQ for the 3206 * next byte. 3207 */ 3208 ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); 3209 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL); 3210 msgdone = ahc_parse_msg(ahc, path, &devinfo); 3211 if (msgdone == MSGLOOP_TERMINATED) { 3212 /* 3213 * The message is *really* done in that it caused 3214 * us to go to bus free. The sequencer has already 3215 * been reset at this point, so pull the ejection 3216 * handle. 3217 */ 3218 return; 3219 } 3220 3221 ahc->msgin_index++; 3222 3223 /* 3224 * XXX Read spec about initiator dropping ATN too soon 3225 * and use msgdone to detect it. 3226 */ 3227 if (msgdone == MSGLOOP_MSGCOMPLETE) { 3228 ahc->msgin_index = 0; 3229 3230 /* 3231 * If this message illicited a response, transition 3232 * to the Message in phase and send it. 3233 */ 3234 if (ahc->msgout_len != 0) { 3235 ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO); 3236 ahc_outb(ahc, SXFRCTL0, 3237 ahc_inb(ahc, SXFRCTL0) | SPIOEN); 3238 ahc->msg_type = MSG_TYPE_TARGET_MSGIN; 3239 ahc->msgin_index = 0; 3240 break; 3241 } 3242 } 3243 3244 if (lastbyte) 3245 end_session = TRUE; 3246 else { 3247 /* Ask for the next byte. 
*/ 3248 ahc_outb(ahc, SXFRCTL0, 3249 ahc_inb(ahc, SXFRCTL0) | SPIOEN); 3250 } 3251 3252 break; 3253 } 3254 default: 3255 panic("Unknown REQINIT message type"); 3256 } 3257 3258 if (end_session) { 3259 ahc_clear_msg_state(ahc); 3260 ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP); 3261 } else 3262 ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP); 3263 } 3264 3265 /* 3266 * See if we sent a particular extended message to the target. 3267 * If "full" is true, the target saw the full message. 3268 * If "full" is false, the target saw at least the first 3269 * byte of the message. 3270 */ 3271 static int 3272 ahc_sent_msg(struct ahc_softc *ahc, u_int msgtype, int full) 3273 { 3274 int found; 3275 int index; 3276 3277 found = FALSE; 3278 index = 0; 3279 3280 while (index < ahc->msgout_len) { 3281 if ((ahc->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0 3282 || ahc->msgout_buf[index] == MSG_MESSAGE_REJECT) 3283 index++; 3284 else if (ahc->msgout_buf[index] >= MSG_SIMPLE_Q_TAG 3285 && ahc->msgout_buf[index] < MSG_IGN_WIDE_RESIDUE) { 3286 /* Skip tag type and tag id */ 3287 index += 2; 3288 } else if (ahc->msgout_buf[index] == MSG_EXTENDED) { 3289 /* Found a candidate */ 3290 if (ahc->msgout_buf[index+2] == msgtype) { 3291 u_int end_index; 3292 3293 end_index = index + 1 3294 + ahc->msgout_buf[index + 1]; 3295 if (full) { 3296 if (ahc->msgout_index > end_index) 3297 found = TRUE; 3298 } else if (ahc->msgout_index > index) 3299 found = TRUE; 3300 } 3301 break; 3302 } else { 3303 panic("ahc_sent_msg: Inconsistent msg buffer"); 3304 } 3305 } 3306 return (found); 3307 } 3308 3309 static int 3310 ahc_parse_msg(struct ahc_softc *ahc, struct cam_path *path, 3311 struct ahc_devinfo *devinfo) 3312 { 3313 struct ahc_initiator_tinfo *tinfo; 3314 struct tmode_tstate *tstate; 3315 int reject; 3316 int done; 3317 int response; 3318 u_int targ_scsirate; 3319 3320 done = MSGLOOP_IN_PROG; 3321 response = FALSE; 3322 reject = FALSE; 3323 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 3324 devinfo->target, &tstate); 3325 targ_scsirate = tinfo->scsirate; 3326 3327 /* 3328 * Parse as much of the message as is availible, 3329 * rejecting it if we don't support it. When 3330 * the entire message is availible and has been 3331 * handled, return TRUE indicating that we have 3332 * parsed an entire message. 3333 * 3334 * In the case of extended messages, we accept the length 3335 * byte outright and perform more checking once we know the 3336 * extended message type. 3337 */ 3338 switch (ahc->msgin_buf[0]) { 3339 case MSG_MESSAGE_REJECT: 3340 response = ahc_handle_msg_reject(ahc, devinfo); 3341 /* FALLTHROUGH */ 3342 case MSG_NOOP: 3343 done = MSGLOOP_MSGCOMPLETE; 3344 break; 3345 case MSG_IGN_WIDE_RESIDUE: 3346 { 3347 /* Wait for the whole message */ 3348 if (ahc->msgin_index >= 1) { 3349 if (ahc->msgin_buf[1] != 1 3350 || tinfo->current.width == MSG_EXT_WDTR_BUS_8_BIT) { 3351 reject = TRUE; 3352 done = MSGLOOP_MSGCOMPLETE; 3353 } else 3354 ahc_handle_ign_wide_residue(ahc, devinfo); 3355 } 3356 break; 3357 } 3358 case MSG_EXTENDED: 3359 { 3360 /* Wait for enough of the message to begin validation */ 3361 if (ahc->msgin_index < 2) 3362 break; 3363 switch (ahc->msgin_buf[2]) { 3364 case MSG_EXT_SDTR: 3365 { 3366 struct ahc_syncrate *syncrate; 3367 u_int period; 3368 u_int offset; 3369 u_int saved_offset; 3370 3371 if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) { 3372 reject = TRUE; 3373 break; 3374 } 3375 3376 /* 3377 * Wait until we have both args before validating 3378 * and acting on this message. 
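 * The full message is MSG_EXTENDED, MSG_EXT_SDTR_LEN, MSG_EXT_SDTR,
 * period, offset, so the transfer period factor is msgin_buf[3] and the
 * REQ/ACK offset is msgin_buf[4] below.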
3379 * 3380 * Add one to MSG_EXT_SDTR_LEN to account for 3381 * the extended message preamble. 3382 */ 3383 if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1)) 3384 break; 3385 3386 period = ahc->msgin_buf[3]; 3387 saved_offset = offset = ahc->msgin_buf[4]; 3388 syncrate = ahc_devlimited_syncrate(ahc, &period); 3389 ahc_validate_offset(ahc, syncrate, &offset, 3390 targ_scsirate & WIDEXFER); 3391 ahc_set_syncrate(ahc, devinfo, path, 3392 syncrate, period, offset, 3393 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3394 /*paused*/TRUE); 3395 3396 /* 3397 * See if we initiated Sync Negotiation 3398 * and didn't have to fall down to async 3399 * transfers. 3400 */ 3401 if (ahc_sent_msg(ahc, MSG_EXT_SDTR, /*full*/TRUE)) { 3402 /* We started it */ 3403 if (saved_offset != offset) { 3404 /* Went too low - force async */ 3405 reject = TRUE; 3406 } 3407 } else { 3408 /* 3409 * Send our own SDTR in reply 3410 */ 3411 if (bootverbose) 3412 printf("Sending SDTR!\n"); 3413 ahc->msgout_index = 0; 3414 ahc->msgout_len = 0; 3415 ahc_construct_sdtr(ahc, period, offset); 3416 ahc->msgout_index = 0; 3417 response = TRUE; 3418 } 3419 done = MSGLOOP_MSGCOMPLETE; 3420 break; 3421 } 3422 case MSG_EXT_WDTR: 3423 { 3424 u_int bus_width; 3425 u_int sending_reply; 3426 3427 sending_reply = FALSE; 3428 if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) { 3429 reject = TRUE; 3430 break; 3431 } 3432 3433 /* 3434 * Wait until we have our arg before validating 3435 * and acting on this message. 3436 * 3437 * Add one to MSG_EXT_WDTR_LEN to account for 3438 * the extended message preamble. 3439 */ 3440 if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1)) 3441 break; 3442 3443 /* 3444 * Due to a problem with sync/wide transfers 3445 * on the aic7880 only allow this on Ultra2 3446 * controllers for the moment. 3447 */ 3448 if (devinfo->role == ROLE_TARGET 3449 && (ahc->features & AHC_ULTRA2) == 0) { 3450 reject = TRUE; 3451 break; 3452 } 3453 3454 bus_width = ahc->msgin_buf[3]; 3455 if (ahc_sent_msg(ahc, MSG_EXT_WDTR, /*full*/TRUE)) { 3456 /* 3457 * Don't send a WDTR back to the 3458 * target, since we asked first. 3459 */ 3460 switch (bus_width){ 3461 default: 3462 /* 3463 * How can we do anything greater 3464 * than 16bit transfers on a 16bit 3465 * bus? 3466 */ 3467 reject = TRUE; 3468 printf("%s: target %d requested %dBit " 3469 "transfers. 
Rejecting...\n", 3470 ahc_name(ahc), devinfo->target, 3471 8 * (0x01 << bus_width)); 3472 /* FALLTHROUGH */ 3473 case MSG_EXT_WDTR_BUS_8_BIT: 3474 bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3475 break; 3476 case MSG_EXT_WDTR_BUS_16_BIT: 3477 break; 3478 } 3479 } else { 3480 /* 3481 * Send our own WDTR in reply 3482 */ 3483 if (bootverbose) 3484 printf("Sending WDTR!\n"); 3485 switch (bus_width) { 3486 default: 3487 if (ahc->features & AHC_WIDE) { 3488 /* Respond Wide */ 3489 bus_width = 3490 MSG_EXT_WDTR_BUS_16_BIT; 3491 break; 3492 } 3493 /* FALLTHROUGH */ 3494 case MSG_EXT_WDTR_BUS_8_BIT: 3495 bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3496 break; 3497 } 3498 ahc->msgout_index = 0; 3499 ahc->msgout_len = 0; 3500 ahc_construct_wdtr(ahc, bus_width); 3501 ahc->msgout_index = 0; 3502 response = TRUE; 3503 sending_reply = TRUE; 3504 } 3505 ahc_set_width(ahc, devinfo, path, bus_width, 3506 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3507 /*paused*/TRUE); 3508 3509 /* After a wide message, we are async */ 3510 ahc_set_syncrate(ahc, devinfo, path, 3511 /*syncrate*/NULL, /*period*/0, 3512 /*offset*/0, AHC_TRANS_ACTIVE, 3513 /*paused*/TRUE); 3514 if (sending_reply == FALSE && reject == FALSE) { 3515 3516 if (tinfo->goal.period) { 3517 struct ahc_syncrate *rate; 3518 u_int period; 3519 u_int offset; 3520 3521 /* Start the sync negotiation */ 3522 period = tinfo->goal.period; 3523 rate = ahc_devlimited_syncrate(ahc, 3524 &period); 3525 offset = tinfo->goal.offset; 3526 ahc_validate_offset(ahc, rate, &offset, 3527 tinfo->current.width); 3528 ahc->msgout_index = 0; 3529 ahc->msgout_len = 0; 3530 ahc_construct_sdtr(ahc, period, offset); 3531 ahc->msgout_index = 0; 3532 response = TRUE; 3533 } 3534 } 3535 done = MSGLOOP_MSGCOMPLETE; 3536 break; 3537 } 3538 default: 3539 /* Unknown extended message. Reject it. */ 3540 reject = TRUE; 3541 break; 3542 } 3543 break; 3544 } 3545 case MSG_BUS_DEV_RESET: 3546 ahc_handle_devreset(ahc, devinfo, 3547 CAM_BDR_SENT, AC_SENT_BDR, 3548 "Bus Device Reset Received", 3549 /*verbose_level*/0); 3550 restart_sequencer(ahc); 3551 done = MSGLOOP_TERMINATED; 3552 break; 3553 case MSG_ABORT_TAG: 3554 case MSG_ABORT: 3555 case MSG_CLEAR_QUEUE: 3556 /* Target mode messages */ 3557 if (devinfo->role != ROLE_TARGET) { 3558 reject = TRUE; 3559 break; 3560 } 3561 ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, 3562 devinfo->lun, 3563 ahc->msgin_buf[0] == MSG_ABORT_TAG 3564 ? SCB_LIST_NULL 3565 : ahc_inb(ahc, INITIATOR_TAG), 3566 ROLE_TARGET, CAM_REQ_ABORTED); 3567 3568 tstate = ahc->enabled_targets[devinfo->our_scsiid]; 3569 if (tstate != NULL) { 3570 struct tmode_lstate* lstate; 3571 3572 lstate = tstate->enabled_luns[devinfo->lun]; 3573 if (lstate != NULL) { 3574 ahc_queue_lstate_event(ahc, lstate, 3575 devinfo->our_scsiid, 3576 ahc->msgin_buf[0], 3577 /*arg*/0); 3578 ahc_send_lstate_events(ahc, lstate); 3579 } 3580 } 3581 done = MSGLOOP_MSGCOMPLETE; 3582 break; 3583 case MSG_TERM_IO_PROC: 3584 default: 3585 reject = TRUE; 3586 break; 3587 } 3588 3589 if (reject) { 3590 /* 3591 * Setup to reject the message. 
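 * A one byte MESSAGE REJECT is queued and 'response' is set so the
 * outgoing buffer is not cleared below; the non-zero msgout_len then
 * causes the message loop to assert ATN and deliver the reject during
 * the next message-out phase.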
3592 */ 3593 ahc->msgout_index = 0; 3594 ahc->msgout_len = 1; 3595 ahc->msgout_buf[0] = MSG_MESSAGE_REJECT; 3596 done = MSGLOOP_MSGCOMPLETE; 3597 response = TRUE; 3598 } 3599 3600 if (done != MSGLOOP_IN_PROG && !response) 3601 /* Clear the outgoing message buffer */ 3602 ahc->msgout_len = 0; 3603 3604 return (done); 3605 } 3606 3607 static void 3608 ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 3609 { 3610 u_int scb_index; 3611 struct scb *scb; 3612 3613 scb_index = ahc_inb(ahc, SCB_TAG); 3614 scb = &ahc->scb_data->scbarray[scb_index]; 3615 if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0 3616 || (scb->ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_IN) { 3617 /* 3618 * Ignore the message if we haven't 3619 * seen an appropriate data phase yet. 3620 */ 3621 } else { 3622 /* 3623 * If the residual occurred on the last 3624 * transfer and the transfer request was 3625 * expected to end on an odd count, do 3626 * nothing. Otherwise, subtract a byte 3627 * and update the residual count accordingly. 3628 */ 3629 u_int resid_sgcnt; 3630 3631 resid_sgcnt = ahc_inb(ahc, SCB_RESID_SGCNT); 3632 if (resid_sgcnt == 0 3633 && ahc_inb(ahc, DATA_COUNT_ODD) == 1) { 3634 /* 3635 * If the residual occurred on the last 3636 * transfer and the transfer request was 3637 * expected to end on an odd count, do 3638 * nothing. 3639 */ 3640 } else { 3641 u_int data_cnt; 3642 u_int data_addr; 3643 u_int sg_index; 3644 3645 data_cnt = (ahc_inb(ahc, SCB_RESID_DCNT + 2) << 16) 3646 | (ahc_inb(ahc, SCB_RESID_DCNT + 1) << 8) 3647 | (ahc_inb(ahc, SCB_RESID_DCNT)); 3648 3649 data_addr = (ahc_inb(ahc, SHADDR + 3) << 24) 3650 | (ahc_inb(ahc, SHADDR + 2) << 16) 3651 | (ahc_inb(ahc, SHADDR + 1) << 8) 3652 | (ahc_inb(ahc, SHADDR)); 3653 3654 data_cnt += 1; 3655 data_addr -= 1; 3656 3657 sg_index = scb->sg_count - resid_sgcnt; 3658 3659 /* 3660 * scb->sg_list starts with the second S/G entry. 3661 */ 3662 if (sg_index-- != 0 3663 && (scb->sg_list[sg_index].len < data_cnt)) { 3664 u_int sg_addr; 3665 3666 data_cnt = 1; 3667 data_addr = scb->sg_list[sg_index - 1].addr 3668 + scb->sg_list[sg_index - 1].len - 1; 3669 3670 sg_addr = scb->sg_list_phys 3671 + (sg_index * sizeof(*scb->sg_list)); 3672 ahc_outb(ahc, SG_NEXT + 3, sg_addr >> 24); 3673 ahc_outb(ahc, SG_NEXT + 2, sg_addr >> 16); 3674 ahc_outb(ahc, SG_NEXT + 1, sg_addr >> 8); 3675 ahc_outb(ahc, SG_NEXT, sg_addr); 3676 } 3677 3678 ahc_outb(ahc, SCB_RESID_DCNT + 2, data_cnt >> 16); 3679 ahc_outb(ahc, SCB_RESID_DCNT + 1, data_cnt >> 8); 3680 ahc_outb(ahc, SCB_RESID_DCNT, data_cnt); 3681 3682 ahc_outb(ahc, SHADDR + 3, data_addr >> 24); 3683 ahc_outb(ahc, SHADDR + 2, data_addr >> 16); 3684 ahc_outb(ahc, SHADDR + 1, data_addr >> 8); 3685 ahc_outb(ahc, SHADDR, data_addr); 3686 } 3687 } 3688 } 3689 3690 static void 3691 ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 3692 cam_status status, ac_code acode, char *message, 3693 int verbose_level) 3694 { 3695 struct cam_path *path; 3696 int found; 3697 int error; 3698 struct tmode_tstate* tstate; 3699 u_int lun; 3700 3701 3702 error = ahc_create_path(ahc, devinfo, &path); 3703 3704 found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, 3705 CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role, 3706 status); 3707 3708 /* 3709 * Send an immediate notify ccb to all target more peripheral 3710 * drivers affected by this action. 
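 * Each enabled lun on this target gets a MSG_BUS_DEV_RESET event queued
 * on its lstate, and ahc_send_lstate_events() passes the events to the
 * peripheral drivers through the immediate notify CCBs they have posted
 * on lstate->immed_notifies.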
3711 */ 3712 tstate = ahc->enabled_targets[devinfo->our_scsiid]; 3713 if (tstate != NULL) { 3714 for (lun = 0; lun <= 7; lun++) { 3715 struct tmode_lstate* lstate; 3716 3717 lstate = tstate->enabled_luns[lun]; 3718 if (lstate == NULL) 3719 continue; 3720 3721 ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid, 3722 MSG_BUS_DEV_RESET, /*arg*/0); 3723 ahc_send_lstate_events(ahc, lstate); 3724 } 3725 } 3726 3727 /* 3728 * Go back to async/narrow transfers and renegotiate. 3729 * ahc_set_width and ahc_set_syncrate can cope with NULL 3730 * paths. 3731 */ 3732 ahc_set_width(ahc, devinfo, path, MSG_EXT_WDTR_BUS_8_BIT, 3733 AHC_TRANS_CUR, /*paused*/TRUE); 3734 ahc_set_syncrate(ahc, devinfo, path, /*syncrate*/NULL, 3735 /*period*/0, /*offset*/0, AHC_TRANS_CUR, 3736 /*paused*/TRUE); 3737 3738 if (error == CAM_REQ_CMP && acode != 0) 3739 xpt_async(AC_SENT_BDR, path, NULL); 3740 3741 if (error == CAM_REQ_CMP) 3742 xpt_free_path(path); 3743 3744 if (message != NULL 3745 && (verbose_level <= bootverbose)) 3746 printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc), 3747 message, devinfo->channel, devinfo->target, found); 3748 } 3749 3750 /* 3751 * We have an scb which has been processed by the 3752 * adaptor, now we look to see how the operation 3753 * went. 3754 */ 3755 static void 3756 ahc_done(struct ahc_softc *ahc, struct scb *scb) 3757 { 3758 union ccb *ccb; 3759 3760 CAM_DEBUG(scb->ccb->ccb_h.path, CAM_DEBUG_TRACE, 3761 ("ahc_done - scb %d\n", scb->hscb->tag)); 3762 3763 ccb = scb->ccb; 3764 LIST_REMOVE(&ccb->ccb_h, sim_links.le); 3765 3766 untimeout(ahc_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch); 3767 3768 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 3769 bus_dmasync_op_t op; 3770 3771 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 3772 op = BUS_DMASYNC_POSTREAD; 3773 else 3774 op = BUS_DMASYNC_POSTWRITE; 3775 bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op); 3776 bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap); 3777 } 3778 3779 /* 3780 * Unbusy this target/channel/lun. 3781 * XXX if we are holding two commands per lun, 3782 * send the next command. 3783 */ 3784 ahc_index_busy_tcl(ahc, scb->hscb->tcl, /*unbusy*/TRUE); 3785 3786 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { 3787 if (ahc_ccb_status(ccb) == CAM_REQ_INPROG) 3788 ccb->ccb_h.status |= CAM_REQ_CMP; 3789 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3790 ahcfreescb(ahc, scb); 3791 xpt_done(ccb); 3792 return; 3793 } 3794 3795 /* 3796 * If the recovery SCB completes, we have to be 3797 * out of our timeout. 3798 */ 3799 if ((scb->flags & SCB_RECOVERY_SCB) != 0) { 3800 3801 struct ccb_hdr *ccbh; 3802 3803 /* 3804 * We were able to complete the command successfully, 3805 * so reinstate the timeouts for all other pending 3806 * commands. 3807 */ 3808 ccbh = ahc->pending_ccbs.lh_first; 3809 while (ccbh != NULL) { 3810 struct scb *pending_scb; 3811 3812 pending_scb = (struct scb *)ccbh->ccb_scb_ptr; 3813 ccbh->timeout_ch = 3814 timeout(ahc_timeout, pending_scb, 3815 (ccbh->timeout * hz)/1000); 3816 ccbh = LIST_NEXT(ccbh, sim_links.le); 3817 } 3818 3819 /* 3820 * Ensure that we didn't put a second instance of this 3821 * SCB into the QINFIFO. 
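 * Timeout recovery may have requeued this command, so the qinfifo is
 * searched for this SCB's tag and any duplicate entry removed before
 * the CCB is completed.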
3822 */ 3823 ahc_search_qinfifo(ahc, SCB_TARGET(scb), SCB_CHANNEL(scb), 3824 SCB_LUN(scb), scb->hscb->tag, 3825 ROLE_INITIATOR, /*status*/0, 3826 SEARCH_REMOVE); 3827 if (ahc_ccb_status(ccb) == CAM_BDR_SENT 3828 || ahc_ccb_status(ccb) == CAM_REQ_ABORTED) 3829 ahcsetccbstatus(ccb, CAM_CMD_TIMEOUT); 3830 xpt_print_path(ccb->ccb_h.path); 3831 printf("no longer in timeout, status = %x\n", 3832 ccb->ccb_h.status); 3833 } 3834 3835 /* Don't clobber any existing error state */ 3836 if (ahc_ccb_status(ccb) == CAM_REQ_INPROG) { 3837 ccb->ccb_h.status |= CAM_REQ_CMP; 3838 } else if ((scb->flags & SCB_SENSE) != 0) { 3839 /* 3840 * We performed autosense retrieval. 3841 * 3842 * bzero the sense data before having 3843 * the drive fill it. The SCSI spec mandates 3844 * that any untransfered data should be 3845 * assumed to be zero. Complete the 'bounce' 3846 * of sense information through buffers accessible 3847 * via bus-space by copying it into the clients 3848 * csio. 3849 */ 3850 bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data)); 3851 bcopy(&ahc->scb_data->sense[scb->hscb->tag], 3852 &ccb->csio.sense_data, scb->sg_list->len); 3853 scb->ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 3854 } 3855 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3856 ahcfreescb(ahc, scb); 3857 xpt_done(ccb); 3858 } 3859 3860 /* 3861 * Determine the number of SCBs available on the controller 3862 */ 3863 int 3864 ahc_probe_scbs(struct ahc_softc *ahc) { 3865 int i; 3866 3867 for (i = 0; i < AHC_SCB_MAX; i++) { 3868 ahc_outb(ahc, SCBPTR, i); 3869 ahc_outb(ahc, SCB_CONTROL, i); 3870 if (ahc_inb(ahc, SCB_CONTROL) != i) 3871 break; 3872 ahc_outb(ahc, SCBPTR, 0); 3873 if (ahc_inb(ahc, SCB_CONTROL) != 0) 3874 break; 3875 } 3876 3877 return (i); 3878 } 3879 3880 /* 3881 * Start the board, ready for normal operation 3882 */ 3883 int 3884 ahc_init(struct ahc_softc *ahc) 3885 { 3886 int max_targ = 15; 3887 int i; 3888 int term; 3889 u_int scsi_conf; 3890 u_int scsiseq_template; 3891 u_int ultraenb; 3892 u_int discenable; 3893 u_int tagenable; 3894 size_t driver_data_size; 3895 u_int32_t physaddr; 3896 3897 #ifdef AHC_PRINT_SRAM 3898 printf("Scratch Ram:"); 3899 for (i = 0x20; i < 0x5f; i++) { 3900 if (((i % 8) == 0) && (i != 0)) { 3901 printf ("\n "); 3902 } 3903 printf (" 0x%x", ahc_inb(ahc, i)); 3904 } 3905 if ((ahc->features & AHC_MORE_SRAM) != 0) { 3906 for (i = 0x70; i < 0x7f; i++) { 3907 if (((i % 8) == 0) && (i != 0)) { 3908 printf ("\n "); 3909 } 3910 printf (" 0x%x", ahc_inb(ahc, i)); 3911 } 3912 } 3913 printf ("\n"); 3914 #endif 3915 3916 /* 3917 * Assume we have a board at this stage and it has been reset. 3918 */ 3919 if ((ahc->flags & AHC_USEDEFAULTS) != 0) { 3920 ahc->our_id = ahc->our_id_b = 7; 3921 } 3922 3923 /* 3924 * Default to allowing initiator operations. 3925 */ 3926 ahc->flags |= AHC_INITIATORMODE; 3927 3928 /* 3929 * XXX Would be better to use a per device flag, but PCI and EISA 3930 * devices don't have them yet. 3931 */ 3932 if ((AHC_TMODE_ENABLE & (0x01 << ahc->unit)) != 0) { 3933 ahc->flags |= AHC_TARGETMODE; 3934 /* 3935 * Although we have space for both the initiator and 3936 * target roles on ULTRA2 chips, we currently disable 3937 * the initiator role to allow multi-scsi-id target mode 3938 * configurations. We can only respond on the same SCSI 3939 * ID as our initiator role if we allow initiator operation. 3940 * At some point, we should add a configuration knob to 3941 * allow both roles to be loaded. 
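 * AHC_TMODE_ENABLE is a per-unit bit mask (note the 0x01 << ahc->unit
 * test above).  For example, a kernel built with something along the
 * lines of
 *
 *	options		AHC_TMODE_ENABLE=0x1
 *
 * (the value reaches us through opt_aic7xxx.h; the config syntax shown
 * is illustrative) enables the target role on unit 0 only.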
3942 */ 3943 ahc->flags &= ~AHC_INITIATORMODE; 3944 } 3945 3946 /* DMA tag for mapping buffers into device visible space. */ 3947 if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0, 3948 /*lowaddr*/BUS_SPACE_MAXADDR, 3949 /*highaddr*/BUS_SPACE_MAXADDR, 3950 /*filter*/NULL, /*filterarg*/NULL, 3951 /*maxsize*/MAXBSIZE, /*nsegments*/AHC_NSEG, 3952 /*maxsegsz*/AHC_MAXTRANSFER_SIZE, 3953 /*flags*/BUS_DMA_ALLOCNOW, 3954 &ahc->buffer_dmat) != 0) { 3955 return (ENOMEM); 3956 } 3957 3958 ahc->init_level++; 3959 3960 /* 3961 * DMA tag for our command fifos and other data in system memory 3962 * the card's sequencer must be able to access. For initiator 3963 * roles, we need to allocate space for the qinfifo, qoutfifo, 3964 * and untagged_scb arrays each of which are composed of 256 3965 * 1 byte elements. When providing for the target mode role, 3966 * we additionally must provide space for the incoming target 3967 * command fifo. 3968 */ 3969 driver_data_size = 3 * 256 * sizeof(u_int8_t); 3970 if ((ahc->flags & AHC_TARGETMODE) != 0) 3971 driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd); 3972 if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0, 3973 /*lowaddr*/BUS_SPACE_MAXADDR, 3974 /*highaddr*/BUS_SPACE_MAXADDR, 3975 /*filter*/NULL, /*filterarg*/NULL, 3976 driver_data_size, 3977 /*nsegments*/1, 3978 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 3979 /*flags*/0, &ahc->shared_data_dmat) != 0) { 3980 return (ENOMEM); 3981 } 3982 3983 ahc->init_level++; 3984 3985 /* Allocation of driver data */ 3986 if (bus_dmamem_alloc(ahc->shared_data_dmat, (void **)&ahc->qoutfifo, 3987 BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) { 3988 return (ENOMEM); 3989 } 3990 3991 ahc->init_level++; 3992 3993 /* And permanently map it in */ 3994 bus_dmamap_load(ahc->shared_data_dmat, ahc->shared_data_dmamap, 3995 ahc->qoutfifo, driver_data_size, 3996 ahcdmamapcb, &ahc->shared_data_busaddr, /*flags*/0); 3997 3998 ahc->init_level++; 3999 4000 /* Allocate SCB data now that buffer_dmat is initialized) */ 4001 if (ahc->scb_data->maxhscbs == 0) 4002 if (ahcinitscbdata(ahc) != 0) 4003 return (ENOMEM); 4004 4005 ahc->qinfifo = &ahc->qoutfifo[256]; 4006 ahc->untagged_scbs = &ahc->qinfifo[256]; 4007 /* There are no untagged SCBs active yet. */ 4008 for (i = 0; i < 256; i++) 4009 ahc->untagged_scbs[i] = SCB_LIST_NULL; 4010 4011 /* All of our queues are empty */ 4012 for (i = 0; i < 256; i++) 4013 ahc->qoutfifo[i] = SCB_LIST_NULL; 4014 4015 if ((ahc->flags & AHC_TARGETMODE) != 0) { 4016 ahc->targetcmds = (struct target_cmd *)&ahc->untagged_scbs[256]; 4017 4018 /* All target command blocks start out invalid. */ 4019 for (i = 0; i < AHC_TMODE_CMDS; i++) 4020 ahc->targetcmds[i].cmd_valid = 0; 4021 ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1); 4022 ahc_outb(ahc, TQINPOS, 0); 4023 } 4024 4025 /* 4026 * Allocate a tstate to house information for our 4027 * initiator presence on the bus as well as the user 4028 * data for any target mode initiator. 4029 */ 4030 if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) { 4031 printf("%s: unable to allocate tmode_tstate. " 4032 "Failing attach\n", ahc_name(ahc)); 4033 return (-1); 4034 } 4035 4036 if ((ahc->features & AHC_TWIN) != 0) { 4037 if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) { 4038 printf("%s: unable to allocate tmode_tstate. 
" 4039 "Failing attach\n", ahc_name(ahc)); 4040 return (-1); 4041 } 4042 printf("Twin Channel, A SCSI Id=%d, B SCSI Id=%d, primary %c, ", 4043 ahc->our_id, ahc->our_id_b, 4044 ahc->flags & AHC_CHANNEL_B_PRIMARY? 'B': 'A'); 4045 } else { 4046 if ((ahc->features & AHC_WIDE) != 0) { 4047 printf("Wide "); 4048 } else { 4049 printf("Single "); 4050 } 4051 printf("Channel %c, SCSI Id=%d, ", ahc->channel, ahc->our_id); 4052 } 4053 4054 ahc_outb(ahc, SEQ_FLAGS, 0); 4055 4056 if (ahc->scb_data->maxhscbs < AHC_SCB_MAX) { 4057 ahc->flags |= AHC_PAGESCBS; 4058 printf("%d/%d SCBs\n", ahc->scb_data->maxhscbs, AHC_SCB_MAX); 4059 } else { 4060 ahc->flags &= ~AHC_PAGESCBS; 4061 printf("%d SCBs\n", ahc->scb_data->maxhscbs); 4062 } 4063 4064 #ifdef AHC_DEBUG 4065 if (ahc_debug & AHC_SHOWMISC) { 4066 printf("%s: hardware scb %d bytes; kernel scb %d bytes; " 4067 "ahc_dma %d bytes\n", 4068 ahc_name(ahc), 4069 sizeof(struct hardware_scb), 4070 sizeof(struct scb), 4071 sizeof(struct ahc_dma_seg)); 4072 } 4073 #endif /* AHC_DEBUG */ 4074 4075 /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/ 4076 if (ahc->features & AHC_TWIN) { 4077 4078 /* 4079 * The device is gated to channel B after a chip reset, 4080 * so set those values first 4081 */ 4082 term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0; 4083 if ((ahc->features & AHC_ULTRA2) != 0) 4084 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id_b); 4085 else 4086 ahc_outb(ahc, SCSIID, ahc->our_id_b); 4087 scsi_conf = ahc_inb(ahc, SCSICONF + 1); 4088 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4089 |term|ENSTIMER|ACTNEGEN); 4090 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4091 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4092 4093 if ((scsi_conf & RESET_SCSI) != 0 4094 && (ahc->flags & AHC_INITIATORMODE) != 0) 4095 ahc->flags |= AHC_RESET_BUS_B; 4096 4097 /* Select Channel A */ 4098 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 4099 } 4100 term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0; 4101 if ((ahc->features & AHC_ULTRA2) != 0) 4102 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); 4103 else 4104 ahc_outb(ahc, SCSIID, ahc->our_id); 4105 scsi_conf = ahc_inb(ahc, SCSICONF); 4106 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4107 |term 4108 |ENSTIMER|ACTNEGEN); 4109 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4110 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4111 4112 if ((scsi_conf & RESET_SCSI) != 0 4113 && (ahc->flags & AHC_INITIATORMODE) != 0) 4114 ahc->flags |= AHC_RESET_BUS_A; 4115 4116 /* 4117 * Look at the information that board initialization or 4118 * the board bios has left us. In the lower four bits of each 4119 * target's scratch space any value other than 0 indicates 4120 * that we should initiate synchronous transfers. If it's zero, 4121 * the user or the BIOS has decided to disable synchronous 4122 * negotiation to that target so we don't activate the needsdtr 4123 * flag. 4124 */ 4125 ultraenb = 0; 4126 tagenable = ALL_TARGETS_MASK; 4127 4128 /* Grab the disconnection disable table and invert it for our needs */ 4129 if (ahc->flags & AHC_USEDEFAULTS) { 4130 printf("%s: Host Adapter Bios disabled. 
Using default SCSI " 4131 "device parameters\n", ahc_name(ahc)); 4132 ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B| 4133 AHC_TERM_ENB_A|AHC_TERM_ENB_B; 4134 discenable = ALL_TARGETS_MASK; 4135 if ((ahc->features & AHC_ULTRA) != 0) 4136 ultraenb = ALL_TARGETS_MASK; 4137 } else { 4138 discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8) 4139 | ahc_inb(ahc, DISC_DSB)); 4140 if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0) 4141 ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8) 4142 | ahc_inb(ahc, ULTRA_ENB); 4143 } 4144 4145 if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) 4146 max_targ = 7; 4147 4148 for (i = 0; i <= max_targ; i++) { 4149 struct ahc_initiator_tinfo *tinfo; 4150 struct tmode_tstate *tstate; 4151 u_int our_id; 4152 u_int target_id; 4153 char channel; 4154 4155 channel = 'A'; 4156 our_id = ahc->our_id; 4157 target_id = i; 4158 if (i > 7 && (ahc->features & AHC_TWIN) != 0) { 4159 channel = 'B'; 4160 our_id = ahc->our_id_b; 4161 target_id = i % 8; 4162 } 4163 tinfo = ahc_fetch_transinfo(ahc, channel, our_id, 4164 target_id, &tstate); 4165 /* Default to async narrow across the board */ 4166 bzero(tinfo, sizeof(*tinfo)); 4167 if (ahc->flags & AHC_USEDEFAULTS) { 4168 if ((ahc->features & AHC_WIDE) != 0) 4169 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4170 4171 /* 4172 * These will be truncated when we determine the 4173 * connection type we have with the target. 4174 */ 4175 tinfo->user.period = ahc_syncrates->period; 4176 tinfo->user.offset = ~0; 4177 } else { 4178 u_int scsirate; 4179 u_int16_t mask; 4180 4181 /* Take the settings leftover in scratch RAM. */ 4182 scsirate = ahc_inb(ahc, TARG_SCSIRATE + i); 4183 mask = (0x01 << i); 4184 if ((ahc->features & AHC_ULTRA2) != 0) { 4185 u_int offset; 4186 4187 if ((scsirate & SOFS) == 0x0F) { 4188 /* 4189 * Haven't negotiated yet, 4190 * so the format is different. 4191 */ 4192 scsirate = (scsirate & SXFR) >> 4 4193 | (ultraenb & mask) 4194 ? 0x18 : 0x10 4195 | (scsirate & WIDEXFER); 4196 offset = MAX_OFFSET_ULTRA2; 4197 } else 4198 offset = ahc_inb(ahc, TARG_OFFSET + i); 4199 tinfo->user.period = 4200 ahc_find_period(ahc, scsirate, 4201 AHC_SYNCRATE_ULTRA2); 4202 if (offset == 0) 4203 tinfo->user.period = 0; 4204 else 4205 tinfo->user.offset = ~0; 4206 } else if ((scsirate & SOFS) != 0) { 4207 tinfo->user.period = 4208 ahc_find_period(ahc, scsirate, 4209 (ultraenb & mask) 4210 ? AHC_SYNCRATE_ULTRA 4211 : AHC_SYNCRATE_FAST); 4212 if (tinfo->user.period != 0) 4213 tinfo->user.offset = ~0; 4214 } 4215 if ((scsirate & WIDEXFER) != 0 4216 && (ahc->features & AHC_WIDE) != 0) 4217 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4218 } 4219 tstate->ultraenb = ultraenb; 4220 tstate->discenable = discenable; 4221 tstate->tagenable = 0; /* Wait until the XPT says its okay */ 4222 } 4223 ahc->user_discenable = discenable; 4224 ahc->user_tagenable = tagenable; 4225 4226 /* 4227 * Tell the sequencer where it can find the our arrays in memory. 
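* The bus addresses of the hardware SCB array and of the shared data area (qoutfifo, qinfifo, and untagged SCB arrays) are written a byte at a time into the HSCB_ADDR and SCBID_ADDR scratch ram locations below.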
4228 */ 4229 physaddr = ahc->scb_data->hscb_busaddr; 4230 ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF); 4231 ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF); 4232 ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF); 4233 ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF); 4234 4235 physaddr = ahc->shared_data_busaddr; 4236 ahc_outb(ahc, SCBID_ADDR, physaddr & 0xFF); 4237 ahc_outb(ahc, SCBID_ADDR + 1, (physaddr >> 8) & 0xFF); 4238 ahc_outb(ahc, SCBID_ADDR + 2, (physaddr >> 16) & 0xFF); 4239 ahc_outb(ahc, SCBID_ADDR + 3, (physaddr >> 24) & 0xFF); 4240 4241 /* Target mode incomding command fifo */ 4242 physaddr += 3 * 256 * sizeof(u_int8_t); 4243 ahc_outb(ahc, TMODE_CMDADDR, physaddr & 0xFF); 4244 ahc_outb(ahc, TMODE_CMDADDR + 1, (physaddr >> 8) & 0xFF); 4245 ahc_outb(ahc, TMODE_CMDADDR + 2, (physaddr >> 16) & 0xFF); 4246 ahc_outb(ahc, TMODE_CMDADDR + 3, (physaddr >> 24) & 0xFF); 4247 4248 /* 4249 * Initialize the group code to command length table. 4250 * This overrides the values in TARG_SCSIRATE, so only 4251 * setup the table after we have processed that information. 4252 */ 4253 ahc_outb(ahc, CMDSIZE_TABLE, 5); 4254 ahc_outb(ahc, CMDSIZE_TABLE + 1, 9); 4255 ahc_outb(ahc, CMDSIZE_TABLE + 2, 9); 4256 ahc_outb(ahc, CMDSIZE_TABLE + 3, 0); 4257 ahc_outb(ahc, CMDSIZE_TABLE + 4, 15); 4258 ahc_outb(ahc, CMDSIZE_TABLE + 5, 11); 4259 ahc_outb(ahc, CMDSIZE_TABLE + 6, 0); 4260 ahc_outb(ahc, CMDSIZE_TABLE + 7, 0); 4261 4262 /* Tell the sequencer of our initial queue positions */ 4263 ahc_outb(ahc, KERNEL_QINPOS, 0); 4264 ahc_outb(ahc, QINPOS, 0); 4265 ahc_outb(ahc, QOUTPOS, 0); 4266 4267 #ifdef AHC_DEBUG 4268 if (ahc_debug & AHC_SHOWMISC) 4269 printf("NEEDSDTR == 0x%x\nNEEDWDTR == 0x%x\n" 4270 "DISCENABLE == 0x%x\nULTRAENB == 0x%x\n", 4271 ahc->needsdtr_orig, ahc->needwdtr_orig, 4272 discenable, ultraenb); 4273 #endif 4274 4275 /* Don't have any special messages to send to targets */ 4276 ahc_outb(ahc, TARGET_MSG_REQUEST, 0); 4277 ahc_outb(ahc, TARGET_MSG_REQUEST + 1, 0); 4278 4279 /* 4280 * Use the built in queue management registers 4281 * if they are available. 4282 */ 4283 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 4284 ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256); 4285 ahc_outb(ahc, SDSCB_QOFF, 0); 4286 ahc_outb(ahc, SNSCB_QOFF, 0); 4287 ahc_outb(ahc, HNSCB_QOFF, 0); 4288 } 4289 4290 4291 /* We don't have any waiting selections */ 4292 ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL); 4293 4294 /* Our disconnection list is empty too */ 4295 ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL); 4296 4297 /* Message out buffer starts empty */ 4298 ahc_outb(ahc, MSG_OUT, MSG_NOOP); 4299 4300 /* 4301 * Setup the allowed SCSI Sequences based on operational mode. 4302 * If we are a target, we'll enalbe select in operations once 4303 * we've had a lun enabled. 4304 */ 4305 scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP; 4306 if ((ahc->flags & AHC_INITIATORMODE) != 0) 4307 scsiseq_template |= ENRSELI; 4308 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template); 4309 4310 /* 4311 * Load the Sequencer program and Enable the adapter 4312 * in "fast" mode. 4313 */ 4314 if (bootverbose) 4315 printf("%s: Downloading Sequencer Program...", 4316 ahc_name(ahc)); 4317 4318 ahc_loadseq(ahc); 4319 4320 /* We have to wait until after any system dumps... 
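* so the shutdown hook is registered for the shutdown_final stage rather than an earlier one.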
*/ 4321 EVENTHANDLER_REGISTER(shutdown_final, ahc_shutdown, ahc, SHUTDOWN_PRI_DEFAULT); 4322 4323 return (0); 4324 } 4325 4326 static cam_status 4327 ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb, 4328 struct tmode_tstate **tstate, struct tmode_lstate **lstate, 4329 int notfound_failure) 4330 { 4331 int our_id; 4332 4333 /* 4334 * If we are not configured for target mode, someone 4335 * is really confused to be sending this to us. 4336 */ 4337 if ((ahc->flags & AHC_TARGETMODE) == 0) 4338 return (CAM_REQ_INVALID); 4339 4340 /* Range check target and lun */ 4341 4342 /* 4343 * Handle the 'black hole' device that sucks up 4344 * requests to unattached luns on enabled targets. 4345 */ 4346 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD 4347 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 4348 *tstate = NULL; 4349 *lstate = ahc->black_hole; 4350 } else { 4351 u_int max_id; 4352 4353 if (cam_sim_bus(sim) == 0) 4354 our_id = ahc->our_id; 4355 else 4356 our_id = ahc->our_id_b; 4357 4358 max_id = (ahc->features & AHC_WIDE) ? 15 : 7; 4359 if (ccb->ccb_h.target_id > max_id) 4360 return (CAM_TID_INVALID); 4361 4362 if (ccb->ccb_h.target_lun > 7) 4363 return (CAM_LUN_INVALID); 4364 4365 if (ccb->ccb_h.target_id != our_id) { 4366 if ((ahc->features & AHC_MULTI_TID) != 0) { 4367 /* 4368 * Only allow additional targets if 4369 * the initiator role is disabled. 4370 * The hardware cannot handle a re-select-in 4371 * on the initiator id during a re-select-out 4372 * on a different target id. 4373 */ 4374 if ((ahc->flags & AHC_INITIATORMODE) != 0) 4375 return (CAM_TID_INVALID); 4376 } else { 4377 /* 4378 * Only allow our target id to change 4379 * if the initiator role is not configured 4380 * and there are no enabled luns which 4381 * are attached to the currently registered 4382 * scsi id. 
4383 */ 4384 if ((ahc->flags & AHC_INITIATORMODE) != 0 4385 || ahc->enabled_luns > 0) 4386 return (CAM_TID_INVALID); 4387 } 4388 } 4389 4390 *tstate = ahc->enabled_targets[ccb->ccb_h.target_id]; 4391 *lstate = NULL; 4392 if (*tstate != NULL) 4393 *lstate = 4394 (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; 4395 } 4396 4397 if (notfound_failure != 0 && *lstate == NULL) 4398 return (CAM_PATH_INVALID); 4399 4400 return (CAM_REQ_CMP); 4401 } 4402 4403 static void 4404 ahc_action(struct cam_sim *sim, union ccb *ccb) 4405 { 4406 struct ahc_softc *ahc; 4407 struct tmode_lstate *lstate; 4408 u_int target_id; 4409 u_int our_id; 4410 int s; 4411 4412 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n")); 4413 4414 ahc = (struct ahc_softc *)cam_sim_softc(sim); 4415 4416 target_id = ccb->ccb_h.target_id; 4417 our_id = SIM_SCSI_ID(ahc, sim); 4418 4419 switch (ccb->ccb_h.func_code) { 4420 /* Common cases first */ 4421 case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ 4422 case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/ 4423 { 4424 struct tmode_tstate *tstate; 4425 cam_status status; 4426 4427 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, 4428 &lstate, TRUE); 4429 4430 if (status != CAM_REQ_CMP) { 4431 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { 4432 /* Response from the black hole device */ 4433 tstate = NULL; 4434 lstate = ahc->black_hole; 4435 } else { 4436 ccb->ccb_h.status = status; 4437 xpt_done(ccb); 4438 break; 4439 } 4440 } 4441 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 4442 int s; 4443 4444 s = splcam(); 4445 SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h, 4446 sim_links.sle); 4447 ccb->ccb_h.status = CAM_REQ_INPROG; 4448 if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0) 4449 ahc_run_tqinfifo(ahc); 4450 splx(s); 4451 break; 4452 } 4453 4454 /* 4455 * The target_id represents the target we attempt to 4456 * select. In target mode, this is the initiator of 4457 * the original command. 4458 */ 4459 our_id = target_id; 4460 target_id = ccb->csio.init_id; 4461 /* FALLTHROUGH */ 4462 } 4463 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 4464 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 4465 { 4466 struct scb *scb; 4467 struct hardware_scb *hscb; 4468 struct ahc_initiator_tinfo *tinfo; 4469 struct tmode_tstate *tstate; 4470 u_int16_t mask; 4471 4472 /* 4473 * get an scb to use. 4474 */ 4475 if ((scb = ahcgetscb(ahc)) == NULL) { 4476 int s; 4477 4478 s = splcam(); 4479 ahc->flags |= AHC_RESOURCE_SHORTAGE; 4480 splx(s); 4481 xpt_freeze_simq(ahc->sim, /*count*/1); 4482 ahcsetccbstatus(ccb, CAM_REQUEUE_REQ); 4483 xpt_done(ccb); 4484 return; 4485 } 4486 4487 hscb = scb->hscb; 4488 4489 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE, 4490 ("start scb(%p)\n", scb)); 4491 scb->ccb = ccb; 4492 /* 4493 * So we can find the SCB when an abort is requested 4494 */ 4495 ccb->ccb_h.ccb_scb_ptr = scb; 4496 ccb->ccb_h.ccb_ahc_ptr = ahc; 4497 4498 /* 4499 * Put all the arguments for the xfer in the scb 4500 */ 4501 hscb->tcl = ((target_id << 4) & 0xF0) 4502 | (SIM_IS_SCSIBUS_B(ahc, sim) ? 
SELBUSB : 0) 4503 | (ccb->ccb_h.target_lun & 0x07); 4504 4505 mask = SCB_TARGET_MASK(scb); 4506 tinfo = ahc_fetch_transinfo(ahc, SIM_CHANNEL(ahc, sim), our_id, 4507 target_id, &tstate); 4508 4509 hscb->scsirate = tinfo->scsirate; 4510 hscb->scsioffset = tinfo->current.offset; 4511 if ((tstate->ultraenb & mask) != 0) 4512 hscb->control |= ULTRAENB; 4513 4514 if ((tstate->discenable & mask) != 0 4515 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0) 4516 hscb->control |= DISCENB; 4517 4518 if (ccb->ccb_h.func_code == XPT_RESET_DEV) { 4519 hscb->cmdpointer = NULL; 4520 scb->flags |= SCB_DEVICE_RESET; 4521 hscb->control |= MK_MESSAGE; 4522 ahc_execute_scb(scb, NULL, 0, 0); 4523 } else { 4524 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { 4525 if (ahc->pending_device == lstate) { 4526 scb->flags |= SCB_TARGET_IMMEDIATE; 4527 ahc->pending_device = NULL; 4528 } 4529 hscb->control |= TARGET_SCB; 4530 hscb->cmdpointer = IDENTIFY_SEEN; 4531 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { 4532 hscb->cmdpointer |= SPHASE_PENDING; 4533 hscb->status = ccb->csio.scsi_status; 4534 } 4535 4536 /* Overloaded with tag ID */ 4537 hscb->cmdlen = ccb->csio.tag_id; 4538 /* 4539 * Overloaded with the value to place 4540 * in SCSIID for reselection. 4541 */ 4542 hscb->cmdpointer |= 4543 (our_id|(hscb->tcl & 0xF0)) << 16; 4544 } 4545 if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) 4546 hscb->control |= ccb->csio.tag_action; 4547 4548 ahc_setup_data(ahc, &ccb->csio, scb); 4549 } 4550 break; 4551 } 4552 case XPT_NOTIFY_ACK: 4553 case XPT_IMMED_NOTIFY: 4554 { 4555 struct tmode_tstate *tstate; 4556 struct tmode_lstate *lstate; 4557 cam_status status; 4558 4559 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, 4560 &lstate, TRUE); 4561 4562 if (status != CAM_REQ_CMP) { 4563 ccb->ccb_h.status = status; 4564 xpt_done(ccb); 4565 break; 4566 } 4567 SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h, 4568 sim_links.sle); 4569 ccb->ccb_h.status = CAM_REQ_INPROG; 4570 ahc_send_lstate_events(ahc, lstate); 4571 break; 4572 } 4573 case XPT_EN_LUN: /* Enable LUN as a target */ 4574 ahc_handle_en_lun(ahc, sim, ccb); 4575 xpt_done(ccb); 4576 break; 4577 case XPT_ABORT: /* Abort the specified CCB */ 4578 { 4579 ahc_abort_ccb(ahc, sim, ccb); 4580 break; 4581 } 4582 case XPT_SET_TRAN_SETTINGS: 4583 { 4584 struct ahc_devinfo devinfo; 4585 struct ccb_trans_settings *cts; 4586 struct ahc_initiator_tinfo *tinfo; 4587 struct tmode_tstate *tstate; 4588 u_int16_t *discenable; 4589 u_int16_t *tagenable; 4590 u_int update_type; 4591 int s; 4592 4593 cts = &ccb->cts; 4594 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim), 4595 cts->ccb_h.target_id, 4596 cts->ccb_h.target_lun, 4597 SIM_CHANNEL(ahc, sim), 4598 ROLE_UNKNOWN); 4599 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, 4600 devinfo.our_scsiid, 4601 devinfo.target, &tstate); 4602 update_type = 0; 4603 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) { 4604 update_type |= AHC_TRANS_GOAL; 4605 discenable = &tstate->discenable; 4606 tagenable = &tstate->tagenable; 4607 } else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { 4608 update_type |= AHC_TRANS_USER; 4609 discenable = &ahc->user_discenable; 4610 tagenable = &ahc->user_tagenable; 4611 } else { 4612 ccb->ccb_h.status = CAM_REQ_INVALID; 4613 xpt_done(ccb); 4614 break; 4615 } 4616 4617 s = splcam(); 4618 4619 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { 4620 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) 4621 *discenable |= devinfo.target_mask; 4622 else 4623 *discenable &= ~devinfo.target_mask; 4624 } 4625 4626 if 
((cts->valid & CCB_TRANS_TQ_VALID) != 0) { 4627 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) 4628 *tagenable |= devinfo.target_mask; 4629 else 4630 *tagenable &= ~devinfo.target_mask; 4631 } 4632 4633 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { 4634 switch (cts->bus_width) { 4635 case MSG_EXT_WDTR_BUS_16_BIT: 4636 if ((ahc->features & AHC_WIDE) != 0) 4637 break; 4638 /* FALLTHROUGH to 8bit */ 4639 case MSG_EXT_WDTR_BUS_32_BIT: 4640 case MSG_EXT_WDTR_BUS_8_BIT: 4641 default: 4642 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 4643 break; 4644 } 4645 ahc_set_width(ahc, &devinfo, cts->ccb_h.path, 4646 cts->bus_width, update_type, 4647 /*paused*/FALSE); 4648 } 4649 4650 if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0) 4651 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) { 4652 struct ahc_syncrate *syncrate; 4653 u_int maxsync; 4654 4655 if ((ahc->features & AHC_ULTRA2) != 0) 4656 maxsync = AHC_SYNCRATE_ULTRA2; 4657 else if ((ahc->features & AHC_ULTRA) != 0) 4658 maxsync = AHC_SYNCRATE_ULTRA; 4659 else 4660 maxsync = AHC_SYNCRATE_FAST; 4661 4662 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) { 4663 if (update_type & AHC_TRANS_USER) 4664 cts->sync_offset = tinfo->user.offset; 4665 else 4666 cts->sync_offset = tinfo->goal.offset; 4667 } 4668 4669 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) { 4670 if (update_type & AHC_TRANS_USER) 4671 cts->sync_period = tinfo->user.period; 4672 else 4673 cts->sync_period = tinfo->goal.period; 4674 } 4675 4676 syncrate = ahc_find_syncrate(ahc, &cts->sync_period, 4677 maxsync); 4678 ahc_validate_offset(ahc, syncrate, &cts->sync_offset, 4679 MSG_EXT_WDTR_BUS_8_BIT); 4680 4681 /* We use a period of 0 to represent async */ 4682 if (cts->sync_offset == 0) 4683 cts->sync_period = 0; 4684 4685 ahc_set_syncrate(ahc, &devinfo, cts->ccb_h.path, 4686 syncrate, cts->sync_period, 4687 cts->sync_offset, update_type, 4688 /*paused*/FALSE); 4689 } 4690 splx(s); 4691 ccb->ccb_h.status = CAM_REQ_CMP; 4692 xpt_done(ccb); 4693 break; 4694 } 4695 case XPT_GET_TRAN_SETTINGS: 4696 /* Get default/user set transfer settings for the target */ 4697 { 4698 struct ahc_devinfo devinfo; 4699 struct ccb_trans_settings *cts; 4700 struct ahc_initiator_tinfo *targ_info; 4701 struct tmode_tstate *tstate; 4702 struct ahc_transinfo *tinfo; 4703 int s; 4704 4705 cts = &ccb->cts; 4706 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim), 4707 cts->ccb_h.target_id, 4708 cts->ccb_h.target_lun, 4709 SIM_CHANNEL(ahc, sim), 4710 ROLE_UNKNOWN); 4711 targ_info = ahc_fetch_transinfo(ahc, devinfo.channel, 4712 devinfo.our_scsiid, 4713 devinfo.target, &tstate); 4714 4715 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) 4716 tinfo = &targ_info->current; 4717 else 4718 tinfo = &targ_info->user; 4719 4720 s = splcam(); 4721 4722 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 4723 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { 4724 if ((ahc->user_discenable & devinfo.target_mask) != 0) 4725 cts->flags |= CCB_TRANS_DISC_ENB; 4726 4727 if ((ahc->user_tagenable & devinfo.target_mask) != 0) 4728 cts->flags |= CCB_TRANS_TAG_ENB; 4729 } else { 4730 if ((tstate->discenable & devinfo.target_mask) != 0) 4731 cts->flags |= CCB_TRANS_DISC_ENB; 4732 4733 if ((tstate->tagenable & devinfo.target_mask) != 0) 4734 cts->flags |= CCB_TRANS_TAG_ENB; 4735 } 4736 4737 cts->sync_period = tinfo->period; 4738 cts->sync_offset = tinfo->offset; 4739 cts->bus_width = tinfo->width; 4740 4741 splx(s); 4742 4743 cts->valid = CCB_TRANS_SYNC_RATE_VALID 4744 | CCB_TRANS_SYNC_OFFSET_VALID 4745 | 
CCB_TRANS_BUS_WIDTH_VALID 4746 | CCB_TRANS_DISC_VALID 4747 | CCB_TRANS_TQ_VALID; 4748 4749 ccb->ccb_h.status = CAM_REQ_CMP; 4750 xpt_done(ccb); 4751 break; 4752 } 4753 case XPT_CALC_GEOMETRY: 4754 { 4755 struct ccb_calc_geometry *ccg; 4756 u_int32_t size_mb; 4757 u_int32_t secs_per_cylinder; 4758 int extended; 4759 4760 ccg = &ccb->ccg; 4761 size_mb = ccg->volume_size 4762 / ((1024L * 1024L) / ccg->block_size); 4763 extended = SIM_IS_SCSIBUS_B(ahc, sim) 4764 ? ahc->flags & AHC_EXTENDED_TRANS_B 4765 : ahc->flags & AHC_EXTENDED_TRANS_A; 4766 4767 if (size_mb > 1024 && extended) { 4768 ccg->heads = 255; 4769 ccg->secs_per_track = 63; 4770 } else { 4771 ccg->heads = 64; 4772 ccg->secs_per_track = 32; 4773 } 4774 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 4775 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 4776 ccb->ccb_h.status = CAM_REQ_CMP; 4777 xpt_done(ccb); 4778 break; 4779 } 4780 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 4781 { 4782 int found; 4783 4784 s = splcam(); 4785 found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim), 4786 /*initiate reset*/TRUE); 4787 splx(s); 4788 if (bootverbose) { 4789 xpt_print_path(SIM_PATH(ahc, sim)); 4790 printf("SCSI bus reset delivered. " 4791 "%d SCBs aborted.\n", found); 4792 } 4793 ccb->ccb_h.status = CAM_REQ_CMP; 4794 xpt_done(ccb); 4795 break; 4796 } 4797 case XPT_TERM_IO: /* Terminate the I/O process */ 4798 /* XXX Implement */ 4799 ccb->ccb_h.status = CAM_REQ_INVALID; 4800 xpt_done(ccb); 4801 break; 4802 case XPT_PATH_INQ: /* Path routing inquiry */ 4803 { 4804 struct ccb_pathinq *cpi = &ccb->cpi; 4805 4806 cpi->version_num = 1; /* XXX??? */ 4807 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; 4808 if ((ahc->features & AHC_WIDE) != 0) 4809 cpi->hba_inquiry |= PI_WIDE_16; 4810 if ((ahc->flags & AHC_TARGETMODE) != 0) { 4811 cpi->target_sprt = PIT_PROCESSOR 4812 | PIT_DISCONNECT 4813 | PIT_TERM_IO; 4814 } else { 4815 cpi->target_sprt = 0; 4816 } 4817 cpi->hba_misc = (ahc->flags & AHC_INITIATORMODE) 4818 ? 0 : PIM_NOINITIATOR; 4819 cpi->hba_eng_cnt = 0; 4820 cpi->max_target = (ahc->features & AHC_WIDE) ? 15 : 7; 4821 cpi->max_lun = 7; 4822 if (SIM_IS_SCSIBUS_B(ahc, sim)) { 4823 cpi->initiator_id = ahc->our_id_b; 4824 if ((ahc->flags & AHC_RESET_BUS_B) == 0) 4825 cpi->hba_misc |= PIM_NOBUSRESET; 4826 } else { 4827 cpi->initiator_id = ahc->our_id; 4828 if ((ahc->flags & AHC_RESET_BUS_A) == 0) 4829 cpi->hba_misc |= PIM_NOBUSRESET; 4830 } 4831 cpi->bus_id = cam_sim_bus(sim); 4832 cpi->base_transfer_speed = 3300; 4833 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 4834 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN); 4835 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 4836 cpi->unit_number = cam_sim_unit(sim); 4837 cpi->ccb_h.status = CAM_REQ_CMP; 4838 xpt_done(ccb); 4839 break; 4840 } 4841 default: 4842 ccb->ccb_h.status = CAM_REQ_INVALID; 4843 xpt_done(ccb); 4844 break; 4845 } 4846 } 4847 4848 static void 4849 ahc_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) 4850 { 4851 struct ahc_softc *ahc; 4852 struct cam_sim *sim; 4853 4854 sim = (struct cam_sim *)callback_arg; 4855 ahc = (struct ahc_softc *)cam_sim_softc(sim); 4856 switch (code) { 4857 case AC_LOST_DEVICE: 4858 { 4859 struct ahc_devinfo devinfo; 4860 int s; 4861 4862 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim), 4863 xpt_path_target_id(path), 4864 xpt_path_lun_id(path), 4865 SIM_CHANNEL(ahc, sim), 4866 ROLE_UNKNOWN); 4867 4868 /* 4869 * Revert to async/narrow transfers 4870 * for the next device. 
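* Both the goal and the current settings are updated so that a full renegotiation occurs with whatever device next appears at this ID.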
4871 */ 4872 s = splcam(); 4873 ahc_set_width(ahc, &devinfo, path, MSG_EXT_WDTR_BUS_8_BIT, 4874 AHC_TRANS_GOAL|AHC_TRANS_CUR, 4875 /*paused*/FALSE); 4876 ahc_set_syncrate(ahc, &devinfo, path, /*syncrate*/NULL, 4877 /*period*/0, /*offset*/0, 4878 AHC_TRANS_GOAL|AHC_TRANS_CUR, 4879 /*paused*/FALSE); 4880 splx(s); 4881 break; 4882 } 4883 default: 4884 break; 4885 } 4886 } 4887 4888 static void 4889 ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments, 4890 int error) 4891 { 4892 struct scb *scb; 4893 union ccb *ccb; 4894 struct ahc_softc *ahc; 4895 int s; 4896 4897 scb = (struct scb *)arg; 4898 ccb = scb->ccb; 4899 ahc = (struct ahc_softc *)ccb->ccb_h.ccb_ahc_ptr; 4900 4901 if (nsegments != 0) { 4902 struct ahc_dma_seg *sg; 4903 bus_dma_segment_t *end_seg; 4904 bus_dmasync_op_t op; 4905 4906 end_seg = dm_segs + nsegments; 4907 4908 /* Copy the first SG into the data pointer area */ 4909 scb->hscb->SG_pointer = scb->sg_list_phys; 4910 scb->hscb->data = dm_segs->ds_addr; 4911 scb->hscb->datalen = dm_segs->ds_len; 4912 dm_segs++; 4913 4914 /* Copy the remaining segments into our SG list */ 4915 sg = scb->sg_list; 4916 while (dm_segs < end_seg) { 4917 sg->addr = dm_segs->ds_addr; 4918 sg->len = dm_segs->ds_len; 4919 sg++; 4920 dm_segs++; 4921 } 4922 4923 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 4924 op = BUS_DMASYNC_PREREAD; 4925 else 4926 op = BUS_DMASYNC_PREWRITE; 4927 4928 bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op); 4929 4930 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { 4931 scb->hscb->cmdpointer |= DPHASE_PENDING; 4932 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 4933 scb->hscb->cmdpointer |= (TARGET_DATA_IN << 8); 4934 } 4935 } else { 4936 scb->hscb->SG_pointer = 0; 4937 scb->hscb->data = 0; 4938 scb->hscb->datalen = 0; 4939 } 4940 4941 scb->sg_count = scb->hscb->SG_count = nsegments; 4942 4943 s = splcam(); 4944 4945 /* 4946 * Last time we need to check if this SCB needs to 4947 * be aborted. 
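* If the CCB is no longer in CAM_REQ_INPROG, an abort or timeout has already changed its status; unload the DMA map, free the SCB, and complete the CCB instead of queuing it.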
4948 */ 4949 if (ahc_ccb_status(ccb) != CAM_REQ_INPROG) { 4950 if (nsegments != 0) 4951 bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap); 4952 ahcfreescb(ahc, scb); 4953 xpt_done(ccb); 4954 splx(s); 4955 return; 4956 } 4957 4958 /* Busy this tcl if we are untagged */ 4959 if ((scb->hscb->control & TAG_ENB) == 0) 4960 ahc_busy_tcl(ahc, scb); 4961 4962 LIST_INSERT_HEAD(&ahc->pending_ccbs, &ccb->ccb_h, 4963 sim_links.le); 4964 4965 scb->flags |= SCB_ACTIVE; 4966 ccb->ccb_h.status |= CAM_SIM_QUEUED; 4967 4968 ccb->ccb_h.timeout_ch = 4969 timeout(ahc_timeout, (caddr_t)scb, 4970 (ccb->ccb_h.timeout * hz) / 1000); 4971 4972 if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) { 4973 #if 0 4974 printf("Continueing Immediate Command %d:%d\n", 4975 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 4976 #endif 4977 pause_sequencer(ahc); 4978 if ((ahc->flags & AHC_PAGESCBS) == 0) 4979 ahc_outb(ahc, SCBPTR, scb->hscb->tag); 4980 ahc_outb(ahc, SCB_TAG, scb->hscb->tag); 4981 ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP); 4982 unpause_sequencer(ahc, /*unpause_always*/FALSE); 4983 } else { 4984 4985 ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag; 4986 4987 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 4988 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); 4989 } else { 4990 pause_sequencer(ahc); 4991 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); 4992 unpause_sequencer(ahc, /*unpause_always*/FALSE); 4993 } 4994 } 4995 4996 splx(s); 4997 } 4998 4999 static void 5000 ahc_poll(struct cam_sim *sim) 5001 { 5002 ahc_intr(cam_sim_softc(sim)); 5003 } 5004 5005 static void 5006 ahc_setup_data(struct ahc_softc *ahc, struct ccb_scsiio *csio, 5007 struct scb *scb) 5008 { 5009 struct hardware_scb *hscb; 5010 struct ccb_hdr *ccb_h; 5011 5012 hscb = scb->hscb; 5013 ccb_h = &csio->ccb_h; 5014 5015 if (ccb_h->func_code == XPT_SCSI_IO) { 5016 hscb->cmdlen = csio->cdb_len; 5017 if ((ccb_h->flags & CAM_CDB_POINTER) != 0) { 5018 if ((ccb_h->flags & CAM_CDB_PHYS) == 0) 5019 if (hscb->cmdlen <= 16) { 5020 memcpy(hscb->cmdstore, 5021 csio->cdb_io.cdb_ptr, 5022 hscb->cmdlen); 5023 hscb->cmdpointer = 5024 hscb->cmdstore_busaddr; 5025 } else { 5026 ahcsetccbstatus(scb->ccb, 5027 CAM_REQ_INVALID); 5028 xpt_done(scb->ccb); 5029 ahcfreescb(ahc, scb); 5030 return; 5031 } 5032 else 5033 hscb->cmdpointer = 5034 ((intptr_t)csio->cdb_io.cdb_ptr) & 0xffffffff; 5035 } else { 5036 /* 5037 * CCB CDB Data Storage area is only 16 bytes 5038 * so no additional testing is required 5039 */ 5040 memcpy(hscb->cmdstore, csio->cdb_io.cdb_bytes, 5041 hscb->cmdlen); 5042 hscb->cmdpointer = hscb->cmdstore_busaddr; 5043 } 5044 } 5045 5046 /* Only use S/G if there is a transfer */ 5047 if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 5048 if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) { 5049 /* We've been given a pointer to a single buffer */ 5050 if ((ccb_h->flags & CAM_DATA_PHYS) == 0) { 5051 int s; 5052 int error; 5053 5054 s = splsoftvm(); 5055 error = bus_dmamap_load(ahc->buffer_dmat, 5056 scb->dmamap, 5057 csio->data_ptr, 5058 csio->dxfer_len, 5059 ahc_execute_scb, 5060 scb, /*flags*/0); 5061 if (error == EINPROGRESS) { 5062 /* 5063 * So as to maintain ordering, 5064 * freeze the controller queue 5065 * until our mapping is 5066 * returned. 
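* Setting CAM_RELEASE_SIMQ on the CCB should let the queue thaw automatically once the deferred mapping completes and the CCB is returned.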
5067 */ 5068 xpt_freeze_simq(ahc->sim, 5069 /*count*/1); 5070 scb->ccb->ccb_h.status |= 5071 CAM_RELEASE_SIMQ; 5072 } 5073 splx(s); 5074 } else { 5075 struct bus_dma_segment seg; 5076 5077 /* Pointer to physical buffer */ 5078 if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE) 5079 panic("ahc_setup_data - Transfer size " 5080 "larger than can device max"); 5081 5082 seg.ds_addr = (bus_addr_t)csio->data_ptr; 5083 seg.ds_len = csio->dxfer_len; 5084 ahc_execute_scb(scb, &seg, 1, 0); 5085 } 5086 } else { 5087 struct bus_dma_segment *segs; 5088 5089 if ((ccb_h->flags & CAM_DATA_PHYS) != 0) 5090 panic("ahc_setup_data - Physical segment " 5091 "pointers unsupported"); 5092 5093 if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0) 5094 panic("ahc_setup_data - Virtual segment " 5095 "addresses unsupported"); 5096 5097 /* Just use the segments provided */ 5098 segs = (struct bus_dma_segment *)csio->data_ptr; 5099 ahc_execute_scb(scb, segs, csio->sglist_cnt, 0); 5100 } 5101 } else { 5102 ahc_execute_scb(scb, NULL, 0, 0); 5103 } 5104 } 5105 5106 static void 5107 ahc_freeze_devq(struct ahc_softc *ahc, struct cam_path *path) 5108 { 5109 int target; 5110 char channel; 5111 int lun; 5112 5113 target = xpt_path_target_id(path); 5114 lun = xpt_path_lun_id(path); 5115 channel = xpt_path_sim(path)->bus_id == 0 ? 'A' : 'B'; 5116 5117 ahc_search_qinfifo(ahc, target, channel, lun, 5118 /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN, 5119 CAM_REQUEUE_REQ, SEARCH_COMPLETE); 5120 } 5121 5122 static void 5123 ahcallocscbs(struct ahc_softc *ahc) 5124 { 5125 struct scb_data *scb_data; 5126 struct scb *next_scb; 5127 struct sg_map_node *sg_map; 5128 bus_addr_t physaddr; 5129 struct ahc_dma_seg *segs; 5130 int newcount; 5131 int i; 5132 5133 scb_data = ahc->scb_data; 5134 if (scb_data->numscbs >= AHC_SCB_MAX) 5135 /* Can't allocate any more */ 5136 return; 5137 5138 next_scb = &scb_data->scbarray[scb_data->numscbs]; 5139 5140 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); 5141 5142 if (sg_map == NULL) 5143 return; 5144 5145 /* Allocate S/G space for the next batch of SCBS */ 5146 if (bus_dmamem_alloc(scb_data->sg_dmat, (void **)&sg_map->sg_vaddr, 5147 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { 5148 free(sg_map, M_DEVBUF); 5149 return; 5150 } 5151 5152 SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); 5153 5154 bus_dmamap_load(scb_data->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr, 5155 PAGE_SIZE, ahcdmamapcb, &sg_map->sg_physaddr, 5156 /*flags*/0); 5157 5158 segs = sg_map->sg_vaddr; 5159 physaddr = sg_map->sg_physaddr; 5160 5161 newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg))); 5162 for (i = 0; scb_data->numscbs < AHC_SCB_MAX && i < newcount; i++) { 5163 int error; 5164 5165 next_scb->sg_list = segs; 5166 next_scb->sg_list_phys = physaddr; 5167 next_scb->flags = SCB_FREE; 5168 error = bus_dmamap_create(ahc->buffer_dmat, /*flags*/0, 5169 &next_scb->dmamap); 5170 if (error != 0) 5171 break; 5172 next_scb->hscb = &scb_data->hscbs[scb_data->numscbs]; 5173 next_scb->hscb->tag = ahc->scb_data->numscbs; 5174 next_scb->hscb->cmdstore_busaddr = 5175 ahc_hscb_busaddr(ahc, next_scb->hscb->tag) 5176 + offsetof(struct hardware_scb, cmdstore); 5177 SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, next_scb, links); 5178 segs += AHC_NSEG; 5179 physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg)); 5180 next_scb++; 5181 ahc->scb_data->numscbs++; 5182 } 5183 } 5184 5185 #ifdef AHC_DUMP_SEQ 5186 static void 5187 ahc_dumpseq(struct ahc_softc* ahc) 5188 { 5189 int i; 5190 int max_prog; 5191 5192 if ((ahc->chip & AHC_BUS_MASK) < AHC_PCI) 5193 
max_prog = 448; 5194 else if ((ahc->features & AHC_ULTRA2) != 0) 5195 max_prog = 768; 5196 else 5197 max_prog = 512; 5198 5199 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); 5200 ahc_outb(ahc, SEQADDR0, 0); 5201 ahc_outb(ahc, SEQADDR1, 0); 5202 for (i = 0; i < max_prog; i++) { 5203 u_int8_t ins_bytes[4]; 5204 5205 ahc_insb(ahc, SEQRAM, ins_bytes, 4); 5206 printf("0x%2.2x%2.2x%2.2x%2.2x\n", 5207 ins_bytes[0], 5208 ins_bytes[1], 5209 ins_bytes[2], 5210 ins_bytes[3]); 5211 } 5212 } 5213 #endif 5214 5215 static void 5216 ahc_loadseq(struct ahc_softc *ahc) 5217 { 5218 struct patch *cur_patch; 5219 int i; 5220 int downloaded; 5221 int skip_addr; 5222 u_int8_t download_consts[4]; 5223 5224 /* Setup downloadable constant table */ 5225 #if 0 5226 /* No downloaded constants are currently defined. */ 5227 download_consts[TMODE_NUMCMDS] = ahc->num_targetcmds; 5228 #endif 5229 5230 cur_patch = patches; 5231 downloaded = 0; 5232 skip_addr = 0; 5233 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); 5234 ahc_outb(ahc, SEQADDR0, 0); 5235 ahc_outb(ahc, SEQADDR1, 0); 5236 5237 for (i = 0; i < sizeof(seqprog)/4; i++) { 5238 if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) { 5239 /* 5240 * Don't download this instruction as it 5241 * is in a patch that was removed. 5242 */ 5243 continue; 5244 } 5245 ahc_download_instr(ahc, i, download_consts); 5246 downloaded++; 5247 } 5248 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE); 5249 restart_sequencer(ahc); 5250 5251 if (bootverbose) 5252 printf(" %d instructions downloaded\n", downloaded); 5253 } 5254 5255 static int 5256 ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch, 5257 int start_instr, int *skip_addr) 5258 { 5259 struct patch *cur_patch; 5260 struct patch *last_patch; 5261 int num_patches; 5262 5263 num_patches = sizeof(patches)/sizeof(struct patch); 5264 last_patch = &patches[num_patches]; 5265 cur_patch = *start_patch; 5266 5267 while (cur_patch < last_patch && start_instr == cur_patch->begin) { 5268 5269 if (cur_patch->patch_func(ahc) == 0) { 5270 5271 /* Start rejecting code */ 5272 *skip_addr = start_instr + cur_patch->skip_instr; 5273 cur_patch += cur_patch->skip_patch; 5274 } else { 5275 /* Accepted this patch. Advance to the next 5276 * one and wait for our intruction pointer to 5277 * hit this point. 
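* Rejected patches instead advance skip_addr so that the instructions they cover are never downloaded.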
5278 */ 5279 cur_patch++; 5280 } 5281 } 5282 5283 *start_patch = cur_patch; 5284 if (start_instr < *skip_addr) 5285 /* Still skipping */ 5286 return (0); 5287 5288 return (1); 5289 } 5290 5291 static void 5292 ahc_download_instr(struct ahc_softc *ahc, int instrptr, u_int8_t *dconsts) 5293 { 5294 union ins_formats instr; 5295 struct ins_format1 *fmt1_ins; 5296 struct ins_format3 *fmt3_ins; 5297 u_int opcode; 5298 5299 /* Structure copy */ 5300 instr = *(union ins_formats*)&seqprog[instrptr * 4]; 5301 5302 fmt1_ins = &instr.format1; 5303 fmt3_ins = NULL; 5304 5305 /* Pull the opcode */ 5306 opcode = instr.format1.opcode; 5307 switch (opcode) { 5308 case AIC_OP_JMP: 5309 case AIC_OP_JC: 5310 case AIC_OP_JNC: 5311 case AIC_OP_CALL: 5312 case AIC_OP_JNE: 5313 case AIC_OP_JNZ: 5314 case AIC_OP_JE: 5315 case AIC_OP_JZ: 5316 { 5317 struct patch *cur_patch; 5318 int address_offset; 5319 u_int address; 5320 int skip_addr; 5321 int i; 5322 5323 fmt3_ins = &instr.format3; 5324 address_offset = 0; 5325 address = fmt3_ins->address; 5326 cur_patch = patches; 5327 skip_addr = 0; 5328 5329 for (i = 0; i < address;) { 5330 5331 ahc_check_patch(ahc, &cur_patch, i, &skip_addr); 5332 5333 if (skip_addr > i) { 5334 int end_addr; 5335 5336 end_addr = MIN(address, skip_addr); 5337 address_offset += end_addr - i; 5338 i = skip_addr; 5339 } else { 5340 i++; 5341 } 5342 } 5343 address -= address_offset; 5344 fmt3_ins->address = address; 5345 /* FALLTHROUGH */ 5346 } 5347 case AIC_OP_OR: 5348 case AIC_OP_AND: 5349 case AIC_OP_XOR: 5350 case AIC_OP_ADD: 5351 case AIC_OP_ADC: 5352 case AIC_OP_BMOV: 5353 if (fmt1_ins->parity != 0) { 5354 fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; 5355 } 5356 fmt1_ins->parity = 0; 5357 /* FALLTHROUGH */ 5358 case AIC_OP_ROL: 5359 if ((ahc->features & AHC_ULTRA2) != 0) { 5360 int i, count; 5361 5362 /* Calculate odd parity for the instruction */ 5363 for (i = 0, count = 0; i < 31; i++) { 5364 u_int32_t mask; 5365 5366 mask = 0x01 << i; 5367 if ((instr.integer & mask) != 0) 5368 count++; 5369 } 5370 if ((count & 0x01) == 0) 5371 instr.format1.parity = 1; 5372 } else { 5373 /* Compress the instruction for older sequencers */ 5374 if (fmt3_ins != NULL) { 5375 instr.integer = 5376 fmt3_ins->immediate 5377 | (fmt3_ins->source << 8) 5378 | (fmt3_ins->address << 16) 5379 | (fmt3_ins->opcode << 25); 5380 } else { 5381 instr.integer = 5382 fmt1_ins->immediate 5383 | (fmt1_ins->source << 8) 5384 | (fmt1_ins->destination << 16) 5385 | (fmt1_ins->ret << 24) 5386 | (fmt1_ins->opcode << 25); 5387 } 5388 } 5389 ahc_outsb(ahc, SEQRAM, instr.bytes, 4); 5390 break; 5391 default: 5392 panic("Unknown opcode encountered in seq program"); 5393 break; 5394 } 5395 } 5396 5397 static void 5398 ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) { 5399 5400 if ((scb->flags & SCB_RECOVERY_SCB) == 0) { 5401 struct ccb_hdr *ccbh; 5402 5403 scb->flags |= SCB_RECOVERY_SCB; 5404 5405 /* 5406 * Take all queued, but not sent SCBs out of the equation. 5407 * Also ensure that no new CCBs are queued to us while we 5408 * try to fix this problem. 5409 */ 5410 if ((scb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { 5411 xpt_freeze_simq(ahc->sim, /*count*/1); 5412 scb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5413 } 5414 5415 /* 5416 * Go through all of our pending SCBs and remove 5417 * any scheduled timeouts for them. We will reschedule 5418 * them after we've successfully fixed this problem. 
5419 */ 5420 ccbh = ahc->pending_ccbs.lh_first; 5421 while (ccbh != NULL) { 5422 struct scb *pending_scb; 5423 5424 pending_scb = (struct scb *)ccbh->ccb_scb_ptr; 5425 untimeout(ahc_timeout, pending_scb, ccbh->timeout_ch); 5426 ccbh = ccbh->sim_links.le.le_next; 5427 } 5428 } 5429 } 5430 5431 static void 5432 ahc_timeout(void *arg) 5433 { 5434 struct scb *scb; 5435 struct ahc_softc *ahc; 5436 int s, found; 5437 u_int bus_state; 5438 int target; 5439 int lun; 5440 char channel; 5441 5442 scb = (struct scb *)arg; 5443 ahc = (struct ahc_softc *)scb->ccb->ccb_h.ccb_ahc_ptr; 5444 5445 s = splcam(); 5446 5447 /* 5448 * Ensure that the card doesn't do anything 5449 * behind our back. Also make sure that we 5450 * didn't "just" miss an interrupt that would 5451 * affect this timeout. 5452 */ 5453 do { 5454 ahc_intr(ahc); 5455 pause_sequencer(ahc); 5456 } while (ahc_inb(ahc, INTSTAT) & INT_PEND); 5457 5458 if ((scb->flags & SCB_ACTIVE) == 0) { 5459 /* Previous timeout took care of me already */ 5460 printf("Timedout SCB handled by another timeout\n"); 5461 unpause_sequencer(ahc, /*unpause_always*/TRUE); 5462 splx(s); 5463 return; 5464 } 5465 5466 target = SCB_TARGET(scb); 5467 channel = SCB_CHANNEL(scb); 5468 lun = SCB_LUN(scb); 5469 5470 xpt_print_path(scb->ccb->ccb_h.path); 5471 printf("SCB 0x%x - timed out ", scb->hscb->tag); 5472 /* 5473 * Take a snapshot of the bus state and print out 5474 * some information so we can track down driver bugs. 5475 */ 5476 bus_state = ahc_inb(ahc, LASTPHASE); 5477 5478 switch(bus_state) 5479 { 5480 case P_DATAOUT: 5481 printf("in dataout phase"); 5482 break; 5483 case P_DATAIN: 5484 printf("in datain phase"); 5485 break; 5486 case P_COMMAND: 5487 printf("in command phase"); 5488 break; 5489 case P_MESGOUT: 5490 printf("in message out phase"); 5491 break; 5492 case P_STATUS: 5493 printf("in status phase"); 5494 break; 5495 case P_MESGIN: 5496 printf("in message in phase"); 5497 break; 5498 case P_BUSFREE: 5499 printf("while idle, LASTPHASE == 0x%x", 5500 bus_state); 5501 break; 5502 default: 5503 /* 5504 * We aren't in a valid phase, so assume we're 5505 * idle. 5506 */ 5507 printf("invalid phase, LASTPHASE == 0x%x", 5508 bus_state); 5509 bus_state = P_BUSFREE; 5510 break; 5511 } 5512 5513 printf(", SEQADDR == 0x%x\n", 5514 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); 5515 5516 #if 0 5517 printf(", SCSISIGI == 0x%x\n", ahc_inb(ahc, SCSISIGI)); 5518 printf("SIMODE1 = 0x%x\n", ahc_inb(ahc, SIMODE1)); 5519 printf("INTSTAT = 0x%x\n", ahc_inb(ahc, INTSTAT)); 5520 printf("SSTAT1 == 0x%x\n", ahc_inb(ahc, SSTAT1)); 5521 printf("SCSIRATE == 0x%x\n", ahc_inb(ahc, SCSIRATE)); 5522 printf("CCSCBCTL == 0x%x\n", ahc_inb(ahc, CCSCBCTL)); 5523 printf("CCSCBCNT == 0x%x\n", ahc_inb(ahc, CCSCBCNT)); 5524 printf("DFCNTRL == 0x%x\n", ahc_inb(ahc, DFCNTRL)); 5525 printf("DFSTATUS == 0x%x\n", ahc_inb(ahc, DFSTATUS)); 5526 printf("CCHCNT == 0x%x\n", ahc_inb(ahc, CCHCNT)); 5527 #endif 5528 if (scb->flags & SCB_DEVICE_RESET) { 5529 /* 5530 * Been down this road before. 5531 * Do a full bus reset. 5532 */ 5533 bus_reset: 5534 ahcsetccbstatus(scb->ccb, CAM_CMD_TIMEOUT); 5535 found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE); 5536 printf("%s: Issued Channel %c Bus Reset. " 5537 "%d SCBs aborted\n", ahc_name(ahc), channel, found); 5538 } else { 5539 /* 5540 * If we are a target, transition to bus free and report 5541 * the timeout. 
5542 * 5543 * The target/initiator that is holding up the bus may not 5544 * be the same as the one that triggered this timeout 5545 * (different commands have different timeout lengths). 5546 * If the bus is idle and we are actiing as the initiator 5547 * for this request, queue a BDR message to the timed out 5548 * target. Otherwise, if the timed out transaction is 5549 * active: 5550 * Initiator transaction: 5551 * Stuff the message buffer with a BDR message and assert 5552 * ATN in the hopes that the target will let go of the bus 5553 * and go to the mesgout phase. If this fails, we'll 5554 * get another timeout 2 seconds later which will attempt 5555 * a bus reset. 5556 * 5557 * Target transaction: 5558 * Transition to BUS FREE and report the error. 5559 * It's good to be the target! 5560 */ 5561 u_int active_scb_index; 5562 5563 active_scb_index = ahc_inb(ahc, SCB_TAG); 5564 5565 if (bus_state != P_BUSFREE 5566 && (active_scb_index < ahc->scb_data->numscbs)) { 5567 struct scb *active_scb; 5568 5569 /* 5570 * If the active SCB is not from our device, 5571 * assume that another device is hogging the bus 5572 * and wait for it's timeout to expire before 5573 * taking additional action. 5574 */ 5575 active_scb = &ahc->scb_data->scbarray[active_scb_index]; 5576 if (active_scb->hscb->tcl != scb->hscb->tcl 5577 && (scb->flags & SCB_OTHERTCL_TIMEOUT) == 0) { 5578 struct ccb_hdr *ccbh; 5579 u_int newtimeout; 5580 5581 xpt_print_path(scb->ccb->ccb_h.path); 5582 printf("Other SCB Timeout\n"); 5583 scb->flags |= SCB_OTHERTCL_TIMEOUT; 5584 newtimeout = MAX(active_scb->ccb->ccb_h.timeout, 5585 scb->ccb->ccb_h.timeout); 5586 ccbh = &scb->ccb->ccb_h; 5587 scb->ccb->ccb_h.timeout_ch = 5588 timeout(ahc_timeout, scb, 5589 (newtimeout * hz) / 1000); 5590 splx(s); 5591 return; 5592 } 5593 5594 /* It's us */ 5595 if ((scb->hscb->control & TARGET_SCB) != 0) { 5596 5597 /* 5598 * Send back any queued up transactions 5599 * and properly record the error condition. 5600 */ 5601 ahc_freeze_devq(ahc, scb->ccb->ccb_h.path); 5602 ahcsetccbstatus(scb->ccb, CAM_CMD_TIMEOUT); 5603 ahc_freeze_ccb(scb->ccb); 5604 ahc_done(ahc, scb); 5605 5606 /* Will clear us from the bus */ 5607 restart_sequencer(ahc); 5608 return; 5609 } 5610 5611 ahc_set_recoveryscb(ahc, active_scb); 5612 ahc_outb(ahc, MSG_OUT, MSG_BUS_DEV_RESET); 5613 ahc_outb(ahc, SCSISIGO, bus_state|ATNO); 5614 xpt_print_path(active_scb->ccb->ccb_h.path); 5615 printf("BDR message in message buffer\n"); 5616 active_scb->flags |= SCB_DEVICE_RESET; 5617 active_scb->ccb->ccb_h.timeout_ch = 5618 timeout(ahc_timeout, (caddr_t)active_scb, 2 * hz); 5619 unpause_sequencer(ahc, /*unpause_always*/FALSE); 5620 } else { 5621 int disconnected; 5622 5623 /* XXX Shouldn't panic. Just punt instead */ 5624 if ((scb->hscb->control & TARGET_SCB) != 0) 5625 panic("Timed-out target SCB but bus idle"); 5626 5627 if (bus_state != P_BUSFREE 5628 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) { 5629 /* XXX What happened to the SCB? */ 5630 /* Hung target selection. Goto busfree */ 5631 printf("%s: Hung target selection\n", 5632 ahc_name(ahc)); 5633 restart_sequencer(ahc); 5634 return; 5635 } 5636 5637 if (ahc_search_qinfifo(ahc, target, channel, lun, 5638 scb->hscb->tag, ROLE_INITIATOR, 5639 /*status*/0, SEARCH_COUNT) > 0) { 5640 disconnected = FALSE; 5641 } else { 5642 disconnected = TRUE; 5643 } 5644 5645 if (disconnected) { 5646 5647 ahc_set_recoveryscb(ahc, scb); 5648 /* 5649 * Simply set the MK_MESSAGE control bit. 
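* With MK_MESSAGE set, the sequencer should raise ATN the next time this command is on the bus, giving us a chance to deliver the BDR message queued below.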
5650 */ 5651 scb->hscb->control |= MK_MESSAGE; 5652 scb->flags |= SCB_QUEUED_MSG 5653 | SCB_DEVICE_RESET; 5654 5655 /* 5656 * Remove this SCB from the disconnected 5657 * list so that a reconnect at this point 5658 * causes a BDR or abort. 5659 */ 5660 ahc_search_disc_list(ahc, target, channel, lun, 5661 scb->hscb->tag); 5662 ahc_index_busy_tcl(ahc, scb->hscb->tcl, 5663 /*unbusy*/TRUE); 5664 5665 /* 5666 * Actually re-queue this SCB in case we can 5667 * select the device before it reconnects. 5668 * Clear out any entries in the QINFIFO first 5669 * so we are the next SCB for this target 5670 * to run. 5671 */ 5672 ahc_search_qinfifo(ahc, SCB_TARGET(scb), 5673 channel, SCB_LUN(scb), 5674 SCB_LIST_NULL, 5675 ROLE_INITIATOR, 5676 CAM_REQUEUE_REQ, 5677 SEARCH_COMPLETE); 5678 xpt_print_path(scb->ccb->ccb_h.path); 5679 printf("Queuing a BDR SCB\n"); 5680 ahc->qinfifo[ahc->qinfifonext++] = 5681 scb->hscb->tag; 5682 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 5683 ahc_outb(ahc, HNSCB_QOFF, 5684 ahc->qinfifonext); 5685 } else { 5686 ahc_outb(ahc, KERNEL_QINPOS, 5687 ahc->qinfifonext); 5688 } 5689 scb->ccb->ccb_h.timeout_ch = 5690 timeout(ahc_timeout, (caddr_t)scb, 2 * hz); 5691 unpause_sequencer(ahc, /*unpause_always*/FALSE); 5692 } else { 5693 /* Go "immediatly" to the bus reset */ 5694 /* This shouldn't happen */ 5695 ahc_set_recoveryscb(ahc, scb); 5696 xpt_print_path(scb->ccb->ccb_h.path); 5697 printf("SCB %d: Immediate reset. " 5698 "Flags = 0x%x\n", scb->hscb->tag, 5699 scb->flags); 5700 goto bus_reset; 5701 } 5702 } 5703 } 5704 splx(s); 5705 } 5706 5707 static int 5708 ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel, 5709 int lun, u_int tag, role_t role, u_int32_t status, 5710 ahc_search_action action) 5711 { 5712 struct scb *scbp; 5713 u_int8_t qinpos; 5714 u_int8_t qintail; 5715 int found; 5716 5717 qinpos = ahc_inb(ahc, QINPOS); 5718 qintail = ahc->qinfifonext; 5719 found = 0; 5720 5721 /* 5722 * Start with an empty queue. Entries that are not chosen 5723 * for removal will be re-added to the queue as we go. 5724 */ 5725 ahc->qinfifonext = qinpos; 5726 5727 while (qinpos != qintail) { 5728 scbp = &ahc->scb_data->scbarray[ahc->qinfifo[qinpos]]; 5729 if (ahc_match_scb(scbp, target, channel, lun, tag, role)) { 5730 /* 5731 * We found an scb that needs to be removed. 
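* SEARCH_COMPLETE finishes the CCB with the caller's status (if it is still in progress), SEARCH_COUNT leaves the entry in the queue, and SEARCH_REMOVE simply drops it.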
5732 */ 5733 switch (action) { 5734 case SEARCH_COMPLETE: 5735 if (ahc_ccb_status(scbp->ccb) == CAM_REQ_INPROG) 5736 ahcsetccbstatus(scbp->ccb, status); 5737 ahc_freeze_ccb(scbp->ccb); 5738 ahc_done(ahc, scbp); 5739 break; 5740 case SEARCH_COUNT: 5741 ahc->qinfifo[ahc->qinfifonext++] = 5742 scbp->hscb->tag; 5743 break; 5744 case SEARCH_REMOVE: 5745 break; 5746 } 5747 found++; 5748 } else { 5749 ahc->qinfifo[ahc->qinfifonext++] = scbp->hscb->tag; 5750 } 5751 qinpos++; 5752 } 5753 5754 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 5755 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); 5756 } else { 5757 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); 5758 } 5759 5760 return (found); 5761 } 5762 5763 5764 static void 5765 ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) 5766 { 5767 union ccb *abort_ccb; 5768 5769 abort_ccb = ccb->cab.abort_ccb; 5770 switch (abort_ccb->ccb_h.func_code) { 5771 case XPT_ACCEPT_TARGET_IO: 5772 case XPT_IMMED_NOTIFY: 5773 case XPT_CONT_TARGET_IO: 5774 { 5775 struct tmode_tstate *tstate; 5776 struct tmode_lstate *lstate; 5777 struct ccb_hdr_slist *list; 5778 cam_status status; 5779 5780 status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate, 5781 &lstate, TRUE); 5782 5783 if (status != CAM_REQ_CMP) { 5784 ccb->ccb_h.status = status; 5785 break; 5786 } 5787 5788 if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) 5789 list = &lstate->accept_tios; 5790 else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) 5791 list = &lstate->immed_notifies; 5792 else 5793 list = NULL; 5794 5795 if (list != NULL) { 5796 struct ccb_hdr *curelm; 5797 int found; 5798 5799 curelm = SLIST_FIRST(list); 5800 found = 0; 5801 if (curelm == &abort_ccb->ccb_h) { 5802 found = 1; 5803 SLIST_REMOVE_HEAD(list, sim_links.sle); 5804 } else { 5805 while(curelm != NULL) { 5806 struct ccb_hdr *nextelm; 5807 5808 nextelm = 5809 SLIST_NEXT(curelm, sim_links.sle); 5810 5811 if (nextelm == &abort_ccb->ccb_h) { 5812 found = 1; 5813 SLIST_NEXT(curelm, 5814 sim_links.sle) = 5815 SLIST_NEXT(nextelm, 5816 sim_links.sle); 5817 break; 5818 } 5819 curelm = nextelm; 5820 } 5821 } 5822 5823 if (found) { 5824 abort_ccb->ccb_h.status = CAM_REQ_ABORTED; 5825 xpt_done(abort_ccb); 5826 ccb->ccb_h.status = CAM_REQ_CMP; 5827 } else { 5828 printf("Not found\n"); 5829 ccb->ccb_h.status = CAM_PATH_INVALID; 5830 } 5831 break; 5832 } 5833 /* FALLTHROUGH */ 5834 } 5835 case XPT_SCSI_IO: 5836 /* XXX Fully implement the hard ones */ 5837 ccb->ccb_h.status = CAM_UA_ABORT; 5838 break; 5839 default: 5840 ccb->ccb_h.status = CAM_REQ_INVALID; 5841 break; 5842 } 5843 xpt_done(ccb); 5844 } 5845 5846 /* 5847 * Abort all SCBs that match the given description (target/channel/lun/tag), 5848 * setting their status to the passed in status if the status has not already 5849 * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer 5850 * is paused before it is called. 5851 */ 5852 static int 5853 ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, 5854 int lun, u_int tag, role_t role, u_int32_t status) 5855 { 5856 struct scb *scbp; 5857 u_int active_scb; 5858 int i; 5859 int found; 5860 5861 /* restore this when we're done */ 5862 active_scb = ahc_inb(ahc, SCBPTR); 5863 5864 found = ahc_search_qinfifo(ahc, target, channel, lun, tag, 5865 role, CAM_REQUEUE_REQ, SEARCH_COMPLETE); 5866 5867 /* 5868 * Search waiting for selection list. 5869 */ 5870 { 5871 u_int8_t next, prev; 5872 5873 next = ahc_inb(ahc, WAITING_SCBH); /* Start at head of list. 
*/ 5874 prev = SCB_LIST_NULL; 5875 5876 while (next != SCB_LIST_NULL) { 5877 u_int8_t scb_index; 5878 5879 ahc_outb(ahc, SCBPTR, next); 5880 scb_index = ahc_inb(ahc, SCB_TAG); 5881 if (scb_index >= ahc->scb_data->numscbs) { 5882 panic("Waiting List inconsistency. " 5883 "SCB index == %d, yet numscbs == %d.", 5884 scb_index, ahc->scb_data->numscbs); 5885 } 5886 scbp = &ahc->scb_data->scbarray[scb_index]; 5887 if (ahc_match_scb(scbp, target, channel, 5888 lun, tag, role)) { 5889 5890 next = ahc_abort_wscb(ahc, next, prev); 5891 } else { 5892 5893 prev = next; 5894 next = ahc_inb(ahc, SCB_NEXT); 5895 } 5896 } 5897 } 5898 /* 5899 * Go through the disconnected list and remove any entries we 5900 * have queued for completion, 0'ing their control byte too. 5901 */ 5902 ahc_search_disc_list(ahc, target, channel, lun, tag); 5903 5904 /* 5905 * Go through the hardware SCB array looking for commands that 5906 * were active but not on any list. 5907 */ 5908 for(i = 0; i < ahc->scb_data->maxhscbs; i++) { 5909 u_int scbid; 5910 5911 ahc_outb(ahc, SCBPTR, i); 5912 scbid = ahc_inb(ahc, SCB_TAG); 5913 if (scbid < ahc->scb_data->numscbs) { 5914 scbp = &ahc->scb_data->scbarray[scbid]; 5915 if (ahc_match_scb(scbp, target, channel, 5916 lun, tag, role)) { 5917 ahc_add_curscb_to_free_list(ahc); 5918 } 5919 } 5920 } 5921 /* 5922 * Go through the pending CCB list and look for 5923 * commands for this target that are still active. 5924 * These are other tagged commands that were 5925 * disconnected when the reset occured. 5926 */ 5927 { 5928 struct ccb_hdr *ccb_h; 5929 5930 ccb_h = ahc->pending_ccbs.lh_first; 5931 while (ccb_h != NULL) { 5932 scbp = (struct scb *)ccb_h->ccb_scb_ptr; 5933 ccb_h = ccb_h->sim_links.le.le_next; 5934 if (ahc_match_scb(scbp, target, channel, 5935 lun, tag, role)) { 5936 if (ahc_ccb_status(scbp->ccb) == CAM_REQ_INPROG) 5937 ahcsetccbstatus(scbp->ccb, status); 5938 ahc_freeze_ccb(scbp->ccb); 5939 ahc_done(ahc, scbp); 5940 found++; 5941 } 5942 } 5943 } 5944 ahc_outb(ahc, SCBPTR, active_scb); 5945 return found; 5946 } 5947 5948 static int 5949 ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel, 5950 int lun, u_int tag) 5951 { 5952 struct scb *scbp; 5953 u_int next; 5954 u_int prev; 5955 u_int count; 5956 u_int active_scb; 5957 5958 count = 0; 5959 next = ahc_inb(ahc, DISCONNECTED_SCBH); 5960 prev = SCB_LIST_NULL; 5961 5962 /* restore this when we're done */ 5963 active_scb = ahc_inb(ahc, SCBPTR); 5964 5965 while (next != SCB_LIST_NULL) { 5966 u_int scb_index; 5967 5968 ahc_outb(ahc, SCBPTR, next); 5969 scb_index = ahc_inb(ahc, SCB_TAG); 5970 if (scb_index >= ahc->scb_data->numscbs) { 5971 panic("Disconnected List inconsistency. 
" 5972 "SCB index == %d, yet numscbs == %d.", 5973 scb_index, ahc->scb_data->numscbs); 5974 } 5975 scbp = &ahc->scb_data->scbarray[scb_index]; 5976 if (ahc_match_scb(scbp, target, channel, lun, 5977 tag, ROLE_INITIATOR)) { 5978 next = ahc_rem_scb_from_disc_list(ahc, prev, next); 5979 count++; 5980 } else { 5981 prev = next; 5982 next = ahc_inb(ahc, SCB_NEXT); 5983 } 5984 } 5985 ahc_outb(ahc, SCBPTR, active_scb); 5986 return (count); 5987 } 5988 5989 static u_int 5990 ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr) 5991 { 5992 u_int next; 5993 5994 ahc_outb(ahc, SCBPTR, scbptr); 5995 next = ahc_inb(ahc, SCB_NEXT); 5996 5997 ahc_outb(ahc, SCB_CONTROL, 0); 5998 5999 ahc_add_curscb_to_free_list(ahc); 6000 6001 if (prev != SCB_LIST_NULL) { 6002 ahc_outb(ahc, SCBPTR, prev); 6003 ahc_outb(ahc, SCB_NEXT, next); 6004 } else 6005 ahc_outb(ahc, DISCONNECTED_SCBH, next); 6006 6007 return (next); 6008 } 6009 6010 static void 6011 ahc_add_curscb_to_free_list(struct ahc_softc *ahc) 6012 { 6013 /* Invalidate the tag so that ahc_find_scb doesn't think it's active */ 6014 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); 6015 6016 ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH)); 6017 ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR)); 6018 } 6019 6020 /* 6021 * Manipulate the waiting for selection list and return the 6022 * scb that follows the one that we remove. 6023 */ 6024 static u_int 6025 ahc_abort_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev) 6026 { 6027 u_int curscb, next; 6028 6029 /* 6030 * Select the SCB we want to abort and 6031 * pull the next pointer out of it. 6032 */ 6033 curscb = ahc_inb(ahc, SCBPTR); 6034 ahc_outb(ahc, SCBPTR, scbpos); 6035 next = ahc_inb(ahc, SCB_NEXT); 6036 6037 /* Clear the necessary fields */ 6038 ahc_outb(ahc, SCB_CONTROL, 0); 6039 6040 ahc_add_curscb_to_free_list(ahc); 6041 6042 /* update the waiting list */ 6043 if (prev == SCB_LIST_NULL) { 6044 /* First in the list */ 6045 ahc_outb(ahc, WAITING_SCBH, next); 6046 6047 /* 6048 * Ensure we aren't attempting to perform 6049 * selection for this entry. 6050 */ 6051 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); 6052 } else { 6053 /* 6054 * Select the scb that pointed to us 6055 * and update its next pointer. 6056 */ 6057 ahc_outb(ahc, SCBPTR, prev); 6058 ahc_outb(ahc, SCB_NEXT, next); 6059 } 6060 6061 /* 6062 * Point us back at the original scb position. 
6063 */ 6064 ahc_outb(ahc, SCBPTR, curscb); 6065 return next; 6066 } 6067 6068 static void 6069 ahc_clear_intstat(struct ahc_softc *ahc) 6070 { 6071 /* Clear any interrupt conditions this may have caused */ 6072 ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO); 6073 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI 6074 |CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG| 6075 CLRREQINIT); 6076 ahc_outb(ahc, CLRINT, CLRSCSIINT); 6077 } 6078 6079 static void 6080 ahc_reset_current_bus(struct ahc_softc *ahc) 6081 { 6082 u_int8_t scsiseq; 6083 6084 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST); 6085 scsiseq = ahc_inb(ahc, SCSISEQ); 6086 ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO); 6087 DELAY(AHC_BUSRESET_DELAY); 6088 /* Turn off the bus reset */ 6089 ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO); 6090 6091 ahc_clear_intstat(ahc); 6092 6093 /* Re-enable reset interrupts */ 6094 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST); 6095 } 6096 6097 static int 6098 ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset) 6099 { 6100 struct cam_path *path; 6101 u_int initiator, target, max_scsiid; 6102 u_int sblkctl; 6103 u_int our_id; 6104 int found; 6105 int restart_needed; 6106 char cur_channel; 6107 6108 ahc->pending_device = NULL; 6109 6110 pause_sequencer(ahc); 6111 6112 /* 6113 * Run our command complete fifos to ensure that we perform 6114 * completion processing on any commands that 'completed' 6115 * before the reset occurred. 6116 */ 6117 ahc_run_qoutfifo(ahc); 6118 if ((ahc->flags & AHC_TARGETMODE) != 0) { 6119 ahc_run_tqinfifo(ahc); 6120 } 6121 6122 /* 6123 * Reset the bus if we are initiating this reset 6124 */ 6125 sblkctl = ahc_inb(ahc, SBLKCTL); 6126 cur_channel = 'A'; 6127 if ((ahc->features & AHC_TWIN) != 0 6128 && ((sblkctl & SELBUSB) != 0)) 6129 cur_channel = 'B'; 6130 if (cur_channel != channel) { 6131 /* Case 1: Command for another bus is active 6132 * Stealthily reset the other bus without 6133 * upsetting the current bus. 6134 */ 6135 ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB); 6136 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 6137 ahc_outb(ahc, SCSISEQ, 6138 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 6139 if (initiate_reset) 6140 ahc_reset_current_bus(ahc); 6141 ahc_clear_intstat(ahc); 6142 ahc_outb(ahc, SBLKCTL, sblkctl); 6143 restart_needed = FALSE; 6144 } else { 6145 /* Case 2: A command from this bus is active or we're idle */ 6146 ahc_clear_msg_state(ahc); 6147 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 6148 ahc_outb(ahc, SCSISEQ, 6149 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 6150 if (initiate_reset) 6151 ahc_reset_current_bus(ahc); 6152 ahc_clear_intstat(ahc); 6153 6154 /* 6155 * Since we are going to restart the sequencer, avoid 6156 * a race in the sequencer that could cause corruption 6157 * of our Q pointers by starting over from index 0. 6158 */ 6159 ahc->qoutfifonext = 0; 6160 if ((ahc->features & AHC_QUEUE_REGS) != 0) 6161 ahc_outb(ahc, SDSCB_QOFF, 0); 6162 else 6163 ahc_outb(ahc, QOUTPOS, 0); 6164 if ((ahc->flags & AHC_TARGETMODE) != 0) { 6165 ahc->tqinfifonext = 0; 6166 ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1); 6167 ahc_outb(ahc, TQINPOS, 0); 6168 } 6169 restart_needed = TRUE; 6170 } 6171 6172 /* 6173 * Clean up all the state information for the 6174 * pending transactions on this bus. 
6175 */ 6176 found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel, 6177 CAM_LUN_WILDCARD, SCB_LIST_NULL, 6178 ROLE_UNKNOWN, CAM_SCSI_BUS_RESET); 6179 if (channel == 'B') { 6180 path = ahc->path_b; 6181 our_id = ahc->our_id_b; 6182 } else { 6183 path = ahc->path; 6184 our_id = ahc->our_id; 6185 } 6186 6187 max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7; 6188 6189 /* 6190 * Send an immediate notify ccb to all target more peripheral 6191 * drivers affected by this action. 6192 */ 6193 for (target = 0; target <= max_scsiid; target++) { 6194 struct tmode_tstate* tstate; 6195 u_int lun; 6196 6197 tstate = ahc->enabled_targets[target]; 6198 if (tstate == NULL) 6199 continue; 6200 for (lun = 0; lun <= 7; lun++) { 6201 struct tmode_lstate* lstate; 6202 6203 lstate = tstate->enabled_luns[lun]; 6204 if (lstate == NULL) 6205 continue; 6206 6207 ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD, 6208 EVENT_TYPE_BUS_RESET, /*arg*/0); 6209 ahc_send_lstate_events(ahc, lstate); 6210 } 6211 } 6212 6213 /* Notify the XPT that a bus reset occurred */ 6214 xpt_async(AC_BUS_RESET, path, NULL); 6215 6216 /* 6217 * Revert to async/narrow transfers until we renegotiate. 6218 */ 6219 for (target = 0; target <= max_scsiid; target++) { 6220 6221 if (ahc->enabled_targets[target] == NULL) 6222 continue; 6223 for (initiator = 0; initiator <= max_scsiid; initiator++) { 6224 struct ahc_devinfo devinfo; 6225 6226 ahc_compile_devinfo(&devinfo, target, initiator, 6227 CAM_LUN_WILDCARD, 6228 channel, ROLE_UNKNOWN); 6229 ahc_set_width(ahc, &devinfo, path, 6230 MSG_EXT_WDTR_BUS_8_BIT, 6231 AHC_TRANS_CUR, /*paused*/TRUE); 6232 ahc_set_syncrate(ahc, &devinfo, path, 6233 /*syncrate*/NULL, /*period*/0, 6234 /*offset*/0, AHC_TRANS_CUR, 6235 /*paused*/TRUE); 6236 } 6237 } 6238 6239 if (restart_needed) 6240 restart_sequencer(ahc); 6241 else 6242 unpause_sequencer(ahc, /*unpause_always*/FALSE); 6243 return found; 6244 } 6245 6246 static int 6247 ahc_match_scb(struct scb *scb, int target, char channel, 6248 int lun, role_t role, u_int tag) 6249 { 6250 int targ = SCB_TARGET(scb); 6251 char chan = SCB_CHANNEL(scb); 6252 int slun = SCB_LUN(scb); 6253 int match; 6254 6255 match = ((chan == channel) || (channel == ALL_CHANNELS)); 6256 if (match != 0) 6257 match = ((targ == target) || (target == CAM_TARGET_WILDCARD)); 6258 if (match != 0) 6259 match = ((lun == slun) || (lun == CAM_LUN_WILDCARD)); 6260 if (match != 0) { 6261 int group; 6262 6263 group = XPT_FC_GROUP(scb->ccb->ccb_h.func_code); 6264 if (role == ROLE_INITIATOR) { 6265 match = (group == XPT_FC_GROUP_COMMON) 6266 && ((tag == scb->ccb->csio.tag_id) 6267 || (tag == SCB_LIST_NULL)); 6268 } else if (role == ROLE_TARGET) { 6269 match = (group == XPT_FC_GROUP_TMODE) 6270 && ((tag == scb->ccb->csio.tag_id) 6271 || (tag == SCB_LIST_NULL)); 6272 } 6273 } 6274 return match; 6275 } 6276 6277 static void 6278 ahc_construct_sdtr(struct ahc_softc *ahc, u_int period, u_int offset) 6279 { 6280 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 6281 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN; 6282 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR; 6283 ahc->msgout_buf[ahc->msgout_index++] = period; 6284 ahc->msgout_buf[ahc->msgout_index++] = offset; 6285 ahc->msgout_len += 5; 6286 } 6287 6288 static void 6289 ahc_construct_wdtr(struct ahc_softc *ahc, u_int bus_width) 6290 { 6291 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 6292 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN; 6293 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR; 6294 
	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
6295	ahc->msgout_len += 4;
6296 }
6297
6298 static void
6299 ahc_calc_residual(struct scb *scb)
6300 {
6301	struct hardware_scb *hscb;
6302
6303	hscb = scb->hscb;
6304
6305	/*
6306	 * If the disconnected flag is still set, this is bogus
6307	 * residual information left over from a sequencer
6308	 * pagein/pageout, so ignore this case.
6309	 */
6310	if ((scb->hscb->control & DISCONNECTED) == 0) {
6311		u_int32_t resid;
6312		int resid_sgs;
6313		int sg;
6314
6315		/*
6316		 * Remainder of the SG where the transfer
6317		 * stopped.
6318		 */
6319		resid = (hscb->residual_data_count[2] << 16)
6320		      | (hscb->residual_data_count[1] << 8)
6321		      | (hscb->residual_data_count[0]);
6322
6323		/*
6324		 * Add up the contents of all residual
6325		 * SG segments that are after the SG where
6326		 * the transfer stopped.
6327		 */
6328		resid_sgs = scb->hscb->residual_SG_count - 1/*current*/;
6329		sg = scb->sg_count - resid_sgs;
6330		while (resid_sgs > 0) {
6331
6332			resid += scb->sg_list[sg].len;
6333			sg++;
6334			resid_sgs--;
6335		}
6336		if ((scb->flags & SCB_SENSE) == 0) {
6337
6338			scb->ccb->csio.resid = resid;
6339		} else {
6340
6341			scb->ccb->csio.sense_resid = resid;
6342		}
6343	}
6344
6345	/*
6346	 * Clean out the residual information in this SCB for its
6347	 * next consumer.
6348	 */
6349	hscb->residual_SG_count = 0;
6350
6351 #ifdef AHC_DEBUG
6352	if (ahc_debug & AHC_SHOWMISC) {
6353		xpt_print_path(scb->ccb->ccb_h.path);
6354		printf("Handled Residual of %u bytes\n", scb->ccb->csio.resid);
6355	}
6356 #endif
6357 }
6358
6359 static void
6360 ahc_update_pending_syncrates(struct ahc_softc *ahc)
6361 {
6362	struct ccb_hdr *ccbh;
6363	int pending_ccb_count;
6364	int i;
6365	u_int saved_scbptr;
6366
6367	/*
6368	 * Traverse the pending SCB list and ensure that all of the
6369	 * SCBs there have the proper settings.
6370	 */
6371	ccbh = LIST_FIRST(&ahc->pending_ccbs);
6372	pending_ccb_count = 0;
6373	while (ccbh != NULL) {
6374		struct ahc_devinfo devinfo;
6375		union ccb *ccb;
6376		struct scb *pending_scb;
6377		struct hardware_scb *pending_hscb;
6378		struct ahc_initiator_tinfo *tinfo;
6379		struct tmode_tstate *tstate;
6380		u_int our_id, remote_id;
6381
6382		ccb = (union ccb*)ccbh;
6383		pending_scb = (struct scb *)ccbh->ccb_scb_ptr;
6384		pending_hscb = pending_scb->hscb;
6385		if (ccbh->func_code == XPT_CONT_TARGET_IO) {
6386			our_id = ccb->ccb_h.target_id;
6387			remote_id = ccb->ctio.init_id;
6388		} else {
6389			our_id = SCB_IS_SCSIBUS_B(pending_scb)
6390			       ?
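				 /* Use this controller's SCSI ID on whichever
				  * bus the SCB is destined for. */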
ahc->our_id_b : ahc->our_id; 6391 remote_id = ccb->ccb_h.target_id; 6392 } 6393 ahc_compile_devinfo(&devinfo, our_id, remote_id, 6394 SCB_LUN(pending_scb), 6395 SCB_CHANNEL(pending_scb), 6396 ROLE_UNKNOWN); 6397 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, 6398 our_id, remote_id, &tstate); 6399 pending_hscb->control &= ~ULTRAENB; 6400 if ((tstate->ultraenb & devinfo.target_mask) != 0) 6401 pending_hscb->control |= ULTRAENB; 6402 pending_hscb->scsirate = tinfo->scsirate; 6403 pending_hscb->scsioffset = tinfo->current.offset; 6404 pending_ccb_count++; 6405 ccbh = LIST_NEXT(ccbh, sim_links.le); 6406 } 6407 6408 if (pending_ccb_count == 0) 6409 return; 6410 6411 saved_scbptr = ahc_inb(ahc, SCBPTR); 6412 /* Ensure that the hscbs down on the card match the new information */ 6413 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 6414 u_int scb_tag; 6415 6416 ahc_outb(ahc, SCBPTR, i); 6417 scb_tag = ahc_inb(ahc, SCB_TAG); 6418 if (scb_tag != SCB_LIST_NULL) { 6419 struct ahc_devinfo devinfo; 6420 union ccb *ccb; 6421 struct scb *pending_scb; 6422 struct hardware_scb *pending_hscb; 6423 struct ahc_initiator_tinfo *tinfo; 6424 struct tmode_tstate *tstate; 6425 u_int our_id, remote_id; 6426 u_int control; 6427 6428 pending_scb = &ahc->scb_data->scbarray[scb_tag]; 6429 if (pending_scb->flags == SCB_FREE) 6430 continue; 6431 pending_hscb = pending_scb->hscb; 6432 ccb = pending_scb->ccb; 6433 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { 6434 our_id = ccb->ccb_h.target_id; 6435 remote_id = ccb->ctio.init_id; 6436 } else { 6437 our_id = SCB_IS_SCSIBUS_B(pending_scb) 6438 ? ahc->our_id_b : ahc->our_id; 6439 remote_id = ccb->ccb_h.target_id; 6440 } 6441 ahc_compile_devinfo(&devinfo, our_id, remote_id, 6442 SCB_LUN(pending_scb), 6443 SCB_CHANNEL(pending_scb), 6444 ROLE_UNKNOWN); 6445 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, 6446 our_id, remote_id, &tstate); 6447 control = ahc_inb(ahc, SCB_CONTROL); 6448 control &= ~ULTRAENB; 6449 if ((tstate->ultraenb & devinfo.target_mask) != 0) 6450 control |= ULTRAENB; 6451 ahc_outb(ahc, SCB_CONTROL, control); 6452 ahc_outb(ahc, SCB_SCSIRATE, tinfo->scsirate); 6453 ahc_outb(ahc, SCB_SCSIOFFSET, tinfo->current.offset); 6454 } 6455 } 6456 ahc_outb(ahc, SCBPTR, saved_scbptr); 6457 } 6458 6459 #if UNUSED 6460 static void 6461 ahc_dump_targcmd(struct target_cmd *cmd) 6462 { 6463 u_int8_t *byte; 6464 u_int8_t *last_byte; 6465 int i; 6466 6467 byte = &cmd->initiator_channel; 6468 /* Debugging info for received commands */ 6469 last_byte = &cmd[1].initiator_channel; 6470 6471 i = 0; 6472 while (byte < last_byte) { 6473 if (i == 0) 6474 printf("\t"); 6475 printf("%#x", *byte++); 6476 i++; 6477 if (i == 8) { 6478 printf("\n"); 6479 i = 0; 6480 } else { 6481 printf(", "); 6482 } 6483 } 6484 } 6485 #endif 6486 6487 static void 6488 ahc_shutdown(void *arg, int howto) 6489 { 6490 struct ahc_softc *ahc; 6491 int i; 6492 u_int sxfrctl1_a, sxfrctl1_b; 6493 6494 ahc = (struct ahc_softc *)arg; 6495 6496 pause_sequencer(ahc); 6497 6498 /* 6499 * Preserve the value of the SXFRCTL1 register for all channels. 6500 * It contains settings that affect termination and we don't want 6501 * to disturb the integrity of the bus during shutdown in case 6502 * we are in a multi-initiator setup. 
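 * On twin channel (AHC_TWIN) adapters the register is sampled for each bus
 * by toggling SELBUSB in SBLKCTL, and both saved values are written back
 * once the chip reset below has completed.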
6503 */ 6504 sxfrctl1_b = 0; 6505 if ((ahc->features & AHC_TWIN) != 0) { 6506 u_int sblkctl; 6507 6508 sblkctl = ahc_inb(ahc, SBLKCTL); 6509 ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); 6510 sxfrctl1_b = ahc_inb(ahc, SXFRCTL1); 6511 ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); 6512 } 6513 6514 sxfrctl1_a = ahc_inb(ahc, SXFRCTL1); 6515 6516 /* This will reset most registers to 0, but not all */ 6517 ahc_reset(ahc); 6518 6519 if ((ahc->features & AHC_TWIN) != 0) { 6520 u_int sblkctl; 6521 6522 sblkctl = ahc_inb(ahc, SBLKCTL); 6523 ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); 6524 ahc_outb(ahc, SXFRCTL1, sxfrctl1_b); 6525 ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); 6526 } 6527 ahc_outb(ahc, SXFRCTL1, sxfrctl1_a); 6528 6529 ahc_outb(ahc, SCSISEQ, 0); 6530 ahc_outb(ahc, SXFRCTL0, 0); 6531 ahc_outb(ahc, DSPCISTATUS, 0); 6532 6533 for (i = TARG_SCSIRATE; i < HA_274_BIOSCTRL; i++) 6534 ahc_outb(ahc, i, 0); 6535 } 6536 6537 /* 6538 * Add a target mode event to this lun's queue 6539 */ 6540 static void 6541 ahc_queue_lstate_event(struct ahc_softc *ahc, struct tmode_lstate *lstate, 6542 u_int initiator_id, u_int event_type, u_int event_arg) 6543 { 6544 struct ahc_tmode_event *event; 6545 int pending; 6546 6547 xpt_freeze_devq(lstate->path, /*count*/1); 6548 if (lstate->event_w_idx >= lstate->event_r_idx) 6549 pending = lstate->event_w_idx - lstate->event_r_idx; 6550 else 6551 pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1 6552 - (lstate->event_r_idx - lstate->event_w_idx); 6553 6554 if (event_type == EVENT_TYPE_BUS_RESET 6555 || event_type == MSG_BUS_DEV_RESET) { 6556 /* 6557 * Any earlier events are irrelevant, so reset our buffer. 6558 * This has the effect of allowing us to deal with reset 6559 * floods (an external device holding down the reset line) 6560 * without losing the event that is really interesting. 6561 */ 6562 lstate->event_r_idx = 0; 6563 lstate->event_w_idx = 0; 6564 xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); 6565 } 6566 6567 if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) { 6568 xpt_print_path(lstate->path); 6569 printf("immediate event %x:%x lost\n", 6570 lstate->event_buffer[lstate->event_r_idx].event_type, 6571 lstate->event_buffer[lstate->event_r_idx].event_arg); 6572 lstate->event_r_idx++; 6573 if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) 6574 lstate->event_r_idx = 0; 6575 xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); 6576 } 6577 6578 event = &lstate->event_buffer[lstate->event_w_idx]; 6579 event->initiator_id = initiator_id; 6580 event->event_type = event_type; 6581 event->event_arg = event_arg; 6582 lstate->event_w_idx++; 6583 if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE) 6584 lstate->event_w_idx = 0; 6585 } 6586 6587 /* 6588 * Send any target mode events queued up waiting 6589 * for immediate notify resources. 
6590 */ 6591 static void 6592 ahc_send_lstate_events(struct ahc_softc *ahc, struct tmode_lstate *lstate) 6593 { 6594 struct ccb_hdr *ccbh; 6595 struct ccb_immed_notify *inot; 6596 6597 while (lstate->event_r_idx != lstate->event_w_idx 6598 && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) { 6599 struct ahc_tmode_event *event; 6600 6601 event = &lstate->event_buffer[lstate->event_r_idx]; 6602 SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle); 6603 inot = (struct ccb_immed_notify *)ccbh; 6604 switch (event->event_type) { 6605 case EVENT_TYPE_BUS_RESET: 6606 ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN; 6607 break; 6608 default: 6609 ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 6610 inot->message_args[0] = event->event_type; 6611 inot->message_args[1] = event->event_arg; 6612 break; 6613 } 6614 inot->initiator_id = event->initiator_id; 6615 inot->sense_len = 0; 6616 xpt_done((union ccb *)inot); 6617 lstate->event_r_idx++; 6618 if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) 6619 lstate->event_r_idx = 0; 6620 } 6621 } 6622