1 /* 2 * Generic driver for the aic7xxx based adaptec SCSI controllers 3 * Product specific probe and attach routines can be found in: 4 * i386/eisa/ahc_eisa.c 27/284X and aic7770 motherboard controllers 5 * pci/ahc_pci.c 3985, 3980, 3940, 2940, aic7895, aic7890, 6 * aic7880, aic7870, aic7860, and aic7850 controllers 7 * 8 * Copyright (c) 1994, 1995, 1996, 1997, 1998, 1999, 2000 Justin T. Gibbs. 9 * All rights reserved. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions, and the following disclaimer, 16 * without modification. 17 * 2. The name of the author may not be used to endorse or promote products 18 * derived from this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * the GNU Public License ("GPL"). 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 27 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 * 35 * $FreeBSD$ 36 */ 37 /* 38 * A few notes on features of the driver. 39 * 40 * SCB paging takes advantage of the fact that devices stay disconnected 41 * from the bus a relatively long time and that while they're disconnected, 42 * having the SCBs for these transactions down on the host adapter is of 43 * little use. Instead of leaving this idle SCB down on the card we copy 44 * it back up into kernel memory and reuse the SCB slot on the card to 45 * schedule another transaction. This can be a real payoff when doing random 46 * I/O to tagged queueing devices since there are more transactions active at 47 * once for the device to sort for optimal seek reduction. The algorithm goes 48 * like this... 49 * 50 * The sequencer maintains two lists of its hardware SCBs. The first is the 51 * singly linked free list which tracks all SCBs that are not currently in 52 * use. The second is the doubly linked disconnected list which holds the 53 * SCBs of transactions that are in the disconnected state sorted most 54 * recently disconnected first. When the kernel queues a transaction to 55 * the card, a hardware SCB to "house" this transaction is retrieved from 56 * either of these two lists. If the SCB came from the disconnected list, 57 * a check is made to see if any data transfer or SCB linking (more on linking 58 * in a bit) information has been changed since it was copied from the host 59 * and if so, DMAs the SCB back up before it can be used. Once a hardware 60 * SCB has been obtained, the SCB is DMAed from the host. 
Before any work 61 * can begin on this SCB, the sequencer must ensure that either the SCB is 62 * for a tagged transaction or the target is not already working on another 63 * non-tagged transaction. If a conflict arises in the non-tagged case, the 64 * sequencer finds the SCB for the active transactions and sets the SCB_LINKED 65 * field in that SCB to this next SCB to execute. To facilitate finding 66 * active non-tagged SCBs, the last four bytes of up to the first four hardware 67 * SCBs serve as a storage area for the currently active SCB ID for each 68 * target. 69 * 70 * When a device reconnects, a search is made of the hardware SCBs to find 71 * the SCB for this transaction. If the search fails, a hardware SCB is 72 * pulled from either the free or disconnected SCB list and the proper 73 * SCB is DMAed from the host. If the MK_MESSAGE control bit is set 74 * in the control byte of the SCB while it was disconnected, the sequencer 75 * will assert ATN and attempt to issue a message to the host. 76 * 77 * When a command completes, a check for non-zero status and residuals is 78 * made. If either of these conditions exists, the SCB is DMAed back up to 79 * the host so that it can interpret this information. Additionally, in the 80 * case of bad status, the sequencer generates a special interrupt and pauses 81 * itself. This allows the host to setup a request sense command if it 82 * chooses for this target synchronously with the error so that sense 83 * information isn't lost. 84 * 85 */ 86 87 #include <opt_aic7xxx.h> 88 89 #include <pci.h> 90 #include <stddef.h> /* For offsetof */ 91 92 #include <sys/param.h> 93 #include <sys/systm.h> 94 #include <sys/malloc.h> 95 #include <sys/eventhandler.h> 96 #include <sys/buf.h> 97 #include <sys/proc.h> 98 99 #include <cam/cam.h> 100 #include <cam/cam_ccb.h> 101 #include <cam/cam_sim.h> 102 #include <cam/cam_xpt_sim.h> 103 #include <cam/cam_debug.h> 104 105 #include <cam/scsi/scsi_all.h> 106 #include <cam/scsi/scsi_message.h> 107 108 #if NPCI > 0 109 #include <machine/bus_memio.h> 110 #endif 111 #include <machine/bus_pio.h> 112 #include <machine/bus.h> 113 #include <machine/clock.h> 114 #include <sys/rman.h> 115 116 #include <vm/vm.h> 117 #include <vm/vm_param.h> 118 #include <vm/pmap.h> 119 120 #include <dev/aic7xxx/aic7xxx.h> 121 #include <dev/aic7xxx/sequencer.h> 122 123 #include <aic7xxx_reg.h> 124 #include <aic7xxx_seq.h> 125 126 #include <sys/kernel.h> 127 128 #ifndef AHC_TMODE_ENABLE 129 #define AHC_TMODE_ENABLE 0 130 #endif 131 132 #define MAX(a,b) (((a) > (b)) ? (a) : (b)) 133 #define MIN(a,b) (((a) < (b)) ? (a) : (b)) 134 #define ALL_CHANNELS '\0' 135 #define ALL_TARGETS_MASK 0xFFFF 136 #define INITIATOR_WILDCARD (~0) 137 138 #define SIM_IS_SCSIBUS_B(ahc, sim) \ 139 ((sim) == ahc->sim_b) 140 #define SIM_CHANNEL(ahc, sim) \ 141 (((sim) == ahc->sim_b) ? 'B' : 'A') 142 #define SIM_SCSI_ID(ahc, sim) \ 143 (((sim) == ahc->sim_b) ? ahc->our_id_b : ahc->our_id) 144 #define SIM_PATH(ahc, sim) \ 145 (((sim) == ahc->sim_b) ? ahc->path_b : ahc->path) 146 #define SCB_IS_SCSIBUS_B(scb) \ 147 (((scb)->hscb->tcl & SELBUSB) != 0) 148 #define SCB_TARGET(scb) \ 149 (((scb)->hscb->tcl & TID) >> 4) 150 #define SCB_CHANNEL(scb) \ 151 (SCB_IS_SCSIBUS_B(scb) ? 'B' : 'A') 152 #define SCB_LUN(scb) \ 153 ((scb)->hscb->tcl & LID) 154 #define SCB_TARGET_OFFSET(scb) \ 155 (SCB_TARGET(scb) + (SCB_IS_SCSIBUS_B(scb) ? 
8 : 0)) 156 #define SCB_TARGET_MASK(scb) \ 157 (0x01 << (SCB_TARGET_OFFSET(scb))) 158 #define TCL_CHANNEL(ahc, tcl) \ 159 ((((ahc)->features & AHC_TWIN) && ((tcl) & SELBUSB)) ? 'B' : 'A') 160 #define TCL_SCSI_ID(ahc, tcl) \ 161 (TCL_CHANNEL((ahc), (tcl)) == 'B' ? (ahc)->our_id_b : (ahc)->our_id) 162 #define TCL_TARGET(tcl) (((tcl) & TID) >> TCL_TARGET_SHIFT) 163 #define TCL_LUN(tcl) ((tcl) & LID) 164 165 #define ccb_scb_ptr spriv_ptr0 166 #define ccb_ahc_ptr spriv_ptr1 167 168 char *ahc_chip_names[] = 169 { 170 "NONE", 171 "aic7770", 172 "aic7850", 173 "aic7855", 174 "aic7859", 175 "aic7860", 176 "aic7870", 177 "aic7880", 178 "aic7890/91", 179 "aic7892", 180 "aic7895", 181 "aic7896/97", 182 "aic7899" 183 }; 184 185 typedef enum { 186 ROLE_UNKNOWN, 187 ROLE_INITIATOR, 188 ROLE_TARGET 189 } role_t; 190 191 struct ahc_devinfo { 192 int our_scsiid; 193 int target_offset; 194 u_int16_t target_mask; 195 u_int8_t target; 196 u_int8_t lun; 197 char channel; 198 role_t role; /* 199 * Only guaranteed to be correct if not 200 * in the busfree state. 201 */ 202 }; 203 204 typedef enum { 205 SEARCH_COMPLETE, 206 SEARCH_COUNT, 207 SEARCH_REMOVE 208 } ahc_search_action; 209 210 #ifdef AHC_DEBUG 211 static int ahc_debug = AHC_DEBUG; 212 #endif 213 214 #if NPCI > 0 215 void ahc_pci_intr(struct ahc_softc *ahc); 216 #endif 217 218 static int ahcinitscbdata(struct ahc_softc *ahc); 219 static void ahcfiniscbdata(struct ahc_softc *ahc); 220 221 static bus_dmamap_callback_t ahcdmamapcb; 222 223 #if UNUSED 224 static void ahc_dump_targcmd(struct target_cmd *cmd); 225 #endif 226 static void ahc_shutdown(void *arg, int howto); 227 static cam_status 228 ahc_find_tmode_devs(struct ahc_softc *ahc, 229 struct cam_sim *sim, union ccb *ccb, 230 struct tmode_tstate **tstate, 231 struct tmode_lstate **lstate, 232 int notfound_failure); 233 static void ahc_action(struct cam_sim *sim, union ccb *ccb); 234 static void ahc_async(void *callback_arg, u_int32_t code, 235 struct cam_path *path, void *arg); 236 static void ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, 237 int nsegments, int error); 238 static void ahc_poll(struct cam_sim *sim); 239 static void ahc_setup_data(struct ahc_softc *ahc, 240 struct ccb_scsiio *csio, struct scb *scb); 241 static void ahc_freeze_devq(struct ahc_softc *ahc, struct cam_path *path); 242 static void ahcallocscbs(struct ahc_softc *ahc); 243 #if UNUSED 244 static void ahc_scb_devinfo(struct ahc_softc *ahc, 245 struct ahc_devinfo *devinfo, 246 struct scb *scb); 247 #endif 248 static void ahc_fetch_devinfo(struct ahc_softc *ahc, 249 struct ahc_devinfo *devinfo); 250 static void ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, 251 u_int target, u_int lun, char channel, 252 role_t role); 253 static u_int ahc_abort_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev); 254 static void ahc_done(struct ahc_softc *ahc, struct scb *scbp); 255 static struct tmode_tstate * 256 ahc_alloc_tstate(struct ahc_softc *ahc, 257 u_int scsi_id, char channel); 258 static void ahc_free_tstate(struct ahc_softc *ahc, 259 u_int scsi_id, char channel, int force); 260 static void ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, 261 union ccb *ccb); 262 static int ahc_handle_target_cmd(struct ahc_softc *ahc, 263 struct target_cmd *cmd); 264 static void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat); 265 static void ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat); 266 static void ahc_build_transfer_msg(struct ahc_softc *ahc, 267 struct ahc_devinfo *devinfo); 268 static 
void ahc_setup_initiator_msgout(struct ahc_softc *ahc, 269 struct ahc_devinfo *devinfo, 270 struct scb *scb); 271 static void ahc_setup_target_msgin(struct ahc_softc *ahc, 272 struct ahc_devinfo *devinfo); 273 static int ahc_handle_msg_reject(struct ahc_softc *ahc, 274 struct ahc_devinfo *devinfo); 275 static void ahc_clear_msg_state(struct ahc_softc *ahc); 276 static void ahc_handle_message_phase(struct ahc_softc *ahc, 277 struct cam_path *path); 278 static int ahc_sent_msg(struct ahc_softc *ahc, u_int msgtype, int full); 279 typedef enum { 280 MSGLOOP_IN_PROG, 281 MSGLOOP_MSGCOMPLETE, 282 MSGLOOP_TERMINATED 283 } msg_loop_stat; 284 static int ahc_parse_msg(struct ahc_softc *ahc, struct cam_path *path, 285 struct ahc_devinfo *devinfo); 286 static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc, 287 struct ahc_devinfo *devinfo); 288 static void ahc_handle_devreset(struct ahc_softc *ahc, 289 struct ahc_devinfo *devinfo, 290 cam_status status, ac_code acode, 291 char *message, 292 int verbose_level); 293 #ifdef AHC_DUMP_SEQ 294 static void ahc_dumpseq(struct ahc_softc *ahc); 295 #endif 296 static void ahc_loadseq(struct ahc_softc *ahc); 297 static int ahc_check_patch(struct ahc_softc *ahc, 298 struct patch **start_patch, 299 int start_instr, int *skip_addr); 300 static void ahc_download_instr(struct ahc_softc *ahc, 301 int instrptr, u_int8_t *dconsts); 302 static int ahc_match_scb(struct scb *scb, int target, char channel, 303 int lun, u_int tag, role_t role); 304 #ifdef AHC_DEBUG 305 static void ahc_print_scb(struct scb *scb); 306 #endif 307 static int ahc_search_qinfifo(struct ahc_softc *ahc, int target, 308 char channel, int lun, u_int tag, 309 role_t role, u_int32_t status, 310 ahc_search_action action); 311 static void ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, 312 union ccb *ccb); 313 static int ahc_reset_channel(struct ahc_softc *ahc, char channel, 314 int initiate_reset); 315 static int ahc_abort_scbs(struct ahc_softc *ahc, int target, 316 char channel, int lun, u_int tag, role_t role, 317 u_int32_t status); 318 static int ahc_search_disc_list(struct ahc_softc *ahc, int target, 319 char channel, int lun, u_int tag, 320 int stop_on_first, int remove, 321 int save_state); 322 static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, 323 u_int prev, u_int scbptr); 324 static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc); 325 static void ahc_clear_intstat(struct ahc_softc *ahc); 326 static void ahc_reset_current_bus(struct ahc_softc *ahc); 327 static struct ahc_syncrate * 328 ahc_devlimited_syncrate(struct ahc_softc *ahc, u_int *period); 329 static struct ahc_syncrate * 330 ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, 331 u_int maxsync); 332 static u_int ahc_find_period(struct ahc_softc *ahc, u_int scsirate, 333 u_int maxsync); 334 static void ahc_validate_offset(struct ahc_softc *ahc, 335 struct ahc_syncrate *syncrate, 336 u_int *offset, int wide); 337 static void ahc_update_target_msg_request(struct ahc_softc *ahc, 338 struct ahc_devinfo *devinfo, 339 struct ahc_initiator_tinfo *tinfo, 340 int force, int paused); 341 static int ahc_create_path(struct ahc_softc *ahc, 342 struct ahc_devinfo *devinfo, 343 struct cam_path **path); 344 static void ahc_set_syncrate(struct ahc_softc *ahc, 345 struct ahc_devinfo *devinfo, 346 struct cam_path *path, 347 struct ahc_syncrate *syncrate, 348 u_int period, u_int offset, u_int type, 349 int paused); 350 static void ahc_set_width(struct ahc_softc *ahc, 351 struct ahc_devinfo *devinfo, 352 struct cam_path 
			*path, u_int width, u_int type,
			int paused);
static void		ahc_set_tags(struct ahc_softc *ahc,
				     struct ahc_devinfo *devinfo,
				     int enable);
static void		ahc_construct_sdtr(struct ahc_softc *ahc,
					   u_int period, u_int offset);

static void		ahc_construct_wdtr(struct ahc_softc *ahc, u_int bus_width);

static void		ahc_calc_residual(struct scb *scb);

static void		ahc_update_pending_syncrates(struct ahc_softc *ahc);

static void		ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);

static timeout_t
			ahc_timeout;
static void		ahc_queue_lstate_event(struct ahc_softc *ahc,
					       struct tmode_lstate *lstate,
					       u_int initiator_id, u_int event_type,
					       u_int event_arg);
static void		ahc_send_lstate_events(struct ahc_softc *ahc,
					       struct tmode_lstate *lstate);
static __inline int  sequencer_paused(struct ahc_softc *ahc);
static __inline void pause_sequencer(struct ahc_softc *ahc);
static __inline void unpause_sequencer(struct ahc_softc *ahc);
static void		restart_sequencer(struct ahc_softc *ahc);
static __inline u_int ahc_index_busy_tcl(struct ahc_softc *ahc,
					 u_int tcl, int unbusy);

static __inline void	ahc_busy_tcl(struct ahc_softc *ahc, struct scb *scb);

static __inline void	   ahc_freeze_ccb(union ccb* ccb);
static __inline cam_status ahc_ccb_status(union ccb* ccb);
static __inline void	   ahcsetccbstatus(union ccb* ccb,
					   cam_status status);
static void		ahc_run_tqinfifo(struct ahc_softc *ahc, int paused);
static void		ahc_run_qoutfifo(struct ahc_softc *ahc);

static __inline struct ahc_initiator_tinfo *
			ahc_fetch_transinfo(struct ahc_softc *ahc,
					    char channel,
					    u_int our_id, u_int target,
					    struct tmode_tstate **tstate);
static void		ahcfreescb(struct ahc_softc *ahc, struct scb *scb);
static __inline struct scb *ahcgetscb(struct ahc_softc *ahc);

static __inline u_int32_t
ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
{
	return (ahc->scb_data->hscb_busaddr
		+ (sizeof(struct hardware_scb) * index));
}

#define AHC_BUSRESET_DELAY	25	/* Reset delay in us */

static __inline int
sequencer_paused(struct ahc_softc *ahc)
{
	return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
}

static __inline void
pause_sequencer(struct ahc_softc *ahc)
{
	ahc_outb(ahc, HCNTRL, ahc->pause);

	/*
	 * Since the sequencer can disable pausing in a critical section, we
	 * must loop until it actually stops.
	 */
	while (sequencer_paused(ahc) == 0)
		;
}

static __inline void
unpause_sequencer(struct ahc_softc *ahc)
{
	if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
		ahc_outb(ahc, HCNTRL, ahc->unpause);
}

/*
 * Restart the sequencer program from address zero
 */
static void
restart_sequencer(struct ahc_softc *ahc)
{
	u_int i;

	pause_sequencer(ahc);

	/*
	 * Every time we restart the sequencer, there
	 * is the possibility that we have restarted
	 * within a three instruction window where an
	 * SCB has been marked free but has not made it
	 * onto the free list.  Since SCSI events (bus reset,
	 * unexpected bus free) will always freeze the
	 * sequencer, we cannot close this window.  To
	 * avoid losing an SCB, we reconstitute the free
	 * list every time we restart the sequencer.
455 */ 456 ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL); 457 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 458 459 ahc_outb(ahc, SCBPTR, i); 460 if (ahc_inb(ahc, SCB_TAG) == SCB_LIST_NULL) 461 ahc_add_curscb_to_free_list(ahc); 462 } 463 ahc_outb(ahc, SEQCTL, FASTMODE|SEQRESET); 464 unpause_sequencer(ahc); 465 } 466 467 static __inline u_int 468 ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl, int unbusy) 469 { 470 u_int scbid; 471 472 scbid = ahc->untagged_scbs[tcl]; 473 if (unbusy) 474 ahc->untagged_scbs[tcl] = SCB_LIST_NULL; 475 476 return (scbid); 477 } 478 479 static __inline void 480 ahc_busy_tcl(struct ahc_softc *ahc, struct scb *scb) 481 { 482 ahc->untagged_scbs[scb->hscb->tcl] = scb->hscb->tag; 483 } 484 485 static __inline void 486 ahc_freeze_ccb(union ccb* ccb) 487 { 488 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 489 ccb->ccb_h.status |= CAM_DEV_QFRZN; 490 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 491 } 492 } 493 494 static __inline cam_status 495 ahc_ccb_status(union ccb* ccb) 496 { 497 return (ccb->ccb_h.status & CAM_STATUS_MASK); 498 } 499 500 static __inline void 501 ahcsetccbstatus(union ccb* ccb, cam_status status) 502 { 503 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 504 ccb->ccb_h.status |= status; 505 } 506 507 static __inline struct ahc_initiator_tinfo * 508 ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id, 509 u_int remote_id, struct tmode_tstate **tstate) 510 { 511 /* 512 * Transfer data structures are stored from the perspective 513 * of the target role. Since the parameters for a connection 514 * in the initiator role to a given target are the same as 515 * when the roles are reversed, we pretend we are the target. 516 */ 517 if (channel == 'B') 518 our_id += 8; 519 *tstate = ahc->enabled_targets[our_id]; 520 return (&(*tstate)->transinfo[remote_id]); 521 } 522 523 static void 524 ahc_run_tqinfifo(struct ahc_softc *ahc, int paused) 525 { 526 struct target_cmd *cmd; 527 528 while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) { 529 530 /* 531 * Only advance through the queue if we 532 * had the resources to process the command. 533 */ 534 if (ahc_handle_target_cmd(ahc, cmd) != 0) 535 break; 536 537 ahc->tqinfifonext++; 538 cmd->cmd_valid = 0; 539 540 /* 541 * Lazily update our position in the target mode incomming 542 * command queue as seen by the sequencer. 543 */ 544 if ((ahc->tqinfifonext & (TQINFIFO_UPDATE_CNT-1)) == 0) { 545 if (!paused) 546 pause_sequencer(ahc); 547 ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1); 548 if (!paused) 549 unpause_sequencer(ahc); 550 } 551 } 552 } 553 554 static void 555 ahc_run_qoutfifo(struct ahc_softc *ahc) 556 { 557 struct scb *scb; 558 u_int scb_index; 559 560 while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) { 561 scb_index = ahc->qoutfifo[ahc->qoutfifonext]; 562 ahc->qoutfifo[ahc->qoutfifonext++] = SCB_LIST_NULL; 563 564 scb = &ahc->scb_data->scbarray[scb_index]; 565 if (scb_index >= ahc->scb_data->numscbs 566 || (scb->flags & SCB_ACTIVE) == 0) { 567 printf("%s: WARNING no command for scb %d " 568 "(cmdcmplt)\nQOUTPOS = %d\n", 569 ahc_name(ahc), scb_index, 570 ahc->qoutfifonext - 1); 571 continue; 572 } 573 574 /* 575 * Save off the residual 576 * if there is one. 577 */ 578 if (scb->hscb->residual_SG_count != 0) 579 ahc_calc_residual(scb); 580 else 581 scb->ccb->csio.resid = 0; 582 ahc_done(ahc, scb); 583 } 584 } 585 586 587 /* 588 * An scb (and hence an scb entry on the board) is put onto the 589 * free list. 
 */
static void
ahcfreescb(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *hscb;
	int opri;

	hscb = scb->hscb;

	opri = splcam();

	if ((ahc->flags & AHC_RESOURCE_SHORTAGE) != 0
	 && (scb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
		scb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		ahc->flags &= ~AHC_RESOURCE_SHORTAGE;
	}

	/* Clean up for the next user */
	scb->flags = SCB_FREE;
	hscb->control = 0;
	hscb->status = 0;

	SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links);
	splx(opri);
}

/*
 * Get a free scb, either one already assigned to a hardware slot
 * on the adapter or one that will require an SCB to be paged out before
 * use.  If there are none, see if we can allocate a new SCB.  Otherwise
 * either return an error or sleep.
 */
static __inline struct scb *
ahcgetscb(struct ahc_softc *ahc)
{
	struct scb *scbp;
	int opri;

	opri = splcam();
	if ((scbp = SLIST_FIRST(&ahc->scb_data->free_scbs))) {
		SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links);
	} else {
		ahcallocscbs(ahc);
		scbp = SLIST_FIRST(&ahc->scb_data->free_scbs);
		if (scbp != NULL)
			SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links);
	}

	splx(opri);

	return (scbp);
}

char *
ahc_name(struct ahc_softc *ahc)
{
	static char name[10];

	snprintf(name, sizeof(name), "ahc%d", ahc->unit);
	return (name);
}

#ifdef AHC_DEBUG
static void
ahc_print_scb(struct scb *scb)
{
	struct hardware_scb *hscb = scb->hscb;

	printf("scb:%p control:0x%x tcl:0x%x cmdlen:%d cmdpointer:0x%lx\n",
	       scb,
	       hscb->control,
	       hscb->tcl,
	       hscb->cmdlen,
	       hscb->cmdpointer );
	printf(" datlen:%d data:0x%lx segs:0x%x segp:0x%lx\n",
	       hscb->datalen,
	       hscb->data,
	       hscb->SG_count,
	       hscb->SG_pointer);
	printf(" sg_addr:%lx sg_len:%ld\n",
	       scb->sg_list[0].addr,
	       scb->sg_list[0].len);
	printf(" cdb:%x %x %x %x %x %x %x %x %x %x %x %x\n",
	       hscb->cmdstore[0], hscb->cmdstore[1], hscb->cmdstore[2],
	       hscb->cmdstore[3], hscb->cmdstore[4], hscb->cmdstore[5],
	       hscb->cmdstore[6], hscb->cmdstore[7], hscb->cmdstore[8],
	       hscb->cmdstore[9], hscb->cmdstore[10], hscb->cmdstore[11]);
}
#endif

static struct {
	u_int8_t errno;
	char *errmesg;
} hard_error[] = {
	{ ILLHADDR,   "Illegal Host Access" },
	{ ILLSADDR,   "Illegal Sequencer Address referenced" },
	{ ILLOPCODE,  "Illegal Opcode in sequencer program" },
	{ SQPARERR,   "Sequencer Parity Error" },
	{ DPARERR,    "Data-path Parity Error" },
	{ MPARERR,    "Scratch or SCB Memory Parity Error" },
	{ PCIERRSTAT, "PCI Error detected" },
	{ CIOPARERR,  "CIOBUS Parity Error" },
};
static const int num_errors = sizeof(hard_error)/sizeof(hard_error[0]);

static struct {
	u_int8_t phase;
	u_int8_t mesg_out; /* Message response to parity errors */
	char *phasemsg;
} phase_table[] = {
	{ P_DATAOUT, MSG_NOOP,			"in Data-out phase"	},
	{ P_DATAIN,  MSG_INITIATOR_DET_ERR,	"in Data-in phase"	},
	{ P_COMMAND, MSG_NOOP,			"in Command phase"	},
	{ P_MESGOUT, MSG_NOOP,			"in Message-out phase"	},
	{ P_STATUS,  MSG_INITIATOR_DET_ERR,	"in Status phase"	},
	{ P_MESGIN,  MSG_PARITY_ERROR,		"in Message-in phase"	},
	{ P_BUSFREE, MSG_NOOP,			"while idle"		},
	{ 0,	     MSG_NOOP,			"in unknown phase"	}
};
static const int num_phases = (sizeof(phase_table)/sizeof(phase_table[0])) - 1;
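/*
 * Illustrative sketch only -- not part of the original driver.  It shows one
 * way a table such as phase_table/num_phases above can be consumed: scan the
 * entries and fall through to the final catch-all on a miss.  The helper name
 * below is hypothetical; the driver's own error-reporting paths are free to
 * perform an equivalent scan inline where they need it.
 */
static __inline char *
ahc_lookup_phase_msg(u_int8_t curphase, u_int8_t *mesg_out)
{
	int i;

	/*
	 * num_phases deliberately excludes the last entry (phase == 0), so
	 * a miss leaves i pointing at the "in unknown phase" catch-all.
	 */
	for (i = 0; i < num_phases; i++) {
		if (phase_table[i].phase == curphase)
			break;
	}
	if (mesg_out != NULL)
		*mesg_out = phase_table[i].mesg_out;
	return (phase_table[i].phasemsg);
}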
710 711 /* 712 * Valid SCSIRATE values. (p. 3-17) 713 * Provides a mapping of tranfer periods in ns to the proper value to 714 * stick in the scsiscfr reg to use that transfer rate. 715 */ 716 #define AHC_SYNCRATE_DT 0 717 #define AHC_SYNCRATE_ULTRA2 1 718 #define AHC_SYNCRATE_ULTRA 2 719 #define AHC_SYNCRATE_FAST 5 720 static struct ahc_syncrate ahc_syncrates[] = { 721 /* ultra2 fast/ultra period rate */ 722 { 0x42, 0x000, 9, "80.0" }, 723 { 0x03, 0x000, 10, "40.0" }, 724 { 0x04, 0x000, 11, "33.0" }, 725 { 0x05, 0x100, 12, "20.0" }, 726 { 0x06, 0x110, 15, "16.0" }, 727 { 0x07, 0x120, 18, "13.4" }, 728 { 0x08, 0x000, 25, "10.0" }, 729 { 0x19, 0x010, 31, "8.0" }, 730 { 0x1a, 0x020, 37, "6.67" }, 731 { 0x1b, 0x030, 43, "5.7" }, 732 { 0x1c, 0x040, 50, "5.0" }, 733 { 0x00, 0x050, 56, "4.4" }, 734 { 0x00, 0x060, 62, "4.0" }, 735 { 0x00, 0x070, 68, "3.6" }, 736 { 0x00, 0x000, 0, NULL } 737 }; 738 739 /* 740 * Allocate a controller structure for a new device and initialize it. 741 */ 742 struct ahc_softc * 743 ahc_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id, 744 bus_dma_tag_t parent_dmat, ahc_chip chip, ahc_feature features, 745 ahc_flag flags, struct scb_data *scb_data) 746 { 747 /* 748 * find unit and check we have that many defined 749 */ 750 struct ahc_softc *ahc; 751 size_t alloc_size; 752 753 /* 754 * Allocate a storage area for us 755 */ 756 if (scb_data == NULL) 757 /* 758 * We are not sharing SCB space with another controller 759 * so allocate our own SCB data space. 760 */ 761 alloc_size = sizeof(struct full_ahc_softc); 762 else 763 alloc_size = sizeof(struct ahc_softc); 764 ahc = malloc(alloc_size, M_DEVBUF, M_NOWAIT); 765 if (!ahc) { 766 device_printf(dev, "cannot malloc softc!\n"); 767 return NULL; 768 } 769 bzero(ahc, alloc_size); 770 LIST_INIT(&ahc->pending_ccbs); 771 ahc->device = dev; 772 ahc->unit = device_get_unit(dev); 773 ahc->regs_res_type = regs_type; 774 ahc->regs_res_id = regs_id; 775 ahc->regs = regs; 776 ahc->tag = rman_get_bustag(regs); 777 ahc->bsh = rman_get_bushandle(regs); 778 ahc->parent_dmat = parent_dmat; 779 ahc->chip = chip; 780 ahc->features = features; 781 ahc->flags = flags; 782 if (scb_data == NULL) { 783 struct full_ahc_softc* full_softc = (struct full_ahc_softc*)ahc; 784 ahc->scb_data = &full_softc->scb_data_storage; 785 } else 786 ahc->scb_data = scb_data; 787 788 ahc->unpause = (ahc_inb(ahc, HCNTRL) & IRQMS) | INTEN; 789 /* The IRQMS bit is only valid on VL and EISA chips */ 790 if ((ahc->chip & AHC_PCI) != 0) 791 ahc->unpause &= ~IRQMS; 792 ahc->pause = ahc->unpause | PAUSE; 793 return (ahc); 794 } 795 796 void 797 ahc_free(ahc) 798 struct ahc_softc *ahc; 799 { 800 ahcfiniscbdata(ahc); 801 switch (ahc->init_level) { 802 case 3: 803 bus_dmamap_unload(ahc->shared_data_dmat, 804 ahc->shared_data_dmamap); 805 case 2: 806 bus_dmamem_free(ahc->shared_data_dmat, ahc->qoutfifo, 807 ahc->shared_data_dmamap); 808 bus_dmamap_destroy(ahc->shared_data_dmat, 809 ahc->shared_data_dmamap); 810 case 1: 811 bus_dma_tag_destroy(ahc->buffer_dmat); 812 break; 813 } 814 815 if (ahc->regs != NULL) 816 bus_release_resource(ahc->device, ahc->regs_res_type, 817 ahc->regs_res_id, ahc->regs); 818 if (ahc->irq != NULL) 819 bus_release_resource(ahc->device, ahc->irq_res_type, 820 0, ahc->irq); 821 822 free(ahc, M_DEVBUF); 823 return; 824 } 825 826 static int 827 ahcinitscbdata(struct ahc_softc *ahc) 828 { 829 struct scb_data *scb_data; 830 int i; 831 832 scb_data = ahc->scb_data; 833 SLIST_INIT(&scb_data->free_scbs); 834 SLIST_INIT(&scb_data->sg_maps); 
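	/*
	 * Note: the DMA tag/memory setup steps below bump
	 * scb_data->init_level as they succeed, and ahcfiniscbdata()
	 * switches on that count, so a failure part way through can be
	 * unwound in reverse order.
	 */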
835 836 /* Allocate SCB resources */ 837 scb_data->scbarray = 838 (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX, 839 M_DEVBUF, M_NOWAIT); 840 if (scb_data->scbarray == NULL) 841 return (ENOMEM); 842 bzero(scb_data->scbarray, sizeof(struct scb) * AHC_SCB_MAX); 843 844 /* Determine the number of hardware SCBs and initialize them */ 845 846 scb_data->maxhscbs = ahc_probe_scbs(ahc); 847 /* SCB 0 heads the free list */ 848 ahc_outb(ahc, FREE_SCBH, 0); 849 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 850 ahc_outb(ahc, SCBPTR, i); 851 852 /* Clear the control byte. */ 853 ahc_outb(ahc, SCB_CONTROL, 0); 854 855 /* Set the next pointer */ 856 ahc_outb(ahc, SCB_NEXT, i+1); 857 858 /* Make the tag number invalid */ 859 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); 860 } 861 862 /* Make sure that the last SCB terminates the free list */ 863 ahc_outb(ahc, SCBPTR, i-1); 864 ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); 865 866 /* Ensure we clear the 0 SCB's control byte. */ 867 ahc_outb(ahc, SCBPTR, 0); 868 ahc_outb(ahc, SCB_CONTROL, 0); 869 870 scb_data->maxhscbs = i; 871 872 if (ahc->scb_data->maxhscbs == 0) 873 panic("%s: No SCB space found", ahc_name(ahc)); 874 875 /* 876 * Create our DMA tags. These tags define the kinds of device 877 * accessable memory allocations and memory mappings we will 878 * need to perform during normal operation. 879 * 880 * Unless we need to further restrict the allocation, we rely 881 * on the restrictions of the parent dmat, hence the common 882 * use of MAXADDR and MAXSIZE. 883 */ 884 885 /* DMA tag for our hardware scb structures */ 886 if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0, 887 /*lowaddr*/BUS_SPACE_MAXADDR, 888 /*highaddr*/BUS_SPACE_MAXADDR, 889 /*filter*/NULL, /*filterarg*/NULL, 890 AHC_SCB_MAX * sizeof(struct hardware_scb), 891 /*nsegments*/1, 892 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 893 /*flags*/0, &scb_data->hscb_dmat) != 0) { 894 goto error_exit; 895 } 896 897 scb_data->init_level++; 898 899 /* Allocation for our ccbs */ 900 if (bus_dmamem_alloc(scb_data->hscb_dmat, (void **)&scb_data->hscbs, 901 BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) { 902 goto error_exit; 903 } 904 905 scb_data->init_level++; 906 907 /* And permanently map them */ 908 bus_dmamap_load(scb_data->hscb_dmat, scb_data->hscb_dmamap, 909 scb_data->hscbs, 910 AHC_SCB_MAX * sizeof(struct hardware_scb), 911 ahcdmamapcb, &scb_data->hscb_busaddr, /*flags*/0); 912 913 scb_data->init_level++; 914 915 /* DMA tag for our sense buffers */ 916 if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0, 917 /*lowaddr*/BUS_SPACE_MAXADDR, 918 /*highaddr*/BUS_SPACE_MAXADDR, 919 /*filter*/NULL, /*filterarg*/NULL, 920 AHC_SCB_MAX * sizeof(struct scsi_sense_data), 921 /*nsegments*/1, 922 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 923 /*flags*/0, &scb_data->sense_dmat) != 0) { 924 goto error_exit; 925 } 926 927 scb_data->init_level++; 928 929 /* Allocate them */ 930 if (bus_dmamem_alloc(scb_data->sense_dmat, (void **)&scb_data->sense, 931 BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) { 932 goto error_exit; 933 } 934 935 scb_data->init_level++; 936 937 /* And permanently map them */ 938 bus_dmamap_load(scb_data->sense_dmat, scb_data->sense_dmamap, 939 scb_data->sense, 940 AHC_SCB_MAX * sizeof(struct scsi_sense_data), 941 ahcdmamapcb, &scb_data->sense_busaddr, /*flags*/0); 942 943 scb_data->init_level++; 944 945 /* DMA tag for our S/G structures. 
We allocate in page sized chunks */ 946 if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0, 947 /*lowaddr*/BUS_SPACE_MAXADDR, 948 /*highaddr*/BUS_SPACE_MAXADDR, 949 /*filter*/NULL, /*filterarg*/NULL, 950 PAGE_SIZE, /*nsegments*/1, 951 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 952 /*flags*/0, &scb_data->sg_dmat) != 0) { 953 goto error_exit; 954 } 955 956 scb_data->init_level++; 957 958 /* Perform initial CCB allocation */ 959 bzero(scb_data->hscbs, AHC_SCB_MAX * sizeof(struct hardware_scb)); 960 ahcallocscbs(ahc); 961 962 if (scb_data->numscbs == 0) { 963 printf("%s: ahc_init_scb_data - " 964 "Unable to allocate initial scbs\n", 965 ahc_name(ahc)); 966 goto error_exit; 967 } 968 969 /* 970 * Note that we were successfull 971 */ 972 return 0; 973 974 error_exit: 975 976 return ENOMEM; 977 } 978 979 static void 980 ahcfiniscbdata(struct ahc_softc *ahc) 981 { 982 struct scb_data *scb_data; 983 984 scb_data = ahc->scb_data; 985 986 switch (scb_data->init_level) { 987 default: 988 case 7: 989 { 990 struct sg_map_node *sg_map; 991 992 while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) { 993 SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); 994 bus_dmamap_unload(scb_data->sg_dmat, 995 sg_map->sg_dmamap); 996 bus_dmamem_free(scb_data->sg_dmat, sg_map->sg_vaddr, 997 sg_map->sg_dmamap); 998 free(sg_map, M_DEVBUF); 999 } 1000 bus_dma_tag_destroy(scb_data->sg_dmat); 1001 } 1002 case 6: 1003 bus_dmamap_unload(scb_data->sense_dmat, 1004 scb_data->sense_dmamap); 1005 case 5: 1006 bus_dmamem_free(scb_data->sense_dmat, scb_data->sense, 1007 scb_data->sense_dmamap); 1008 bus_dmamap_destroy(scb_data->sense_dmat, 1009 scb_data->sense_dmamap); 1010 case 4: 1011 bus_dma_tag_destroy(scb_data->sense_dmat); 1012 case 3: 1013 bus_dmamap_unload(scb_data->hscb_dmat, scb_data->hscb_dmamap); 1014 case 2: 1015 bus_dmamem_free(scb_data->hscb_dmat, scb_data->hscbs, 1016 scb_data->hscb_dmamap); 1017 bus_dmamap_destroy(scb_data->hscb_dmat, scb_data->hscb_dmamap); 1018 case 1: 1019 bus_dma_tag_destroy(scb_data->hscb_dmat); 1020 break; 1021 } 1022 if (scb_data->scbarray != NULL) 1023 free(scb_data->scbarray, M_DEVBUF); 1024 } 1025 1026 static void 1027 ahcdmamapcb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1028 { 1029 bus_addr_t *baddr; 1030 1031 baddr = (bus_addr_t *)arg; 1032 *baddr = segs->ds_addr; 1033 } 1034 1035 int 1036 ahc_reset(struct ahc_softc *ahc) 1037 { 1038 u_int sblkctl; 1039 int wait; 1040 1041 #ifdef AHC_DUMP_SEQ 1042 if (ahc->init_level == 0) 1043 ahc_dumpseq(ahc); 1044 #endif 1045 ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause); 1046 /* 1047 * Ensure that the reset has finished 1048 */ 1049 wait = 1000; 1050 do { 1051 DELAY(1000); 1052 } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK)); 1053 1054 if (wait == 0) { 1055 printf("%s: WARNING - Failed chip reset! " 1056 "Trying to initialize anyway.\n", ahc_name(ahc)); 1057 } 1058 ahc_outb(ahc, HCNTRL, ahc->pause); 1059 1060 /* Determine channel configuration */ 1061 sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE); 1062 /* No Twin Channel PCI cards */ 1063 if ((ahc->chip & AHC_PCI) != 0) 1064 sblkctl &= ~SELBUSB; 1065 switch (sblkctl) { 1066 case 0: 1067 /* Single Narrow Channel */ 1068 break; 1069 case 2: 1070 /* Wide Channel */ 1071 ahc->features |= AHC_WIDE; 1072 break; 1073 case 8: 1074 /* Twin Channel */ 1075 ahc->features |= AHC_TWIN; 1076 break; 1077 default: 1078 printf(" Unsupported adapter type. 
Ignoring\n"); 1079 return(-1); 1080 } 1081 1082 return (0); 1083 } 1084 1085 /* 1086 * Called when we have an active connection to a target on the bus, 1087 * this function finds the nearest syncrate to the input period limited 1088 * by the capabilities of the bus connectivity of the target. 1089 */ 1090 static struct ahc_syncrate * 1091 ahc_devlimited_syncrate(struct ahc_softc *ahc, u_int *period) { 1092 u_int maxsync; 1093 1094 if ((ahc->features & AHC_ULTRA2) != 0) { 1095 if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0 1096 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) { 1097 maxsync = AHC_SYNCRATE_ULTRA2; 1098 } else { 1099 maxsync = AHC_SYNCRATE_ULTRA; 1100 } 1101 } else if ((ahc->features & AHC_ULTRA) != 0) { 1102 maxsync = AHC_SYNCRATE_ULTRA; 1103 } else { 1104 maxsync = AHC_SYNCRATE_FAST; 1105 } 1106 return (ahc_find_syncrate(ahc, period, maxsync)); 1107 } 1108 1109 /* 1110 * Look up the valid period to SCSIRATE conversion in our table. 1111 * Return the period and offset that should be sent to the target 1112 * if this was the beginning of an SDTR. 1113 */ 1114 static struct ahc_syncrate * 1115 ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, u_int maxsync) 1116 { 1117 struct ahc_syncrate *syncrate; 1118 1119 syncrate = &ahc_syncrates[maxsync]; 1120 while ((syncrate->rate != NULL) 1121 && ((ahc->features & AHC_ULTRA2) == 0 1122 || (syncrate->sxfr_u2 != 0))) { 1123 1124 if (*period <= syncrate->period) { 1125 /* 1126 * When responding to a target that requests 1127 * sync, the requested rate may fall between 1128 * two rates that we can output, but still be 1129 * a rate that we can receive. Because of this, 1130 * we want to respond to the target with 1131 * the same rate that it sent to us even 1132 * if the period we use to send data to it 1133 * is lower. Only lower the response period 1134 * if we must. 1135 */ 1136 if (syncrate == &ahc_syncrates[maxsync]) 1137 *period = syncrate->period; 1138 break; 1139 } 1140 syncrate++; 1141 } 1142 1143 if ((*period == 0) 1144 || (syncrate->rate == NULL) 1145 || ((ahc->features & AHC_ULTRA2) != 0 1146 && (syncrate->sxfr_u2 == 0))) { 1147 /* Use asynchronous transfers. 
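		 * Returning a zero period and a NULL syncrate is how the
		 * rest of the driver recognizes the asynchronous fallback.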
*/ 1148 *period = 0; 1149 syncrate = NULL; 1150 } 1151 return (syncrate); 1152 } 1153 1154 static u_int 1155 ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync) 1156 { 1157 struct ahc_syncrate *syncrate; 1158 1159 if ((ahc->features & AHC_ULTRA2) != 0) 1160 scsirate &= SXFR_ULTRA2; 1161 else 1162 scsirate &= SXFR; 1163 1164 syncrate = &ahc_syncrates[maxsync]; 1165 while (syncrate->rate != NULL) { 1166 1167 if ((ahc->features & AHC_ULTRA2) != 0) { 1168 if (syncrate->sxfr_u2 == 0) 1169 break; 1170 else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2)) 1171 return (syncrate->period); 1172 } else if (scsirate == (syncrate->sxfr & SXFR)) { 1173 return (syncrate->period); 1174 } 1175 syncrate++; 1176 } 1177 return (0); /* async */ 1178 } 1179 1180 static void 1181 ahc_validate_offset(struct ahc_softc *ahc, struct ahc_syncrate *syncrate, 1182 u_int *offset, int wide) 1183 { 1184 u_int maxoffset; 1185 1186 /* Limit offset to what we can do */ 1187 if (syncrate == NULL) { 1188 maxoffset = 0; 1189 } else if ((ahc->features & AHC_ULTRA2) != 0) { 1190 maxoffset = MAX_OFFSET_ULTRA2; 1191 } else { 1192 if (wide) 1193 maxoffset = MAX_OFFSET_16BIT; 1194 else 1195 maxoffset = MAX_OFFSET_8BIT; 1196 } 1197 *offset = MIN(*offset, maxoffset); 1198 } 1199 1200 static void 1201 ahc_update_target_msg_request(struct ahc_softc *ahc, 1202 struct ahc_devinfo *devinfo, 1203 struct ahc_initiator_tinfo *tinfo, 1204 int force, int paused) 1205 { 1206 u_int targ_msg_req_orig; 1207 1208 targ_msg_req_orig = ahc->targ_msg_req; 1209 if (tinfo->current.period != tinfo->goal.period 1210 || tinfo->current.width != tinfo->goal.width 1211 || tinfo->current.offset != tinfo->goal.offset 1212 || (force 1213 && (tinfo->goal.period != 0 1214 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT))) 1215 ahc->targ_msg_req |= devinfo->target_mask; 1216 else 1217 ahc->targ_msg_req &= ~devinfo->target_mask; 1218 1219 if (ahc->targ_msg_req != targ_msg_req_orig) { 1220 /* Update the message request bit for this target */ 1221 if ((ahc->features & AHC_HS_MAILBOX) != 0) { 1222 if (paused) { 1223 ahc_outb(ahc, TARGET_MSG_REQUEST, 1224 ahc->targ_msg_req & 0xFF); 1225 ahc_outb(ahc, TARGET_MSG_REQUEST + 1, 1226 (ahc->targ_msg_req >> 8) & 0xFF); 1227 } else { 1228 ahc_outb(ahc, HS_MAILBOX, 1229 0x01 << HOST_MAILBOX_SHIFT); 1230 } 1231 } else { 1232 if (!paused) 1233 pause_sequencer(ahc); 1234 1235 ahc_outb(ahc, TARGET_MSG_REQUEST, 1236 ahc->targ_msg_req & 0xFF); 1237 ahc_outb(ahc, TARGET_MSG_REQUEST + 1, 1238 (ahc->targ_msg_req >> 8) & 0xFF); 1239 1240 if (!paused) 1241 unpause_sequencer(ahc); 1242 } 1243 } 1244 } 1245 1246 static int 1247 ahc_create_path(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1248 struct cam_path **path) 1249 { 1250 path_id_t path_id; 1251 1252 if (devinfo->channel == 'B') 1253 path_id = cam_sim_path(ahc->sim_b); 1254 else 1255 path_id = cam_sim_path(ahc->sim); 1256 1257 return (xpt_create_path(path, /*periph*/NULL, 1258 path_id, devinfo->target, 1259 devinfo->lun)); 1260 } 1261 1262 static void 1263 ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1264 struct cam_path *path, struct ahc_syncrate *syncrate, 1265 u_int period, u_int offset, u_int type, int paused) 1266 { 1267 struct ahc_initiator_tinfo *tinfo; 1268 struct tmode_tstate *tstate; 1269 u_int old_period; 1270 u_int old_offset; 1271 int active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; 1272 1273 if (syncrate == NULL) { 1274 period = 0; 1275 offset = 0; 1276 } 1277 1278 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, 
devinfo->our_scsiid, 1279 devinfo->target, &tstate); 1280 old_period = tinfo->current.period; 1281 old_offset = tinfo->current.offset; 1282 1283 if ((type & AHC_TRANS_CUR) != 0 1284 && (old_period != period || old_offset != offset)) { 1285 struct cam_path *path2; 1286 u_int scsirate; 1287 1288 scsirate = tinfo->scsirate; 1289 if ((ahc->features & AHC_ULTRA2) != 0) { 1290 1291 /* XXX */ 1292 /* Force single edge until DT is fully implemented */ 1293 scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC); 1294 if (syncrate != NULL) 1295 scsirate |= syncrate->sxfr_u2|SINGLE_EDGE; 1296 1297 if (active) 1298 ahc_outb(ahc, SCSIOFFSET, offset); 1299 } else { 1300 1301 scsirate &= ~(SXFR|SOFS); 1302 /* 1303 * Ensure Ultra mode is set properly for 1304 * this target. 1305 */ 1306 tstate->ultraenb &= ~devinfo->target_mask; 1307 if (syncrate != NULL) { 1308 if (syncrate->sxfr & ULTRA_SXFR) { 1309 tstate->ultraenb |= 1310 devinfo->target_mask; 1311 } 1312 scsirate |= syncrate->sxfr & SXFR; 1313 scsirate |= offset & SOFS; 1314 } 1315 if (active) { 1316 u_int sxfrctl0; 1317 1318 sxfrctl0 = ahc_inb(ahc, SXFRCTL0); 1319 sxfrctl0 &= ~FAST20; 1320 if (tstate->ultraenb & devinfo->target_mask) 1321 sxfrctl0 |= FAST20; 1322 ahc_outb(ahc, SXFRCTL0, sxfrctl0); 1323 } 1324 } 1325 if (active) 1326 ahc_outb(ahc, SCSIRATE, scsirate); 1327 1328 tinfo->scsirate = scsirate; 1329 tinfo->current.period = period; 1330 tinfo->current.offset = offset; 1331 1332 /* Update the syncrates in any pending scbs */ 1333 ahc_update_pending_syncrates(ahc); 1334 1335 /* 1336 * If possible, tell the SCSI layer about the 1337 * new transfer parameters. 1338 */ 1339 /* If possible, update the XPT's notion of our transfer rate */ 1340 path2 = NULL; 1341 if (path == NULL) { 1342 int error; 1343 1344 error = ahc_create_path(ahc, devinfo, &path2); 1345 if (error == CAM_REQ_CMP) 1346 path = path2; 1347 else 1348 path2 = NULL; 1349 } 1350 1351 if (path != NULL) { 1352 struct ccb_trans_settings neg; 1353 1354 neg.sync_period = period; 1355 neg.sync_offset = offset; 1356 neg.valid = CCB_TRANS_SYNC_RATE_VALID 1357 | CCB_TRANS_SYNC_OFFSET_VALID; 1358 xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1); 1359 xpt_async(AC_TRANSFER_NEG, path, &neg); 1360 } 1361 1362 if (path2 != NULL) 1363 xpt_free_path(path2); 1364 1365 if (bootverbose) { 1366 if (offset != 0) { 1367 printf("%s: target %d synchronous at %sMHz, " 1368 "offset = 0x%x\n", ahc_name(ahc), 1369 devinfo->target, syncrate->rate, offset); 1370 } else { 1371 printf("%s: target %d using " 1372 "asynchronous transfers\n", 1373 ahc_name(ahc), devinfo->target); 1374 } 1375 } 1376 } 1377 1378 if ((type & AHC_TRANS_GOAL) != 0) { 1379 tinfo->goal.period = period; 1380 tinfo->goal.offset = offset; 1381 } 1382 1383 if ((type & AHC_TRANS_USER) != 0) { 1384 tinfo->user.period = period; 1385 tinfo->user.offset = offset; 1386 } 1387 1388 ahc_update_target_msg_request(ahc, devinfo, tinfo, 1389 /*force*/FALSE, 1390 paused); 1391 } 1392 1393 static void 1394 ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1395 struct cam_path *path, u_int width, u_int type, int paused) 1396 { 1397 struct ahc_initiator_tinfo *tinfo; 1398 struct tmode_tstate *tstate; 1399 u_int oldwidth; 1400 int active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; 1401 1402 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 1403 devinfo->target, &tstate); 1404 oldwidth = tinfo->current.width; 1405 1406 if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) { 1407 struct cam_path *path2; 1408 u_int scsirate; 1409 
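		/*
		 * Only the WIDEXFER bit of the stored SCSIRATE value changes
		 * here; the synchronous settings negotiated separately are
		 * left untouched.
		 */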
1410 scsirate = tinfo->scsirate; 1411 scsirate &= ~WIDEXFER; 1412 if (width == MSG_EXT_WDTR_BUS_16_BIT) 1413 scsirate |= WIDEXFER; 1414 1415 tinfo->scsirate = scsirate; 1416 1417 if (active) 1418 ahc_outb(ahc, SCSIRATE, scsirate); 1419 1420 tinfo->current.width = width; 1421 1422 /* If possible, update the XPT's notion of our transfer rate */ 1423 path2 = NULL; 1424 if (path == NULL) { 1425 int error; 1426 1427 error = ahc_create_path(ahc, devinfo, &path2); 1428 if (error == CAM_REQ_CMP) 1429 path = path2; 1430 else 1431 path2 = NULL; 1432 } 1433 1434 if (path != NULL) { 1435 struct ccb_trans_settings neg; 1436 1437 neg.bus_width = width; 1438 neg.valid = CCB_TRANS_BUS_WIDTH_VALID; 1439 xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1); 1440 xpt_async(AC_TRANSFER_NEG, path, &neg); 1441 } 1442 1443 if (path2 != NULL) 1444 xpt_free_path(path2); 1445 1446 if (bootverbose) { 1447 printf("%s: target %d using %dbit transfers\n", 1448 ahc_name(ahc), devinfo->target, 1449 8 * (0x01 << width)); 1450 } 1451 } 1452 if ((type & AHC_TRANS_GOAL) != 0) 1453 tinfo->goal.width = width; 1454 if ((type & AHC_TRANS_USER) != 0) 1455 tinfo->user.width = width; 1456 1457 ahc_update_target_msg_request(ahc, devinfo, tinfo, 1458 /*force*/FALSE, paused); 1459 } 1460 1461 static void 1462 ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, int enable) 1463 { 1464 struct ahc_initiator_tinfo *tinfo; 1465 struct tmode_tstate *tstate; 1466 1467 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 1468 devinfo->target, &tstate); 1469 1470 if (enable) 1471 tstate->tagenable |= devinfo->target_mask; 1472 else 1473 tstate->tagenable &= ~devinfo->target_mask; 1474 } 1475 1476 /* 1477 * Attach all the sub-devices we can find 1478 */ 1479 int 1480 ahc_attach(struct ahc_softc *ahc) 1481 { 1482 struct ccb_setasync csa; 1483 struct cam_devq *devq; 1484 int bus_id; 1485 int bus_id2; 1486 struct cam_sim *sim; 1487 struct cam_sim *sim2; 1488 struct cam_path *path; 1489 struct cam_path *path2; 1490 int count; 1491 int s; 1492 int error; 1493 1494 count = 0; 1495 sim = NULL; 1496 sim2 = NULL; 1497 1498 s = splcam(); 1499 /* Hook up our interrupt handler */ 1500 if ((error = bus_setup_intr(ahc->device, ahc->irq, INTR_TYPE_CAM, 1501 ahc_intr, ahc, &ahc->ih)) != 0) { 1502 device_printf(ahc->device, "bus_setup_intr() failed: %d\n", 1503 error); 1504 goto fail; 1505 } 1506 1507 /* 1508 * Attach secondary channel first if the user has 1509 * declared it the primary channel. 1510 */ 1511 if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) { 1512 bus_id = 1; 1513 bus_id2 = 0; 1514 } else { 1515 bus_id = 0; 1516 bus_id2 = 1; 1517 } 1518 1519 /* 1520 * Create the device queue for our SIM(s). 
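	 * A single queue is allocated here and, on twin channel adapters,
	 * shared with the second SIM, which is why that SIM is later freed
	 * with free_devq set to FALSE.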
1521 */ 1522 devq = cam_simq_alloc(AHC_SCB_MAX); 1523 if (devq == NULL) 1524 goto fail; 1525 1526 /* 1527 * Construct our first channel SIM entry 1528 */ 1529 sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc, ahc->unit, 1530 1, AHC_SCB_MAX, devq); 1531 if (sim == NULL) { 1532 cam_simq_free(devq); 1533 goto fail; 1534 } 1535 1536 if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) { 1537 cam_sim_free(sim, /*free_devq*/TRUE); 1538 sim = NULL; 1539 goto fail; 1540 } 1541 1542 if (xpt_create_path(&path, /*periph*/NULL, 1543 cam_sim_path(sim), CAM_TARGET_WILDCARD, 1544 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 1545 xpt_bus_deregister(cam_sim_path(sim)); 1546 cam_sim_free(sim, /*free_devq*/TRUE); 1547 sim = NULL; 1548 goto fail; 1549 } 1550 1551 xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5); 1552 csa.ccb_h.func_code = XPT_SASYNC_CB; 1553 csa.event_enable = AC_LOST_DEVICE; 1554 csa.callback = ahc_async; 1555 csa.callback_arg = sim; 1556 xpt_action((union ccb *)&csa); 1557 count++; 1558 1559 if (ahc->features & AHC_TWIN) { 1560 sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc", 1561 ahc, ahc->unit, 1, 1562 AHC_SCB_MAX, devq); 1563 1564 if (sim2 == NULL) { 1565 printf("ahc_attach: Unable to attach second " 1566 "bus due to resource shortage"); 1567 goto fail; 1568 } 1569 1570 if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) { 1571 printf("ahc_attach: Unable to attach second " 1572 "bus due to resource shortage"); 1573 /* 1574 * We do not want to destroy the device queue 1575 * because the first bus is using it. 1576 */ 1577 cam_sim_free(sim2, /*free_devq*/FALSE); 1578 goto fail; 1579 } 1580 1581 if (xpt_create_path(&path2, /*periph*/NULL, 1582 cam_sim_path(sim2), 1583 CAM_TARGET_WILDCARD, 1584 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 1585 xpt_bus_deregister(cam_sim_path(sim2)); 1586 cam_sim_free(sim2, /*free_devq*/FALSE); 1587 sim2 = NULL; 1588 goto fail; 1589 } 1590 xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5); 1591 csa.ccb_h.func_code = XPT_SASYNC_CB; 1592 csa.event_enable = AC_LOST_DEVICE; 1593 csa.callback = ahc_async; 1594 csa.callback_arg = sim2; 1595 xpt_action((union ccb *)&csa); 1596 count++; 1597 } 1598 1599 fail: 1600 if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) { 1601 ahc->sim_b = sim; 1602 ahc->path_b = path; 1603 ahc->sim = sim2; 1604 ahc->path = path2; 1605 } else { 1606 ahc->sim = sim; 1607 ahc->path = path; 1608 ahc->sim_b = sim2; 1609 ahc->path_b = path2; 1610 } 1611 splx(s); 1612 return (count); 1613 } 1614 1615 #if UNUSED 1616 static void 1617 ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 1618 struct scb *scb) 1619 { 1620 role_t role; 1621 int our_id; 1622 1623 if (scb->ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { 1624 our_id = scb->ccb->ccb_h.target_id; 1625 role = ROLE_TARGET; 1626 } else { 1627 our_id = SCB_CHANNEL(scb) == 'B' ? 
ahc->our_id_b : ahc->our_id; 1628 role = ROLE_INITIATOR; 1629 } 1630 ahc_compile_devinfo(devinfo, our_id, SCB_TARGET(scb), 1631 SCB_LUN(scb), SCB_CHANNEL(scb), role); 1632 } 1633 #endif 1634 1635 static void 1636 ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 1637 { 1638 u_int saved_tcl; 1639 role_t role; 1640 int our_id; 1641 1642 if (ahc_inb(ahc, SSTAT0) & TARGET) 1643 role = ROLE_TARGET; 1644 else 1645 role = ROLE_INITIATOR; 1646 1647 if (role == ROLE_TARGET 1648 && (ahc->features & AHC_MULTI_TID) != 0 1649 && (ahc_inb(ahc, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) { 1650 /* We were selected, so pull our id from TARGIDIN */ 1651 our_id = ahc_inb(ahc, TARGIDIN) & OID; 1652 } else if ((ahc->features & AHC_ULTRA2) != 0) 1653 our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID; 1654 else 1655 our_id = ahc_inb(ahc, SCSIID) & OID; 1656 1657 saved_tcl = ahc_inb(ahc, SAVED_TCL); 1658 ahc_compile_devinfo(devinfo, our_id, TCL_TARGET(saved_tcl), 1659 TCL_LUN(saved_tcl), TCL_CHANNEL(ahc, saved_tcl), 1660 role); 1661 } 1662 1663 static void 1664 ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target, 1665 u_int lun, char channel, role_t role) 1666 { 1667 devinfo->our_scsiid = our_id; 1668 devinfo->target = target; 1669 devinfo->lun = lun; 1670 devinfo->target_offset = target; 1671 devinfo->channel = channel; 1672 devinfo->role = role; 1673 if (channel == 'B') 1674 devinfo->target_offset += 8; 1675 devinfo->target_mask = (0x01 << devinfo->target_offset); 1676 } 1677 1678 /* 1679 * Catch an interrupt from the adapter 1680 */ 1681 void 1682 ahc_intr(void *arg) 1683 { 1684 struct ahc_softc *ahc; 1685 u_int intstat; 1686 1687 ahc = (struct ahc_softc *)arg; 1688 1689 intstat = ahc_inb(ahc, INTSTAT); 1690 1691 /* 1692 * Any interrupts to process? 
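	 * If not, the interrupt may belong to another device sharing our
	 * interrupt line.  On PCI adapters, a long run of such unsolicited
	 * interrupts also triggers a check for a latched PCI error below.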
	 */
#if NPCI > 0
	if ((intstat & INT_PEND) == 0) {
		if ((ahc->chip & AHC_PCI) != 0
		 && (ahc->unsolicited_ints > 500)) {
			if ((ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
				ahc_pci_intr(ahc);
			ahc->unsolicited_ints = 0;
		} else {
			ahc->unsolicited_ints++;
		}
		return;
	} else {
		ahc->unsolicited_ints = 0;
	}
#else
	if ((intstat & INT_PEND) == 0)
		return;
#endif

	if (intstat & CMDCMPLT) {
		ahc_outb(ahc, CLRINT, CLRCMDINT);
		ahc_run_qoutfifo(ahc);
		if ((ahc->flags & AHC_TARGETMODE) != 0) {
			ahc_run_tqinfifo(ahc, /*paused*/FALSE);
		}
	}
	if (intstat & BRKADRINT) {
		/*
		 * We upset the sequencer :-(
		 * Look up the error message
		 */
		int i, error, num_errors;

		error = ahc_inb(ahc, ERROR);
		num_errors = sizeof(hard_error)/sizeof(hard_error[0]);
		for (i = 0; error != 1 && i < num_errors; i++)
			error >>= 1;
		panic("%s: brkadrint, %s at seqaddr = 0x%x\n",
		      ahc_name(ahc), hard_error[i].errmesg,
		      ahc_inb(ahc, SEQADDR0) |
		      (ahc_inb(ahc, SEQADDR1) << 8));

		/* Tell everyone that this HBA is no longer available */
		ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
			       CAM_NO_HBA);
	}
	if (intstat & SEQINT)
		ahc_handle_seqint(ahc, intstat);

	if (intstat & SCSIINT)
		ahc_handle_scsiint(ahc, intstat);
}

static struct tmode_tstate *
ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
{
	struct tmode_tstate *master_tstate;
	struct tmode_tstate *tstate;
	int i, s;

	master_tstate = ahc->enabled_targets[ahc->our_id];
	if (channel == 'B') {
		scsi_id += 8;
		master_tstate = ahc->enabled_targets[ahc->our_id_b + 8];
	}
	if (ahc->enabled_targets[scsi_id] != NULL
	 && ahc->enabled_targets[scsi_id] != master_tstate)
		panic("%s: ahc_alloc_tstate - Target already allocated",
		      ahc_name(ahc));
	tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT);
	if (tstate == NULL)
		return (NULL);

	/*
	 * If we have allocated a master tstate, copy user settings from
	 * the master tstate (taken from SRAM or the EEPROM) for this
	 * channel, but reset our current and goal settings to async/narrow
	 * until an initiator talks to us.
1773 */ 1774 if (master_tstate != NULL) { 1775 bcopy(master_tstate, tstate, sizeof(*tstate)); 1776 bzero(tstate->enabled_luns, sizeof(tstate->enabled_luns)); 1777 tstate->ultraenb = 0; 1778 for (i = 0; i < 16; i++) { 1779 bzero(&tstate->transinfo[i].current, 1780 sizeof(tstate->transinfo[i].current)); 1781 bzero(&tstate->transinfo[i].goal, 1782 sizeof(tstate->transinfo[i].goal)); 1783 } 1784 } else 1785 bzero(tstate, sizeof(*tstate)); 1786 s = splcam(); 1787 ahc->enabled_targets[scsi_id] = tstate; 1788 splx(s); 1789 return (tstate); 1790 } 1791 1792 static void 1793 ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force) 1794 { 1795 struct tmode_tstate *tstate; 1796 1797 /* Don't clean up the entry for our initiator role */ 1798 if ((ahc->flags & AHC_INITIATORMODE) != 0 1799 && ((channel == 'B' && scsi_id == ahc->our_id_b) 1800 || (channel == 'A' && scsi_id == ahc->our_id)) 1801 && force == FALSE) 1802 return; 1803 1804 if (channel == 'B') 1805 scsi_id += 8; 1806 tstate = ahc->enabled_targets[scsi_id]; 1807 if (tstate != NULL) 1808 free(tstate, M_DEVBUF); 1809 ahc->enabled_targets[scsi_id] = NULL; 1810 } 1811 1812 static void 1813 ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) 1814 { 1815 struct tmode_tstate *tstate; 1816 struct tmode_lstate *lstate; 1817 struct ccb_en_lun *cel; 1818 cam_status status; 1819 int target; 1820 int lun; 1821 u_int target_mask; 1822 char channel; 1823 int s; 1824 1825 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate, 1826 /* notfound_failure*/FALSE); 1827 1828 if (status != CAM_REQ_CMP) { 1829 ccb->ccb_h.status = status; 1830 return; 1831 } 1832 1833 cel = &ccb->cel; 1834 target = ccb->ccb_h.target_id; 1835 lun = ccb->ccb_h.target_lun; 1836 channel = SIM_CHANNEL(ahc, sim); 1837 target_mask = 0x01 << target; 1838 if (channel == 'B') 1839 target_mask <<= 8; 1840 1841 if (cel->enable != 0) { 1842 u_int scsiseq; 1843 1844 /* Are we already enabled?? */ 1845 if (lstate != NULL) { 1846 xpt_print_path(ccb->ccb_h.path); 1847 printf("Lun already enabled\n"); 1848 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 1849 return; 1850 } 1851 1852 if (cel->grp6_len != 0 1853 || cel->grp7_len != 0) { 1854 /* 1855 * Don't (yet?) support vendor 1856 * specific commands. 1857 */ 1858 ccb->ccb_h.status = CAM_REQ_INVALID; 1859 printf("Non-zero Group Codes\n"); 1860 return; 1861 } 1862 1863 /* 1864 * Seems to be okay. 1865 * Setup our data structures. 
1866 */ 1867 if (target != CAM_TARGET_WILDCARD && tstate == NULL) { 1868 tstate = ahc_alloc_tstate(ahc, target, channel); 1869 if (tstate == NULL) { 1870 xpt_print_path(ccb->ccb_h.path); 1871 printf("Couldn't allocate tstate\n"); 1872 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 1873 return; 1874 } 1875 } 1876 lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT); 1877 if (lstate == NULL) { 1878 xpt_print_path(ccb->ccb_h.path); 1879 printf("Couldn't allocate lstate\n"); 1880 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 1881 return; 1882 } 1883 bzero(lstate, sizeof(*lstate)); 1884 status = xpt_create_path(&lstate->path, /*periph*/NULL, 1885 xpt_path_path_id(ccb->ccb_h.path), 1886 xpt_path_target_id(ccb->ccb_h.path), 1887 xpt_path_lun_id(ccb->ccb_h.path)); 1888 if (status != CAM_REQ_CMP) { 1889 free(lstate, M_DEVBUF); 1890 xpt_print_path(ccb->ccb_h.path); 1891 printf("Couldn't allocate path\n"); 1892 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 1893 return; 1894 } 1895 SLIST_INIT(&lstate->accept_tios); 1896 SLIST_INIT(&lstate->immed_notifies); 1897 s = splcam(); 1898 pause_sequencer(ahc); 1899 if (target != CAM_TARGET_WILDCARD) { 1900 tstate->enabled_luns[lun] = lstate; 1901 ahc->enabled_luns++; 1902 1903 if ((ahc->features & AHC_MULTI_TID) != 0) { 1904 u_int16_t targid_mask; 1905 1906 targid_mask = ahc_inb(ahc, TARGID) 1907 | (ahc_inb(ahc, TARGID + 1) << 8); 1908 1909 targid_mask |= target_mask; 1910 ahc_outb(ahc, TARGID, targid_mask); 1911 ahc_outb(ahc, TARGID+1, (targid_mask >> 8)); 1912 } else { 1913 int our_id; 1914 char channel; 1915 1916 channel = SIM_CHANNEL(ahc, sim); 1917 our_id = SIM_SCSI_ID(ahc, sim); 1918 1919 /* 1920 * This can only happen if selections 1921 * are not enabled 1922 */ 1923 if (target != our_id) { 1924 u_int sblkctl; 1925 char cur_channel; 1926 int swap; 1927 1928 sblkctl = ahc_inb(ahc, SBLKCTL); 1929 cur_channel = (sblkctl & SELBUSB) 1930 ? 
'B' : 'A'; 1931 if ((ahc->features & AHC_TWIN) == 0) 1932 cur_channel = 'A'; 1933 swap = cur_channel != channel; 1934 if (channel == 'A') 1935 ahc->our_id = target; 1936 else 1937 ahc->our_id_b = target; 1938 1939 if (swap) 1940 ahc_outb(ahc, SBLKCTL, 1941 sblkctl ^ SELBUSB); 1942 1943 ahc_outb(ahc, SCSIID, target); 1944 1945 if (swap) 1946 ahc_outb(ahc, SBLKCTL, sblkctl); 1947 } 1948 } 1949 } else 1950 ahc->black_hole = lstate; 1951 /* Allow select-in operations */ 1952 if (ahc->black_hole != NULL && ahc->enabled_luns > 0) { 1953 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); 1954 scsiseq |= ENSELI; 1955 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); 1956 scsiseq = ahc_inb(ahc, SCSISEQ); 1957 scsiseq |= ENSELI; 1958 ahc_outb(ahc, SCSISEQ, scsiseq); 1959 } 1960 unpause_sequencer(ahc); 1961 splx(s); 1962 ccb->ccb_h.status = CAM_REQ_CMP; 1963 xpt_print_path(ccb->ccb_h.path); 1964 printf("Lun now enabled for target mode\n"); 1965 } else { 1966 struct ccb_hdr *elm; 1967 1968 if (lstate == NULL) { 1969 ccb->ccb_h.status = CAM_LUN_INVALID; 1970 return; 1971 } 1972 1973 s = splcam(); 1974 ccb->ccb_h.status = CAM_REQ_CMP; 1975 LIST_FOREACH(elm, &ahc->pending_ccbs, sim_links.le) { 1976 if (elm->func_code == XPT_CONT_TARGET_IO 1977 && !xpt_path_comp(elm->path, ccb->ccb_h.path)){ 1978 printf("CTIO pending\n"); 1979 ccb->ccb_h.status = CAM_REQ_INVALID; 1980 splx(s); 1981 return; 1982 } 1983 } 1984 1985 if (SLIST_FIRST(&lstate->accept_tios) != NULL) { 1986 printf("ATIOs pending\n"); 1987 ccb->ccb_h.status = CAM_REQ_INVALID; 1988 } 1989 1990 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { 1991 printf("INOTs pending\n"); 1992 ccb->ccb_h.status = CAM_REQ_INVALID; 1993 } 1994 1995 if (ccb->ccb_h.status == CAM_REQ_CMP) { 1996 int i, empty; 1997 1998 xpt_print_path(ccb->ccb_h.path); 1999 printf("Target mode disabled\n"); 2000 xpt_free_path(lstate->path); 2001 free(lstate, M_DEVBUF); 2002 2003 pause_sequencer(ahc); 2004 /* Can we clean up the target too? */ 2005 if (target != CAM_TARGET_WILDCARD) { 2006 tstate->enabled_luns[lun] = NULL; 2007 ahc->enabled_luns--; 2008 for (empty = 1, i = 0; i < 8; i++) 2009 if (tstate->enabled_luns[i] != NULL) { 2010 empty = 0; 2011 break; 2012 } 2013 2014 if (empty) { 2015 ahc_free_tstate(ahc, target, channel, 2016 /*force*/FALSE); 2017 if (ahc->features & AHC_MULTI_TID) { 2018 u_int16_t targid_mask; 2019 2020 targid_mask = 2021 ahc_inb(ahc, TARGID) 2022 | (ahc_inb(ahc, TARGID + 1) 2023 << 8); 2024 2025 targid_mask &= ~target_mask; 2026 ahc_outb(ahc, TARGID, 2027 targid_mask); 2028 ahc_outb(ahc, TARGID+1, 2029 (targid_mask >> 8)); 2030 } 2031 } 2032 } else { 2033 2034 ahc->black_hole = NULL; 2035 2036 /* 2037 * We can't allow selections without 2038 * our black hole device. 
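 *
 * Note that the enable path above only sets ENSELI when both a
 * black hole device and at least one enabled lun exist, so dropping
 * the black hole here effectively ends our willingness to accept
 * selections for luns we do not explicitly serve.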
2039 */ 2040 empty = TRUE; 2041 } 2042 if (ahc->enabled_luns == 0) { 2043 /* Disallow select-in */ 2044 u_int scsiseq; 2045 2046 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); 2047 scsiseq &= ~ENSELI; 2048 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); 2049 scsiseq = ahc_inb(ahc, SCSISEQ); 2050 scsiseq &= ~ENSELI; 2051 ahc_outb(ahc, SCSISEQ, scsiseq); 2052 } 2053 unpause_sequencer(ahc); 2054 } 2055 splx(s); 2056 } 2057 } 2058 2059 static int 2060 ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd) 2061 { 2062 struct tmode_tstate *tstate; 2063 struct tmode_lstate *lstate; 2064 struct ccb_accept_tio *atio; 2065 u_int8_t *byte; 2066 int initiator; 2067 int target; 2068 int lun; 2069 2070 initiator = cmd->initiator_channel >> 4; 2071 target = cmd->targ_id; 2072 lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); 2073 2074 byte = cmd->bytes; 2075 tstate = ahc->enabled_targets[target]; 2076 lstate = NULL; 2077 if (tstate != NULL && lun < 8) 2078 lstate = tstate->enabled_luns[lun]; 2079 2080 /* 2081 * Commands for disabled luns go to the black hole driver. 2082 */ 2083 if (lstate == NULL) { 2084 lstate = ahc->black_hole; 2085 atio = 2086 (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); 2087 } else { 2088 atio = 2089 (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); 2090 } 2091 if (atio == NULL) { 2092 ahc->flags |= AHC_TQINFIFO_BLOCKED; 2093 printf("No ATIOs for incoming command\n"); 2094 /* 2095 * Wait for more ATIOs from the peripheral driver for this lun. 2096 */ 2097 return (1); 2098 } else 2099 ahc->flags &= ~AHC_TQINFIFO_BLOCKED; 2100 SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); 2101 2102 if (lstate == ahc->black_hole) { 2103 /* Fill in the wildcards */ 2104 atio->ccb_h.target_id = target; 2105 atio->ccb_h.target_lun = lun; 2106 } 2107 2108 /* 2109 * Package it up and send it off to 2110 * whomever has this lun enabled. 2111 */ 2112 atio->init_id = initiator; 2113 if (byte[0] != 0xFF) { 2114 /* Tag was included */ 2115 atio->tag_action = *byte++; 2116 atio->tag_id = *byte++; 2117 atio->ccb_h.flags = CAM_TAG_ACTION_VALID; 2118 } else { 2119 byte++; 2120 atio->ccb_h.flags = 0; 2121 } 2122 2123 /* Okay. Now determine the cdb size based on the command code */ 2124 switch (*byte >> CMD_GROUP_CODE_SHIFT) { 2125 case 0: 2126 atio->cdb_len = 6; 2127 break; 2128 case 1: 2129 case 2: 2130 atio->cdb_len = 10; 2131 break; 2132 case 4: 2133 atio->cdb_len = 16; 2134 break; 2135 case 5: 2136 atio->cdb_len = 12; 2137 break; 2138 case 3: 2139 default: 2140 /* Only copy the opcode. */ 2141 atio->cdb_len = 1; 2142 printf("Reserved or VU command code type encountered\n"); 2143 break; 2144 } 2145 bcopy(byte, atio->cdb_io.cdb_bytes, atio->cdb_len); 2146 2147 atio->ccb_h.status |= CAM_CDB_RECVD; 2148 2149 if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { 2150 /* 2151 * We weren't allowed to disconnect. 2152 * We're hanging on the bus until a 2153 * continue target I/O comes in response 2154 * to this accept tio. 2155 */ 2156 #if 0 2157 printf("Received Immediate Command %d:%d:%d - %p\n", 2158 initiator, target, lun, ahc->pending_device); 2159 #endif 2160 ahc->pending_device = lstate; 2161 } 2162 xpt_done((union ccb*)atio); 2163 return (0); 2164 } 2165 2166 static void 2167 ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat) 2168 { 2169 struct scb *scb; 2170 struct ahc_devinfo devinfo; 2171 2172 ahc_fetch_devinfo(ahc, &devinfo); 2173 2174 /* 2175 * Clear the upper byte that holds SEQINT status 2176 * codes and clear the SEQINT bit. 
We will unpause 2177 * the sequencer, if appropriate, after servicing 2178 * the request. 2179 */ 2180 ahc_outb(ahc, CLRINT, CLRSEQINT); 2181 switch (intstat & SEQINT_MASK) { 2182 case NO_MATCH: 2183 { 2184 /* Ensure we don't leave the selection hardware on */ 2185 ahc_outb(ahc, SCSISEQ, 2186 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 2187 2188 printf("%s:%c:%d: no active SCB for reconnecting " 2189 "target - issuing BUS DEVICE RESET\n", 2190 ahc_name(ahc), devinfo.channel, devinfo.target); 2191 printf("SAVED_TCL == 0x%x, ARG_1 == 0x%x, SEQ_FLAGS == 0x%x\n", 2192 ahc_inb(ahc, SAVED_TCL), ahc_inb(ahc, ARG_1), 2193 ahc_inb(ahc, SEQ_FLAGS)); 2194 ahc->msgout_buf[0] = MSG_BUS_DEV_RESET; 2195 ahc->msgout_len = 1; 2196 ahc->msgout_index = 0; 2197 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2198 ahc_outb(ahc, MSG_OUT, HOST_MSG); 2199 ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, LASTPHASE) | ATNO); 2200 break; 2201 } 2202 case UPDATE_TMSG_REQ: 2203 ahc_outb(ahc, TARGET_MSG_REQUEST, ahc->targ_msg_req & 0xFF); 2204 ahc_outb(ahc, TARGET_MSG_REQUEST + 1, 2205 (ahc->targ_msg_req >> 8) & 0xFF); 2206 ahc_outb(ahc, HS_MAILBOX, 0); 2207 break; 2208 case SEND_REJECT: 2209 { 2210 u_int rejbyte = ahc_inb(ahc, ACCUM); 2211 printf("%s:%c:%d: Warning - unknown message received from " 2212 "target (0x%x). Rejecting\n", 2213 ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte); 2214 break; 2215 } 2216 case NO_IDENT: 2217 { 2218 /* 2219 * The reconnecting target either did not send an identify 2220 * message, or did, but we didn't find and SCB to match and 2221 * before it could respond to our ATN/abort, it hit a dataphase. 2222 * The only safe thing to do is to blow it away with a bus 2223 * reset. 2224 */ 2225 int found; 2226 2227 printf("%s:%c:%d: Target did not send an IDENTIFY message. " 2228 "LASTPHASE = 0x%x, SAVED_TCL == 0x%x\n", 2229 ahc_name(ahc), devinfo.channel, devinfo.target, 2230 ahc_inb(ahc, LASTPHASE), ahc_inb(ahc, SAVED_TCL)); 2231 found = ahc_reset_channel(ahc, devinfo.channel, 2232 /*initiate reset*/TRUE); 2233 printf("%s: Issued Channel %c Bus Reset. " 2234 "%d SCBs aborted\n", ahc_name(ahc), devinfo.channel, 2235 found); 2236 return; 2237 } 2238 case BAD_PHASE: 2239 { 2240 u_int lastphase; 2241 2242 lastphase = ahc_inb(ahc, LASTPHASE); 2243 if (lastphase == P_BUSFREE) { 2244 printf("%s:%c:%d: Missed busfree. Curphase = 0x%x\n", 2245 ahc_name(ahc), devinfo.channel, devinfo.target, 2246 ahc_inb(ahc, SCSISIGI)); 2247 restart_sequencer(ahc); 2248 return; 2249 } else { 2250 printf("%s:%c:%d: unknown scsi bus phase %x. " 2251 "Attempting to continue\n", 2252 ahc_name(ahc), devinfo.channel, devinfo.target, 2253 ahc_inb(ahc, SCSISIGI)); 2254 } 2255 break; 2256 } 2257 case BAD_STATUS: 2258 { 2259 u_int scb_index; 2260 struct hardware_scb *hscb; 2261 struct ccb_scsiio *csio; 2262 /* 2263 * The sequencer will notify us when a command 2264 * has an error that would be of interest to 2265 * the kernel. This allows us to leave the sequencer 2266 * running in the common case of command completes 2267 * without error. The sequencer will already have 2268 * dma'd the SCB back up to us, so we can reference 2269 * the in kernel copy directly. 2270 */ 2271 scb_index = ahc_inb(ahc, SCB_TAG); 2272 scb = &ahc->scb_data->scbarray[scb_index]; 2273 2274 /* 2275 * Set the default return value to 0 (don't 2276 * send sense). The sense code will change 2277 * this if needed. 
		 */
		ahc_outb(ahc, RETURN_1, 0);
		if (!(scb_index < ahc->scb_data->numscbs
		   && (scb->flags & SCB_ACTIVE) != 0)) {
			printf("%s:%c:%d: ahc_intr - referenced scb "
			       "not valid during seqint 0x%x scb(%d)\n",
			       ahc_name(ahc), devinfo.channel,
			       devinfo.target, intstat, scb_index);
			goto unpause;
		}

		hscb = scb->hscb;

		/* Don't want to clobber the original sense code */
		if ((scb->flags & SCB_SENSE) != 0) {
			/*
			 * Clear the SCB_SENSE Flag and have
			 * the sequencer do a normal command
			 * complete.
			 */
			scb->flags &= ~SCB_SENSE;
			ahcsetccbstatus(scb->ccb, CAM_AUTOSENSE_FAIL);
			break;
		}
		ahcsetccbstatus(scb->ccb, CAM_SCSI_STATUS_ERROR);
		/* Freeze the queue until the client sees the error. */
		ahc_freeze_devq(ahc, scb->ccb->ccb_h.path);
		ahc_freeze_ccb(scb->ccb);
		csio = &scb->ccb->csio;
		csio->scsi_status = hscb->status;
		switch (hscb->status) {
		case SCSI_STATUS_OK:
			printf("%s: Interrupted for status of 0???\n",
			       ahc_name(ahc));
			break;
		case SCSI_STATUS_CMD_TERMINATED:
		case SCSI_STATUS_CHECK_COND:
#ifdef AHC_DEBUG
			if (ahc_debug & AHC_SHOWSENSE) {
				xpt_print_path(csio->ccb_h.path);
				printf("SCB %d: requests Check Status\n",
				       scb->hscb->tag);
			}
#endif

			if ((csio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) {
				struct ahc_dma_seg *sg;
				struct scsi_sense *sc;
				struct ahc_initiator_tinfo *tinfo;
				struct tmode_tstate *tstate;

				sg = scb->sg_list;
				sc = (struct scsi_sense *)(&hscb->cmdstore);
				/*
				 * Save off the residual if there is one.
				 */
				if (hscb->residual_SG_count != 0)
					ahc_calc_residual(scb);
				else
					scb->ccb->csio.resid = 0;

#ifdef AHC_DEBUG
				if (ahc_debug & AHC_SHOWSENSE) {
					xpt_print_path(csio->ccb_h.path);
					printf("Sending Sense\n");
				}
#endif
				sg->addr = ahc->scb_data->sense_busaddr
				    + (hscb->tag*sizeof(struct scsi_sense_data));
				sg->len = MIN(sizeof(struct scsi_sense_data),
					      csio->sense_len);

				sc->opcode = REQUEST_SENSE;
				sc->byte2 = SCB_LUN(scb) << 5;
				sc->unused[0] = 0;
				sc->unused[1] = 0;
				sc->length = sg->len;
				sc->control = 0;

				/*
				 * Would be nice to preserve DISCENB here,
				 * but due to the way we page SCBs, we can't.
				 */
				hscb->control = 0;

				/*
				 * This request sense could be because the
				 * device lost power or in some other
				 * way has lost our transfer negotiations.
				 * Renegotiate if appropriate.  Unit attention
				 * errors will be reported before any data
				 * phases occur.
				 */
				ahc_calc_residual(scb);
				if (scb->ccb->csio.resid
				 == scb->ccb->csio.dxfer_len) {
					tinfo = ahc_fetch_transinfo(ahc,
							    devinfo.channel,
							    devinfo.our_scsiid,
							    devinfo.target,
							    &tstate);
					ahc_update_target_msg_request(ahc,
							      &devinfo,
							      tinfo,
							      /*force*/TRUE,
							      /*paused*/TRUE);
				}
				hscb->status = 0;
				hscb->SG_count = 1;
				hscb->SG_pointer = scb->sg_list_phys;
				hscb->data = sg->addr;
				hscb->datalen = sg->len;
				hscb->cmdpointer = hscb->cmdstore_busaddr;
				hscb->cmdlen = sizeof(*sc);
				scb->sg_count = hscb->SG_count;
				scb->flags |= SCB_SENSE;
				/*
				 * Ensure the target is busy since this
				 * will be an untagged request.
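				 *
				 * At this point the hardware SCB has been
				 * rewritten in place: a single S/G element
				 * pointing at this tag's slot in the shared
				 * sense buffer and a REQUEST SENSE CDB in
				 * cmdstore.  Marking the target/lun busy keeps
				 * the sequencer from starting another untagged
				 * command to the same device before the sense
				 * data has been retrieved.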
2397 */ 2398 ahc_busy_tcl(ahc, scb); 2399 ahc_outb(ahc, RETURN_1, SEND_SENSE); 2400 2401 /* 2402 * Ensure we have enough time to actually 2403 * retrieve the sense. 2404 */ 2405 untimeout(ahc_timeout, (caddr_t)scb, 2406 scb->ccb->ccb_h.timeout_ch); 2407 scb->ccb->ccb_h.timeout_ch = 2408 timeout(ahc_timeout, (caddr_t)scb, 5 * hz); 2409 } 2410 break; 2411 case SCSI_STATUS_BUSY: 2412 case SCSI_STATUS_QUEUE_FULL: 2413 /* 2414 * Requeue any transactions that haven't been 2415 * sent yet. 2416 */ 2417 ahc_freeze_devq(ahc, scb->ccb->ccb_h.path); 2418 ahc_freeze_ccb(scb->ccb); 2419 break; 2420 } 2421 break; 2422 } 2423 case TRACE_POINT: 2424 { 2425 printf("SSTAT2 = 0x%x DFCNTRL = 0x%x\n", ahc_inb(ahc, SSTAT2), 2426 ahc_inb(ahc, DFCNTRL)); 2427 printf("SSTAT3 = 0x%x DSTATUS = 0x%x\n", ahc_inb(ahc, SSTAT3), 2428 ahc_inb(ahc, DFSTATUS)); 2429 printf("SSTAT0 = 0x%x, SCB_DATACNT = 0x%x\n", 2430 ahc_inb(ahc, SSTAT0), 2431 ahc_inb(ahc, SCB_DATACNT)); 2432 break; 2433 } 2434 case HOST_MSG_LOOP: 2435 { 2436 /* 2437 * The sequencer has encountered a message phase 2438 * that requires host assistance for completion. 2439 * While handling the message phase(s), we will be 2440 * notified by the sequencer after each byte is 2441 * transfered so we can track bus phases. 2442 * 2443 * If this is the first time we've seen a HOST_MSG_LOOP, 2444 * initialize the state of the host message loop. 2445 */ 2446 if (ahc->msg_type == MSG_TYPE_NONE) { 2447 u_int bus_phase; 2448 2449 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 2450 if (bus_phase != P_MESGIN 2451 && bus_phase != P_MESGOUT) { 2452 printf("ahc_intr: HOST_MSG_LOOP bad " 2453 "phase 0x%x\n", 2454 bus_phase); 2455 /* 2456 * Probably transitioned to bus free before 2457 * we got here. Just punt the message. 2458 */ 2459 ahc_clear_intstat(ahc); 2460 restart_sequencer(ahc); 2461 } 2462 2463 if (devinfo.role == ROLE_INITIATOR) { 2464 struct scb *scb; 2465 u_int scb_index; 2466 2467 scb_index = ahc_inb(ahc, SCB_TAG); 2468 scb = &ahc->scb_data->scbarray[scb_index]; 2469 2470 if (bus_phase == P_MESGOUT) 2471 ahc_setup_initiator_msgout(ahc, 2472 &devinfo, 2473 scb); 2474 else { 2475 ahc->msg_type = 2476 MSG_TYPE_INITIATOR_MSGIN; 2477 ahc->msgin_index = 0; 2478 } 2479 } else { 2480 if (bus_phase == P_MESGOUT) { 2481 ahc->msg_type = 2482 MSG_TYPE_TARGET_MSGOUT; 2483 ahc->msgin_index = 0; 2484 } else 2485 /* XXX Ever executed??? */ 2486 ahc_setup_target_msgin(ahc, &devinfo); 2487 } 2488 } 2489 2490 /* Pass a NULL path so that handlers generate their own */ 2491 ahc_handle_message_phase(ahc, /*path*/NULL); 2492 break; 2493 } 2494 case PERR_DETECTED: 2495 { 2496 /* 2497 * If we've cleared the parity error interrupt 2498 * but the sequencer still believes that SCSIPERR 2499 * is true, it must be that the parity error is 2500 * for the currently presented byte on the bus, 2501 * and we are not in a phase (data-in) where we will 2502 * eventually ack this byte. Ack the byte and 2503 * throw it away in the hope that the target will 2504 * take us to message out to deliver the appropriate 2505 * error message. 2506 */ 2507 if ((intstat & SCSIINT) == 0 2508 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) { 2509 u_int curphase; 2510 2511 /* 2512 * The hardware will only let you ack bytes 2513 * if the expected phase in SCSISIGO matches 2514 * the current phase. Make sure this is 2515 * currently the case. 
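 *
 * Matching SCSISIGO to the current value of SCSISIGI below is what
 * permits the manual handshake; the subsequent read of SCSIDATL
 * latches and acknowledges the offending byte, which is then simply
 * discarded.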
2516 */ 2517 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 2518 ahc_outb(ahc, LASTPHASE, curphase); 2519 ahc_outb(ahc, SCSISIGO, curphase); 2520 ahc_inb(ahc, SCSIDATL); 2521 } 2522 break; 2523 } 2524 case DATA_OVERRUN: 2525 { 2526 /* 2527 * When the sequencer detects an overrun, it 2528 * places the controller in "BITBUCKET" mode 2529 * and allows the target to complete its transfer. 2530 * Unfortunately, none of the counters get updated 2531 * when the controller is in this mode, so we have 2532 * no way of knowing how large the overrun was. 2533 */ 2534 u_int scbindex = ahc_inb(ahc, SCB_TAG); 2535 u_int lastphase = ahc_inb(ahc, LASTPHASE); 2536 int i; 2537 2538 scb = &ahc->scb_data->scbarray[scbindex]; 2539 for (i = 0; i < num_phases; i++) { 2540 if (lastphase == phase_table[i].phase) 2541 break; 2542 } 2543 xpt_print_path(scb->ccb->ccb_h.path); 2544 printf("data overrun detected %s." 2545 " Tag == 0x%x.\n", 2546 phase_table[i].phasemsg, 2547 scb->hscb->tag); 2548 xpt_print_path(scb->ccb->ccb_h.path); 2549 printf("%s seen Data Phase. Length = %d. NumSGs = %d.\n", 2550 ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't", 2551 scb->ccb->csio.dxfer_len, scb->sg_count); 2552 if (scb->sg_count > 0) { 2553 for (i = 0; i < scb->sg_count; i++) { 2554 printf("sg[%d] - Addr 0x%x : Length %d\n", 2555 i, 2556 scb->sg_list[i].addr, 2557 scb->sg_list[i].len); 2558 } 2559 } 2560 /* 2561 * Set this and it will take affect when the 2562 * target does a command complete. 2563 */ 2564 ahc_freeze_devq(ahc, scb->ccb->ccb_h.path); 2565 ahcsetccbstatus(scb->ccb, CAM_DATA_RUN_ERR); 2566 ahc_freeze_ccb(scb->ccb); 2567 break; 2568 } 2569 case TRACEPOINT: 2570 { 2571 printf("TRACEPOINT: RETURN_2 = %d\n", ahc_inb(ahc, RETURN_2)); 2572 #if 0 2573 printf("SSTAT1 == 0x%x\n", ahc_inb(ahc, SSTAT1)); 2574 printf("SSTAT0 == 0x%x\n", ahc_inb(ahc, SSTAT0)); 2575 printf(", SCSISIGI == 0x%x\n", ahc_inb(ahc, SCSISIGI)); 2576 printf("TRACEPOINT: CCHCNT = %d, SG_COUNT = %d\n", 2577 ahc_inb(ahc, CCHCNT), ahc_inb(ahc, SG_COUNT)); 2578 printf("TRACEPOINT: SCB_TAG = %d\n", ahc_inb(ahc, SCB_TAG)); 2579 printf("TRACEPOINT1: CCHADDR = %d, CCHCNT = %d, SCBPTR = %d\n", 2580 ahc_inb(ahc, CCHADDR) 2581 | (ahc_inb(ahc, CCHADDR+1) << 8) 2582 | (ahc_inb(ahc, CCHADDR+2) << 16) 2583 | (ahc_inb(ahc, CCHADDR+3) << 24), 2584 ahc_inb(ahc, CCHCNT) 2585 | (ahc_inb(ahc, CCHCNT+1) << 8) 2586 | (ahc_inb(ahc, CCHCNT+2) << 16), 2587 ahc_inb(ahc, SCBPTR)); 2588 printf("TRACEPOINT: WAITING_SCBH = %d\n", ahc_inb(ahc, WAITING_SCBH)); 2589 printf("TRACEPOINT: SCB_TAG = %d\n", ahc_inb(ahc, SCB_TAG)); 2590 #endif 2591 break; 2592 } 2593 #if NOT_YET 2594 /* XXX Fill these in later */ 2595 case MESG_BUFFER_BUSY: 2596 break; 2597 case MSGIN_PHASEMIS: 2598 break; 2599 #endif 2600 default: 2601 printf("ahc_intr: seqint, " 2602 "intstat == 0x%x, scsisigi = 0x%x\n", 2603 intstat, ahc_inb(ahc, SCSISIGI)); 2604 break; 2605 } 2606 2607 unpause: 2608 /* 2609 * The sequencer is paused immediately on 2610 * a SEQINT, so we should restart it when 2611 * we're done. 
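 *
 * The cases above that reset the channel or restart the sequencer
 * return directly instead of falling through to this unpause, since
 * they already leave the sequencer running.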
2612 */ 2613 unpause_sequencer(ahc); 2614 } 2615 2616 static void 2617 ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) 2618 { 2619 u_int scb_index; 2620 u_int status; 2621 struct scb *scb; 2622 char cur_channel; 2623 char intr_channel; 2624 2625 if ((ahc->features & AHC_TWIN) != 0 2626 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0)) 2627 cur_channel = 'B'; 2628 else 2629 cur_channel = 'A'; 2630 intr_channel = cur_channel; 2631 2632 status = ahc_inb(ahc, SSTAT1); 2633 if (status == 0) { 2634 if ((ahc->features & AHC_TWIN) != 0) { 2635 /* Try the other channel */ 2636 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 2637 status = ahc_inb(ahc, SSTAT1); 2638 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 2639 intr_channel = (cur_channel == 'A') ? 'B' : 'A'; 2640 } 2641 if (status == 0) { 2642 printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc)); 2643 return; 2644 } 2645 } 2646 2647 scb_index = ahc_inb(ahc, SCB_TAG); 2648 if (scb_index < ahc->scb_data->numscbs) { 2649 scb = &ahc->scb_data->scbarray[scb_index]; 2650 if ((scb->flags & SCB_ACTIVE) == 0 2651 || (ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) == 0) 2652 scb = NULL; 2653 } else 2654 scb = NULL; 2655 2656 if ((status & SCSIRSTI) != 0) { 2657 printf("%s: Someone reset channel %c\n", 2658 ahc_name(ahc), intr_channel); 2659 ahc_reset_channel(ahc, intr_channel, /* Initiate Reset */FALSE); 2660 } else if ((status & SCSIPERR) != 0) { 2661 /* 2662 * Determine the bus phase and queue an appropriate message. 2663 * SCSIPERR is latched true as soon as a parity error 2664 * occurs. If the sequencer acked the transfer that 2665 * caused the parity error and the currently presented 2666 * transfer on the bus has correct parity, SCSIPERR will 2667 * be cleared by CLRSCSIPERR. Use this to determine if 2668 * we should look at the last phase the sequencer recorded, 2669 * or the current phase presented on the bus. 2670 */ 2671 u_int mesg_out; 2672 u_int curphase; 2673 u_int errorphase; 2674 u_int lastphase; 2675 int i; 2676 2677 lastphase = ahc_inb(ahc, LASTPHASE); 2678 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 2679 ahc_outb(ahc, CLRSINT1, CLRSCSIPERR); 2680 /* 2681 * For all phases save DATA, the sequencer won't 2682 * automatically ack a byte that has a parity error 2683 * in it. So the only way that the current phase 2684 * could be 'data-in' is if the parity error is for 2685 * an already acked byte in the data phase. During 2686 * synchronous data-in transfers, we may actually 2687 * ack bytes before latching the current phase in 2688 * LASTPHASE, leading to the discrepancy between 2689 * curphase and lastphase. 2690 */ 2691 if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0 2692 || curphase == P_DATAIN) 2693 errorphase = curphase; 2694 else 2695 errorphase = lastphase; 2696 2697 for (i = 0; i < num_phases; i++) { 2698 if (errorphase == phase_table[i].phase) 2699 break; 2700 } 2701 mesg_out = phase_table[i].mesg_out; 2702 if (scb != NULL) 2703 xpt_print_path(scb->ccb->ccb_h.path); 2704 else 2705 printf("%s:%c:%d: ", ahc_name(ahc), 2706 intr_channel, 2707 TCL_TARGET(ahc_inb(ahc, SAVED_TCL))); 2708 2709 printf("parity error detected %s. " 2710 "SEQADDR(0x%x) SCSIRATE(0x%x)\n", 2711 phase_table[i].phasemsg, 2712 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8), 2713 ahc_inb(ahc, SCSIRATE)); 2714 2715 /* 2716 * We've set the hardware to assert ATN if we 2717 * get a parity error on "in" phases, so all we 2718 * need to do is stuff the message buffer with 2719 * the appropriate message. 
"In" phases have set 2720 * mesg_out to something other than MSG_NOP. 2721 */ 2722 if (mesg_out != MSG_NOOP) { 2723 if (ahc->msg_type != MSG_TYPE_NONE) 2724 ahc->send_msg_perror = TRUE; 2725 else 2726 ahc_outb(ahc, MSG_OUT, mesg_out); 2727 } 2728 ahc_outb(ahc, CLRINT, CLRSCSIINT); 2729 unpause_sequencer(ahc); 2730 } else if ((status & BUSFREE) != 0 2731 && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) { 2732 /* 2733 * First look at what phase we were last in. 2734 * If its message out, chances are pretty good 2735 * that the busfree was in response to one of 2736 * our abort requests. 2737 */ 2738 u_int lastphase = ahc_inb(ahc, LASTPHASE); 2739 u_int saved_tcl = ahc_inb(ahc, SAVED_TCL); 2740 u_int target = TCL_TARGET(saved_tcl); 2741 u_int initiator_role_id = TCL_SCSI_ID(ahc, saved_tcl); 2742 char channel = TCL_CHANNEL(ahc, saved_tcl); 2743 int printerror = 1; 2744 2745 ahc_outb(ahc, SCSISEQ, 2746 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 2747 if (lastphase == P_MESGOUT) { 2748 u_int message; 2749 u_int tag; 2750 2751 message = ahc->msgout_buf[ahc->msgout_index - 1]; 2752 tag = SCB_LIST_NULL; 2753 switch (message) { 2754 case MSG_ABORT_TAG: 2755 tag = scb->hscb->tag; 2756 /* FALLTRHOUGH */ 2757 case MSG_ABORT: 2758 xpt_print_path(scb->ccb->ccb_h.path); 2759 printf("SCB %d - Abort %s Completed.\n", 2760 scb->hscb->tag, tag == SCB_LIST_NULL ? 2761 "" : "Tag"); 2762 ahc_abort_scbs(ahc, target, channel, 2763 TCL_LUN(saved_tcl), tag, 2764 ROLE_INITIATOR, 2765 CAM_REQ_ABORTED); 2766 printerror = 0; 2767 break; 2768 case MSG_BUS_DEV_RESET: 2769 { 2770 struct ahc_devinfo devinfo; 2771 2772 /* 2773 * Don't mark the user's request for this BDR 2774 * as completing with CAM_BDR_SENT. CAM3 2775 * specifies CAM_REQ_CMP. 2776 */ 2777 if (scb != NULL 2778 && scb->ccb->ccb_h.func_code == XPT_RESET_DEV 2779 && ahc_match_scb(scb, target, channel, 2780 TCL_LUN(saved_tcl), 2781 SCB_LIST_NULL, 2782 ROLE_INITIATOR)) { 2783 ahcsetccbstatus(scb->ccb, CAM_REQ_CMP); 2784 } 2785 ahc_compile_devinfo(&devinfo, 2786 initiator_role_id, 2787 target, 2788 TCL_LUN(saved_tcl), 2789 channel, 2790 ROLE_INITIATOR); 2791 ahc_handle_devreset(ahc, &devinfo, 2792 CAM_BDR_SENT, AC_SENT_BDR, 2793 "Bus Device Reset", 2794 /*verbose_level*/0); 2795 printerror = 0; 2796 break; 2797 } 2798 default: 2799 break; 2800 } 2801 } 2802 if (printerror != 0) { 2803 int i; 2804 2805 if (scb != NULL) { 2806 u_int tag; 2807 2808 if ((scb->hscb->control & TAG_ENB) != 0) 2809 tag = scb->hscb->tag; 2810 else 2811 tag = SCB_LIST_NULL; 2812 ahc_abort_scbs(ahc, target, channel, 2813 SCB_LUN(scb), tag, 2814 ROLE_INITIATOR, 2815 CAM_UNEXP_BUSFREE); 2816 xpt_print_path(scb->ccb->ccb_h.path); 2817 } else { 2818 /* 2819 * We had not fully identified this connection, 2820 * so we cannot abort anything. 
				 */
				printf("%s: ", ahc_name(ahc));
			}
			for (i = 0; i < num_phases; i++) {
				if (lastphase == phase_table[i].phase)
					break;
			}
			printf("Unexpected busfree %s\n"
			       "SEQADDR == 0x%x\n",
			       phase_table[i].phasemsg, ahc_inb(ahc, SEQADDR0)
			       | (ahc_inb(ahc, SEQADDR1) << 8));
		}
		ahc_clear_msg_state(ahc);
		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
		ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR);
		ahc_outb(ahc, CLRINT, CLRSCSIINT);
		restart_sequencer(ahc);
	} else if ((status & SELTO) != 0) {
		u_int scbptr;

		scbptr = ahc_inb(ahc, WAITING_SCBH);
		ahc_outb(ahc, SCBPTR, scbptr);
		scb_index = ahc_inb(ahc, SCB_TAG);

		if (scb_index < ahc->scb_data->numscbs) {
			scb = &ahc->scb_data->scbarray[scb_index];
			if ((scb->flags & SCB_ACTIVE) == 0)
				scb = NULL;
		} else
			scb = NULL;

		if (scb == NULL) {
			printf("%s: ahc_intr - referenced scb not "
			       "valid during SELTO scb(%d, %d)\n",
			       ahc_name(ahc), scbptr, scb_index);
		} else {
			u_int tag;

			tag = SCB_LIST_NULL;
			if ((scb->hscb->control & MSG_SIMPLE_Q_TAG) != 0)
				tag = scb->hscb->tag;

			ahc_abort_scbs(ahc, SCB_TARGET(scb), SCB_CHANNEL(scb),
				       SCB_LUN(scb), tag,
				       ROLE_INITIATOR, CAM_SEL_TIMEOUT);
		}
		/* Stop the selection */
		ahc_outb(ahc, SCSISEQ, 0);

		/* No more pending messages */
		ahc_clear_msg_state(ahc);

		/*
		 * Although the driver does not care about the
		 * 'Selection in Progress' status bit, the busy
		 * LED does.  SELINGO is only cleared by a successful
		 * selection, so we must manually clear it to ensure
		 * the LED turns off just in case no future successful
		 * selections occur (e.g. no devices on the bus).
		 */
		ahc_outb(ahc, CLRSINT0, CLRSELINGO);

		/* Clear interrupt state */
		ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
		ahc_outb(ahc, CLRINT, CLRSCSIINT);
		restart_sequencer(ahc);
	} else {
		/*
		 * scb may have been left NULL above if the referenced
		 * SCB was not active, so guard the path print.
		 */
		if (scb != NULL)
			xpt_print_path(scb->ccb->ccb_h.path);
		else
			printf("%s: ", ahc_name(ahc));
		printf("Unknown SCSIINT. Status = 0x%x\n", status);
		ahc_outb(ahc, CLRSINT1, status);
		ahc_outb(ahc, CLRINT, CLRSCSIINT);
		unpause_sequencer(ahc);
	}
}

static void
ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	/*
	 * We need to initiate transfer negotiations.
	 * If our current and goal settings are identical,
	 * we want to renegotiate due to a check condition.
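	 *
	 * Wide negotiation is attempted before sync negotiation below
	 * because a WDTR exchange resets any synchronous agreement; the
	 * sync parameters are then (re)negotiated in a later SDTR once
	 * the width is settled (see the "After a wide message, we are
	 * async" handling in ahc_parse_msg()).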
2903 */ 2904 struct ahc_initiator_tinfo *tinfo; 2905 struct tmode_tstate *tstate; 2906 int dowide; 2907 int dosync; 2908 2909 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 2910 devinfo->target, &tstate); 2911 dowide = tinfo->current.width != tinfo->goal.width; 2912 dosync = tinfo->current.period != tinfo->goal.period; 2913 2914 if (!dowide && !dosync) { 2915 dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT; 2916 dosync = tinfo->goal.period != 0; 2917 } 2918 2919 if (dowide) { 2920 ahc_construct_wdtr(ahc, tinfo->goal.width); 2921 } else if (dosync) { 2922 struct ahc_syncrate *rate; 2923 u_int period; 2924 u_int offset; 2925 2926 period = tinfo->goal.period; 2927 rate = ahc_devlimited_syncrate(ahc, &period); 2928 offset = tinfo->goal.offset; 2929 ahc_validate_offset(ahc, rate, &offset, 2930 tinfo->current.width); 2931 ahc_construct_sdtr(ahc, period, offset); 2932 } else { 2933 panic("ahc_intr: AWAITING_MSG for negotiation, " 2934 "but no negotiation needed\n"); 2935 } 2936 } 2937 2938 static void 2939 ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2940 struct scb *scb) 2941 { 2942 /* 2943 * To facilitate adding multiple messages together, 2944 * each routine should increment the index and len 2945 * variables instead of setting them explicitly. 2946 */ 2947 ahc->msgout_index = 0; 2948 ahc->msgout_len = 0; 2949 2950 if ((scb->flags & SCB_DEVICE_RESET) == 0 2951 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) { 2952 u_int identify_msg; 2953 2954 identify_msg = MSG_IDENTIFYFLAG | SCB_LUN(scb); 2955 if ((scb->hscb->control & DISCENB) != 0) 2956 identify_msg |= MSG_IDENTIFY_DISCFLAG; 2957 ahc->msgout_buf[ahc->msgout_index++] = identify_msg; 2958 ahc->msgout_len++; 2959 2960 if ((scb->hscb->control & TAG_ENB) != 0) { 2961 ahc->msgout_buf[ahc->msgout_index++] = 2962 scb->ccb->csio.tag_action; 2963 ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag; 2964 ahc->msgout_len += 2; 2965 } 2966 } 2967 2968 if (scb->flags & SCB_DEVICE_RESET) { 2969 ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET; 2970 ahc->msgout_len++; 2971 xpt_print_path(scb->ccb->ccb_h.path); 2972 printf("Bus Device Reset Message Sent\n"); 2973 } else if (scb->flags & SCB_ABORT) { 2974 if ((scb->hscb->control & TAG_ENB) != 0) 2975 ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG; 2976 else 2977 ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT; 2978 ahc->msgout_len++; 2979 xpt_print_path(scb->ccb->ccb_h.path); 2980 printf("Abort Message Sent\n"); 2981 } else if ((ahc->targ_msg_req & devinfo->target_mask) != 0) { 2982 ahc_build_transfer_msg(ahc, devinfo); 2983 } else { 2984 printf("ahc_intr: AWAITING_MSG for an SCB that " 2985 "does not have a waiting message"); 2986 panic("SCB = %d, SCB Control = %x, MSG_OUT = %x " 2987 "SCB flags = %x", scb->hscb->tag, scb->hscb->control, 2988 ahc_inb(ahc, MSG_OUT), scb->flags); 2989 } 2990 2991 /* 2992 * Clear the MK_MESSAGE flag from the SCB so we aren't 2993 * asked to send this message again. 2994 */ 2995 ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE); 2996 ahc->msgout_index = 0; 2997 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2998 } 2999 3000 static void 3001 ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 3002 { 3003 /* 3004 * To facilitate adding multiple messages together, 3005 * each routine should increment the index and len 3006 * variables instead of setting them explicitly. 
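 *
 * For a hypothetical single byte message the convention looks like
 * this (illustrative only):
 *
 *	ahc->msgout_buf[ahc->msgout_index++] = MSG_NOOP;
 *	ahc->msgout_len++;
 *
 * which lets several ahc_construct_* routines append to the same
 * outgoing buffer.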
3007 */ 3008 ahc->msgout_index = 0; 3009 ahc->msgout_len = 0; 3010 3011 if ((ahc->targ_msg_req & devinfo->target_mask) != 0) 3012 ahc_build_transfer_msg(ahc, devinfo); 3013 else 3014 panic("ahc_intr: AWAITING target message with no message"); 3015 3016 ahc->msgout_index = 0; 3017 ahc->msg_type = MSG_TYPE_TARGET_MSGIN; 3018 } 3019 3020 static int 3021 ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 3022 { 3023 /* 3024 * What we care about here is if we had an 3025 * outstanding SDTR or WDTR message for this 3026 * target. If we did, this is a signal that 3027 * the target is refusing negotiation. 3028 */ 3029 struct scb *scb; 3030 u_int scb_index; 3031 u_int last_msg; 3032 int response = 0; 3033 3034 scb_index = ahc_inb(ahc, SCB_TAG); 3035 scb = &ahc->scb_data->scbarray[scb_index]; 3036 3037 /* Might be necessary */ 3038 last_msg = ahc_inb(ahc, LAST_MSG); 3039 3040 if (ahc_sent_msg(ahc, MSG_EXT_WDTR, /*full*/FALSE)) { 3041 struct ahc_initiator_tinfo *tinfo; 3042 struct tmode_tstate *tstate; 3043 3044 /* note 8bit xfers */ 3045 printf("%s:%c:%d: refuses WIDE negotiation. Using " 3046 "8bit transfers\n", ahc_name(ahc), 3047 devinfo->channel, devinfo->target); 3048 ahc_set_width(ahc, devinfo, scb->ccb->ccb_h.path, 3049 MSG_EXT_WDTR_BUS_8_BIT, 3050 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3051 /*paused*/TRUE); 3052 /* 3053 * No need to clear the sync rate. If the target 3054 * did not accept the command, our syncrate is 3055 * unaffected. If the target started the negotiation, 3056 * but rejected our response, we already cleared the 3057 * sync rate before sending our WDTR. 3058 */ 3059 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, 3060 devinfo->our_scsiid, 3061 devinfo->target, &tstate); 3062 if (tinfo->goal.period) { 3063 u_int period; 3064 3065 /* Start the sync negotiation */ 3066 period = tinfo->goal.period; 3067 ahc_devlimited_syncrate(ahc, &period); 3068 ahc->msgout_index = 0; 3069 ahc->msgout_len = 0; 3070 ahc_construct_sdtr(ahc, period, tinfo->goal.offset); 3071 ahc->msgout_index = 0; 3072 response = 1; 3073 } 3074 } else if (ahc_sent_msg(ahc, MSG_EXT_SDTR, /*full*/FALSE)) { 3075 /* note asynch xfers and clear flag */ 3076 ahc_set_syncrate(ahc, devinfo, scb->ccb->ccb_h.path, 3077 /*syncrate*/NULL, /*period*/0, 3078 /*offset*/0, 3079 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3080 /*paused*/TRUE); 3081 printf("%s:%c:%d: refuses synchronous negotiation. " 3082 "Using asynchronous transfers\n", 3083 ahc_name(ahc), 3084 devinfo->channel, devinfo->target); 3085 } else if ((scb->hscb->control & MSG_SIMPLE_Q_TAG) != 0) { 3086 struct ccb_trans_settings neg; 3087 3088 printf("%s:%c:%d: refuses tagged commands. Performing " 3089 "non-tagged I/O\n", ahc_name(ahc), 3090 devinfo->channel, devinfo->target); 3091 3092 ahc_set_tags(ahc, devinfo, FALSE); 3093 neg.flags = 0; 3094 neg.valid = CCB_TRANS_TQ_VALID; 3095 xpt_setup_ccb(&neg.ccb_h, scb->ccb->ccb_h.path, /*priority*/1); 3096 xpt_async(AC_TRANSFER_NEG, scb->ccb->ccb_h.path, &neg); 3097 3098 /* 3099 * Resend the identify for this CCB as the target 3100 * may believe that the selection is invalid otherwise. 
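 *
 * Rejecting the queue tag message collapses the nexus from I_T_L_Q
 * back to I_T_L, so the untagged IDENTIFY is re-sent (with ATN
 * asserted) below and the SCB's tag bits are cleared both on the
 * card and in the kernel copy.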
3101 */ 3102 ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) 3103 & ~MSG_SIMPLE_Q_TAG); 3104 scb->hscb->control &= ~MSG_SIMPLE_Q_TAG; 3105 scb->ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 3106 ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG); 3107 ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, SCSISIGO) | ATNO); 3108 3109 /* 3110 * Requeue all tagged commands for this target 3111 * currently in our posession so they can be 3112 * converted to untagged commands. 3113 */ 3114 ahc_search_qinfifo(ahc, SCB_TARGET(scb), SCB_CHANNEL(scb), 3115 SCB_LUN(scb), /*tag*/SCB_LIST_NULL, 3116 ROLE_INITIATOR, CAM_REQUEUE_REQ, 3117 SEARCH_COMPLETE); 3118 } else { 3119 /* 3120 * Otherwise, we ignore it. 3121 */ 3122 printf("%s:%c:%d: Message reject for %x -- ignored\n", 3123 ahc_name(ahc), devinfo->channel, devinfo->target, 3124 last_msg); 3125 } 3126 return (response); 3127 } 3128 3129 static void 3130 ahc_clear_msg_state(struct ahc_softc *ahc) 3131 { 3132 ahc->msgout_len = 0; 3133 ahc->msgin_index = 0; 3134 ahc->msg_type = MSG_TYPE_NONE; 3135 ahc_outb(ahc, MSG_OUT, MSG_NOOP); 3136 } 3137 3138 static void 3139 ahc_handle_message_phase(struct ahc_softc *ahc, struct cam_path *path) 3140 { 3141 struct ahc_devinfo devinfo; 3142 u_int bus_phase; 3143 int end_session; 3144 3145 ahc_fetch_devinfo(ahc, &devinfo); 3146 end_session = FALSE; 3147 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 3148 3149 reswitch: 3150 switch (ahc->msg_type) { 3151 case MSG_TYPE_INITIATOR_MSGOUT: 3152 { 3153 int lastbyte; 3154 int phasemis; 3155 int msgdone; 3156 3157 if (ahc->msgout_len == 0) 3158 panic("REQINIT interrupt with no active message"); 3159 3160 phasemis = bus_phase != P_MESGOUT; 3161 if (phasemis) { 3162 if (bus_phase == P_MESGIN) { 3163 /* 3164 * Change gears and see if 3165 * this messages is of interest to 3166 * us or should be passed back to 3167 * the sequencer. 3168 */ 3169 ahc_outb(ahc, CLRSINT1, CLRATNO); 3170 ahc->send_msg_perror = FALSE; 3171 ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN; 3172 ahc->msgin_index = 0; 3173 goto reswitch; 3174 } 3175 end_session = TRUE; 3176 break; 3177 } 3178 3179 if (ahc->send_msg_perror) { 3180 ahc_outb(ahc, CLRSINT1, CLRATNO); 3181 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 3182 ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR); 3183 break; 3184 } 3185 3186 msgdone = ahc->msgout_index == ahc->msgout_len; 3187 if (msgdone) { 3188 /* 3189 * The target has requested a retry. 3190 * Re-assert ATN, reset our message index to 3191 * 0, and try again. 3192 */ 3193 ahc->msgout_index = 0; 3194 ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, SCSISIGO) | ATNO); 3195 } 3196 3197 lastbyte = ahc->msgout_index == (ahc->msgout_len - 1); 3198 if (lastbyte) { 3199 /* Last byte is signified by dropping ATN */ 3200 ahc_outb(ahc, CLRSINT1, CLRATNO); 3201 } 3202 3203 /* 3204 * Clear our interrupt status and present 3205 * the next byte on the bus. 
3206 */ 3207 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 3208 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); 3209 break; 3210 } 3211 case MSG_TYPE_INITIATOR_MSGIN: 3212 { 3213 int phasemis; 3214 int message_done; 3215 3216 phasemis = bus_phase != P_MESGIN; 3217 3218 if (phasemis) { 3219 ahc->msgin_index = 0; 3220 if (bus_phase == P_MESGOUT 3221 && (ahc->send_msg_perror == TRUE 3222 || (ahc->msgout_len != 0 3223 && ahc->msgout_index == 0))) { 3224 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 3225 goto reswitch; 3226 } 3227 end_session = TRUE; 3228 break; 3229 } 3230 3231 /* Pull the byte in without acking it */ 3232 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL); 3233 3234 message_done = ahc_parse_msg(ahc, path, &devinfo); 3235 3236 if (message_done) { 3237 /* 3238 * Clear our incoming message buffer in case there 3239 * is another message following this one. 3240 */ 3241 ahc->msgin_index = 0; 3242 3243 /* 3244 * If this message illicited a response, 3245 * assert ATN so the target takes us to the 3246 * message out phase. 3247 */ 3248 if (ahc->msgout_len != 0) 3249 ahc_outb(ahc, SCSISIGO, 3250 ahc_inb(ahc, SCSISIGO) | ATNO); 3251 } else 3252 ahc->msgin_index++; 3253 3254 /* Ack the byte */ 3255 ahc_outb(ahc, CLRSINT1, CLRREQINIT); 3256 ahc_inb(ahc, SCSIDATL); 3257 break; 3258 } 3259 case MSG_TYPE_TARGET_MSGIN: 3260 { 3261 int msgdone; 3262 int msgout_request; 3263 3264 if (ahc->msgout_len == 0) 3265 panic("Target MSGIN with no active message"); 3266 3267 /* 3268 * If we interrupted a mesgout session, the initiator 3269 * will not know this until our first REQ. So, we 3270 * only honor mesgout requests after we've sent our 3271 * first byte. 3272 */ 3273 if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0 3274 && ahc->msgout_index > 0) 3275 msgout_request = TRUE; 3276 else 3277 msgout_request = FALSE; 3278 3279 if (msgout_request) { 3280 3281 /* 3282 * Change gears and see if 3283 * this messages is of interest to 3284 * us or should be passed back to 3285 * the sequencer. 3286 */ 3287 ahc->msg_type = MSG_TYPE_TARGET_MSGOUT; 3288 ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO); 3289 ahc->msgin_index = 0; 3290 /* Dummy read to REQ for first byte */ 3291 ahc_inb(ahc, SCSIDATL); 3292 ahc_outb(ahc, SXFRCTL0, 3293 ahc_inb(ahc, SXFRCTL0) | SPIOEN); 3294 break; 3295 } 3296 3297 msgdone = ahc->msgout_index == ahc->msgout_len; 3298 if (msgdone) { 3299 ahc_outb(ahc, SXFRCTL0, 3300 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); 3301 end_session = TRUE; 3302 break; 3303 } 3304 3305 /* 3306 * Present the next byte on the bus. 3307 */ 3308 ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); 3309 ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); 3310 break; 3311 } 3312 case MSG_TYPE_TARGET_MSGOUT: 3313 { 3314 int lastbyte; 3315 int msgdone; 3316 3317 /* 3318 * The initiator signals that this is 3319 * the last byte by dropping ATN. 3320 */ 3321 lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0; 3322 3323 /* 3324 * Read the latched byte, but turn off SPIOEN first 3325 * so that we don't inadvertantly cause a REQ for the 3326 * next byte. 3327 */ 3328 ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); 3329 ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL); 3330 msgdone = ahc_parse_msg(ahc, path, &devinfo); 3331 if (msgdone == MSGLOOP_TERMINATED) { 3332 /* 3333 * The message is *really* done in that it caused 3334 * us to go to bus free. The sequencer has already 3335 * been reset at this point, so pull the ejection 3336 * handle. 
3337 */ 3338 return; 3339 } 3340 3341 ahc->msgin_index++; 3342 3343 /* 3344 * XXX Read spec about initiator dropping ATN too soon 3345 * and use msgdone to detect it. 3346 */ 3347 if (msgdone == MSGLOOP_MSGCOMPLETE) { 3348 ahc->msgin_index = 0; 3349 3350 /* 3351 * If this message illicited a response, transition 3352 * to the Message in phase and send it. 3353 */ 3354 if (ahc->msgout_len != 0) { 3355 ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO); 3356 ahc_outb(ahc, SXFRCTL0, 3357 ahc_inb(ahc, SXFRCTL0) | SPIOEN); 3358 ahc->msg_type = MSG_TYPE_TARGET_MSGIN; 3359 ahc->msgin_index = 0; 3360 break; 3361 } 3362 } 3363 3364 if (lastbyte) 3365 end_session = TRUE; 3366 else { 3367 /* Ask for the next byte. */ 3368 ahc_outb(ahc, SXFRCTL0, 3369 ahc_inb(ahc, SXFRCTL0) | SPIOEN); 3370 } 3371 3372 break; 3373 } 3374 default: 3375 panic("Unknown REQINIT message type"); 3376 } 3377 3378 if (end_session) { 3379 ahc_clear_msg_state(ahc); 3380 ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP); 3381 } else 3382 ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP); 3383 } 3384 3385 /* 3386 * See if we sent a particular extended message to the target. 3387 * If "full" is true, the target saw the full message. 3388 * If "full" is false, the target saw at least the first 3389 * byte of the message. 3390 */ 3391 static int 3392 ahc_sent_msg(struct ahc_softc *ahc, u_int msgtype, int full) 3393 { 3394 int found; 3395 int index; 3396 3397 found = FALSE; 3398 index = 0; 3399 3400 while (index < ahc->msgout_len) { 3401 if (ahc->msgout_buf[index] == MSG_EXTENDED) { 3402 3403 /* Found a candidate */ 3404 if (ahc->msgout_buf[index+2] == msgtype) { 3405 u_int end_index; 3406 3407 end_index = index + 1 3408 + ahc->msgout_buf[index + 1]; 3409 if (full) { 3410 if (ahc->msgout_index > end_index) 3411 found = TRUE; 3412 } else if (ahc->msgout_index > index) 3413 found = TRUE; 3414 } 3415 break; 3416 } else if (ahc->msgout_buf[index] >= MSG_SIMPLE_Q_TAG 3417 && ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) { 3418 3419 /* Skip tag type and tag id or residue param*/ 3420 index += 2; 3421 } else { 3422 /* Single byte message */ 3423 index++; 3424 } 3425 } 3426 return (found); 3427 } 3428 3429 static int 3430 ahc_parse_msg(struct ahc_softc *ahc, struct cam_path *path, 3431 struct ahc_devinfo *devinfo) 3432 { 3433 struct ahc_initiator_tinfo *tinfo; 3434 struct tmode_tstate *tstate; 3435 int reject; 3436 int done; 3437 int response; 3438 u_int targ_scsirate; 3439 3440 done = MSGLOOP_IN_PROG; 3441 response = FALSE; 3442 reject = FALSE; 3443 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 3444 devinfo->target, &tstate); 3445 targ_scsirate = tinfo->scsirate; 3446 3447 /* 3448 * Parse as much of the message as is availible, 3449 * rejecting it if we don't support it. When 3450 * the entire message is availible and has been 3451 * handled, return MSGLOOP_MSGCOMPLETE, indicating 3452 * that we have parsed an entire message. 3453 * 3454 * In the case of extended messages, we accept the length 3455 * byte outright and perform more checking once we know the 3456 * extended message type. 
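 *
 * For reference, an extended message arrives framed as (SCSI-2
 * framing, values as defined in scsi_message.h):
 *
 *	msgin_buf[0]	MSG_EXTENDED
 *	msgin_buf[1]	length of the remainder (e.g. MSG_EXT_SDTR_LEN)
 *	msgin_buf[2]	extended message code (e.g. MSG_EXT_SDTR)
 *	msgin_buf[3..]	arguments (period and offset for an SDTR)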
3457 */ 3458 switch (ahc->msgin_buf[0]) { 3459 case MSG_MESSAGE_REJECT: 3460 response = ahc_handle_msg_reject(ahc, devinfo); 3461 /* FALLTHROUGH */ 3462 case MSG_NOOP: 3463 done = MSGLOOP_MSGCOMPLETE; 3464 break; 3465 case MSG_IGN_WIDE_RESIDUE: 3466 { 3467 /* Wait for the whole message */ 3468 if (ahc->msgin_index >= 1) { 3469 if (ahc->msgin_buf[1] != 1 3470 || tinfo->current.width == MSG_EXT_WDTR_BUS_8_BIT) { 3471 reject = TRUE; 3472 done = MSGLOOP_MSGCOMPLETE; 3473 } else 3474 ahc_handle_ign_wide_residue(ahc, devinfo); 3475 } 3476 break; 3477 } 3478 case MSG_EXTENDED: 3479 { 3480 /* Wait for enough of the message to begin validation */ 3481 if (ahc->msgin_index < 2) 3482 break; 3483 switch (ahc->msgin_buf[2]) { 3484 case MSG_EXT_SDTR: 3485 { 3486 struct ahc_syncrate *syncrate; 3487 u_int period; 3488 u_int offset; 3489 u_int saved_offset; 3490 3491 if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) { 3492 reject = TRUE; 3493 break; 3494 } 3495 3496 /* 3497 * Wait until we have both args before validating 3498 * and acting on this message. 3499 * 3500 * Add one to MSG_EXT_SDTR_LEN to account for 3501 * the extended message preamble. 3502 */ 3503 if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1)) 3504 break; 3505 3506 period = ahc->msgin_buf[3]; 3507 saved_offset = offset = ahc->msgin_buf[4]; 3508 syncrate = ahc_devlimited_syncrate(ahc, &period); 3509 ahc_validate_offset(ahc, syncrate, &offset, 3510 targ_scsirate & WIDEXFER); 3511 ahc_set_syncrate(ahc, devinfo, path, 3512 syncrate, period, offset, 3513 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3514 /*paused*/TRUE); 3515 3516 /* 3517 * See if we initiated Sync Negotiation 3518 * and didn't have to fall down to async 3519 * transfers. 3520 */ 3521 if (ahc_sent_msg(ahc, MSG_EXT_SDTR, /*full*/TRUE)) { 3522 /* We started it */ 3523 if (saved_offset != offset) { 3524 /* Went too low - force async */ 3525 reject = TRUE; 3526 } 3527 } else { 3528 /* 3529 * Send our own SDTR in reply 3530 */ 3531 if (bootverbose) 3532 printf("Sending SDTR!\n"); 3533 ahc->msgout_index = 0; 3534 ahc->msgout_len = 0; 3535 ahc_construct_sdtr(ahc, period, offset); 3536 ahc->msgout_index = 0; 3537 response = TRUE; 3538 } 3539 done = MSGLOOP_MSGCOMPLETE; 3540 break; 3541 } 3542 case MSG_EXT_WDTR: 3543 { 3544 u_int bus_width; 3545 u_int sending_reply; 3546 3547 sending_reply = FALSE; 3548 if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) { 3549 reject = TRUE; 3550 break; 3551 } 3552 3553 /* 3554 * Wait until we have our arg before validating 3555 * and acting on this message. 3556 * 3557 * Add one to MSG_EXT_WDTR_LEN to account for 3558 * the extended message preamble. 3559 */ 3560 if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1)) 3561 break; 3562 3563 bus_width = ahc->msgin_buf[3]; 3564 if (ahc_sent_msg(ahc, MSG_EXT_WDTR, /*full*/TRUE)) { 3565 /* 3566 * Don't send a WDTR back to the 3567 * target, since we asked first. 3568 */ 3569 switch (bus_width){ 3570 default: 3571 /* 3572 * How can we do anything greater 3573 * than 16bit transfers on a 16bit 3574 * bus? 3575 */ 3576 reject = TRUE; 3577 printf("%s: target %d requested %dBit " 3578 "transfers. 
Rejecting...\n", 3579 ahc_name(ahc), devinfo->target, 3580 8 * (0x01 << bus_width)); 3581 /* FALLTHROUGH */ 3582 case MSG_EXT_WDTR_BUS_8_BIT: 3583 bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3584 break; 3585 case MSG_EXT_WDTR_BUS_16_BIT: 3586 break; 3587 } 3588 } else { 3589 /* 3590 * Send our own WDTR in reply 3591 */ 3592 if (bootverbose) 3593 printf("Sending WDTR!\n"); 3594 switch (bus_width) { 3595 default: 3596 if (ahc->features & AHC_WIDE) { 3597 /* Respond Wide */ 3598 bus_width = 3599 MSG_EXT_WDTR_BUS_16_BIT; 3600 break; 3601 } 3602 /* FALLTHROUGH */ 3603 case MSG_EXT_WDTR_BUS_8_BIT: 3604 bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3605 break; 3606 } 3607 ahc->msgout_index = 0; 3608 ahc->msgout_len = 0; 3609 ahc_construct_wdtr(ahc, bus_width); 3610 ahc->msgout_index = 0; 3611 response = TRUE; 3612 sending_reply = TRUE; 3613 } 3614 ahc_set_width(ahc, devinfo, path, bus_width, 3615 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 3616 /*paused*/TRUE); 3617 3618 /* After a wide message, we are async */ 3619 ahc_set_syncrate(ahc, devinfo, path, 3620 /*syncrate*/NULL, /*period*/0, 3621 /*offset*/0, AHC_TRANS_ACTIVE, 3622 /*paused*/TRUE); 3623 if (sending_reply == FALSE && reject == FALSE) { 3624 3625 if (tinfo->goal.period) { 3626 struct ahc_syncrate *rate; 3627 u_int period; 3628 u_int offset; 3629 3630 /* Start the sync negotiation */ 3631 period = tinfo->goal.period; 3632 rate = ahc_devlimited_syncrate(ahc, 3633 &period); 3634 offset = tinfo->goal.offset; 3635 ahc_validate_offset(ahc, rate, &offset, 3636 tinfo->current.width); 3637 ahc->msgout_index = 0; 3638 ahc->msgout_len = 0; 3639 ahc_construct_sdtr(ahc, period, offset); 3640 ahc->msgout_index = 0; 3641 response = TRUE; 3642 } 3643 } 3644 done = MSGLOOP_MSGCOMPLETE; 3645 break; 3646 } 3647 default: 3648 /* Unknown extended message. Reject it. */ 3649 reject = TRUE; 3650 break; 3651 } 3652 break; 3653 } 3654 case MSG_BUS_DEV_RESET: 3655 ahc_handle_devreset(ahc, devinfo, 3656 CAM_BDR_SENT, AC_SENT_BDR, 3657 "Bus Device Reset Received", 3658 /*verbose_level*/0); 3659 restart_sequencer(ahc); 3660 done = MSGLOOP_TERMINATED; 3661 break; 3662 case MSG_ABORT_TAG: 3663 case MSG_ABORT: 3664 case MSG_CLEAR_QUEUE: 3665 /* Target mode messages */ 3666 if (devinfo->role != ROLE_TARGET) { 3667 reject = TRUE; 3668 break; 3669 } 3670 ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, 3671 devinfo->lun, 3672 ahc->msgin_buf[0] == MSG_ABORT_TAG 3673 ? SCB_LIST_NULL 3674 : ahc_inb(ahc, INITIATOR_TAG), 3675 ROLE_TARGET, CAM_REQ_ABORTED); 3676 3677 tstate = ahc->enabled_targets[devinfo->our_scsiid]; 3678 if (tstate != NULL) { 3679 struct tmode_lstate* lstate; 3680 3681 lstate = tstate->enabled_luns[devinfo->lun]; 3682 if (lstate != NULL) { 3683 ahc_queue_lstate_event(ahc, lstate, 3684 devinfo->our_scsiid, 3685 ahc->msgin_buf[0], 3686 /*arg*/0); 3687 ahc_send_lstate_events(ahc, lstate); 3688 } 3689 } 3690 done = MSGLOOP_MSGCOMPLETE; 3691 break; 3692 case MSG_TERM_IO_PROC: 3693 default: 3694 reject = TRUE; 3695 break; 3696 } 3697 3698 if (reject) { 3699 /* 3700 * Setup to reject the message. 
3701 */ 3702 ahc->msgout_index = 0; 3703 ahc->msgout_len = 1; 3704 ahc->msgout_buf[0] = MSG_MESSAGE_REJECT; 3705 done = MSGLOOP_MSGCOMPLETE; 3706 response = TRUE; 3707 } 3708 3709 if (done != MSGLOOP_IN_PROG && !response) 3710 /* Clear the outgoing message buffer */ 3711 ahc->msgout_len = 0; 3712 3713 return (done); 3714 } 3715 3716 static void 3717 ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 3718 { 3719 u_int scb_index; 3720 struct scb *scb; 3721 3722 scb_index = ahc_inb(ahc, SCB_TAG); 3723 scb = &ahc->scb_data->scbarray[scb_index]; 3724 if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0 3725 || (scb->ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_IN) { 3726 /* 3727 * Ignore the message if we haven't 3728 * seen an appropriate data phase yet. 3729 */ 3730 } else { 3731 /* 3732 * If the residual occurred on the last 3733 * transfer and the transfer request was 3734 * expected to end on an odd count, do 3735 * nothing. Otherwise, subtract a byte 3736 * and update the residual count accordingly. 3737 */ 3738 u_int resid_sgcnt; 3739 3740 resid_sgcnt = ahc_inb(ahc, SCB_RESID_SGCNT); 3741 if (resid_sgcnt == 0 3742 && ahc_inb(ahc, DATA_COUNT_ODD) == 1) { 3743 /* 3744 * If the residual occurred on the last 3745 * transfer and the transfer request was 3746 * expected to end on an odd count, do 3747 * nothing. 3748 */ 3749 } else { 3750 u_int data_cnt; 3751 u_int data_addr; 3752 u_int sg_index; 3753 3754 data_cnt = (ahc_inb(ahc, SCB_RESID_DCNT + 2) << 16) 3755 | (ahc_inb(ahc, SCB_RESID_DCNT + 1) << 8) 3756 | (ahc_inb(ahc, SCB_RESID_DCNT)); 3757 3758 data_addr = (ahc_inb(ahc, SHADDR + 3) << 24) 3759 | (ahc_inb(ahc, SHADDR + 2) << 16) 3760 | (ahc_inb(ahc, SHADDR + 1) << 8) 3761 | (ahc_inb(ahc, SHADDR)); 3762 3763 data_cnt += 1; 3764 data_addr -= 1; 3765 3766 sg_index = scb->sg_count - resid_sgcnt; 3767 3768 if (sg_index != 0 3769 && (scb->sg_list[sg_index].len < data_cnt)) { 3770 u_int sg_addr; 3771 3772 sg_index--; 3773 data_cnt = 1; 3774 data_addr = scb->sg_list[sg_index].addr 3775 + scb->sg_list[sg_index].len - 1; 3776 3777 /* 3778 * The physical address base points to the 3779 * second entry as it is always used for 3780 * calculating the "next S/G pointer". 3781 */ 3782 sg_addr = scb->sg_list_phys 3783 + (sg_index* sizeof(*scb->sg_list)); 3784 ahc_outb(ahc, SG_NEXT + 3, sg_addr >> 24); 3785 ahc_outb(ahc, SG_NEXT + 2, sg_addr >> 16); 3786 ahc_outb(ahc, SG_NEXT + 1, sg_addr >> 8); 3787 ahc_outb(ahc, SG_NEXT, sg_addr); 3788 } 3789 3790 ahc_outb(ahc, SCB_RESID_DCNT + 2, data_cnt >> 16); 3791 ahc_outb(ahc, SCB_RESID_DCNT + 1, data_cnt >> 8); 3792 ahc_outb(ahc, SCB_RESID_DCNT, data_cnt); 3793 3794 ahc_outb(ahc, SHADDR + 3, data_addr >> 24); 3795 ahc_outb(ahc, SHADDR + 2, data_addr >> 16); 3796 ahc_outb(ahc, SHADDR + 1, data_addr >> 8); 3797 ahc_outb(ahc, SHADDR, data_addr); 3798 } 3799 } 3800 } 3801 3802 static void 3803 ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 3804 cam_status status, ac_code acode, char *message, 3805 int verbose_level) 3806 { 3807 struct cam_path *path; 3808 int found; 3809 int error; 3810 struct tmode_tstate* tstate; 3811 u_int lun; 3812 3813 error = ahc_create_path(ahc, devinfo, &path); 3814 3815 found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, 3816 CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role, 3817 status); 3818 3819 /* 3820 * Send an immediate notify ccb to all target more peripheral 3821 * drivers affected by this action. 
3822 */ 3823 tstate = ahc->enabled_targets[devinfo->our_scsiid]; 3824 if (tstate != NULL) { 3825 for (lun = 0; lun <= 7; lun++) { 3826 struct tmode_lstate* lstate; 3827 3828 lstate = tstate->enabled_luns[lun]; 3829 if (lstate == NULL) 3830 continue; 3831 3832 ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid, 3833 MSG_BUS_DEV_RESET, /*arg*/0); 3834 ahc_send_lstate_events(ahc, lstate); 3835 } 3836 } 3837 3838 /* 3839 * Go back to async/narrow transfers and renegotiate. 3840 * ahc_set_width and ahc_set_syncrate can cope with NULL 3841 * paths. 3842 */ 3843 ahc_set_width(ahc, devinfo, path, MSG_EXT_WDTR_BUS_8_BIT, 3844 AHC_TRANS_CUR, /*paused*/TRUE); 3845 ahc_set_syncrate(ahc, devinfo, path, /*syncrate*/NULL, 3846 /*period*/0, /*offset*/0, AHC_TRANS_CUR, 3847 /*paused*/TRUE); 3848 3849 if (error == CAM_REQ_CMP && acode != 0) 3850 xpt_async(AC_SENT_BDR, path, NULL); 3851 3852 if (error == CAM_REQ_CMP) 3853 xpt_free_path(path); 3854 3855 if (message != NULL 3856 && (verbose_level <= bootverbose)) 3857 printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc), 3858 message, devinfo->channel, devinfo->target, found); 3859 } 3860 3861 /* 3862 * We have an scb which has been processed by the 3863 * adaptor, now we look to see how the operation 3864 * went. 3865 */ 3866 static void 3867 ahc_done(struct ahc_softc *ahc, struct scb *scb) 3868 { 3869 union ccb *ccb; 3870 3871 CAM_DEBUG(scb->ccb->ccb_h.path, CAM_DEBUG_TRACE, 3872 ("ahc_done - scb %d\n", scb->hscb->tag)); 3873 3874 ccb = scb->ccb; 3875 LIST_REMOVE(&ccb->ccb_h, sim_links.le); 3876 3877 untimeout(ahc_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch); 3878 3879 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 3880 bus_dmasync_op_t op; 3881 3882 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 3883 op = BUS_DMASYNC_POSTREAD; 3884 else 3885 op = BUS_DMASYNC_POSTWRITE; 3886 bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op); 3887 bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap); 3888 } 3889 3890 /* 3891 * Unbusy this target/channel/lun. 3892 * XXX if we are holding two commands per lun, 3893 * send the next command. 3894 */ 3895 ahc_index_busy_tcl(ahc, scb->hscb->tcl, /*unbusy*/TRUE); 3896 3897 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { 3898 if (ahc_ccb_status(ccb) == CAM_REQ_INPROG) 3899 ccb->ccb_h.status |= CAM_REQ_CMP; 3900 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3901 ahcfreescb(ahc, scb); 3902 xpt_done(ccb); 3903 return; 3904 } 3905 3906 /* 3907 * If the recovery SCB completes, we have to be 3908 * out of our timeout. 3909 */ 3910 if ((scb->flags & SCB_RECOVERY_SCB) != 0) { 3911 3912 struct ccb_hdr *ccbh; 3913 3914 /* 3915 * We were able to complete the command successfully, 3916 * so reinstate the timeouts for all other pending 3917 * commands. 3918 */ 3919 ccbh = ahc->pending_ccbs.lh_first; 3920 while (ccbh != NULL) { 3921 struct scb *pending_scb; 3922 3923 pending_scb = (struct scb *)ccbh->ccb_scb_ptr; 3924 ccbh->timeout_ch = 3925 timeout(ahc_timeout, pending_scb, 3926 (ccbh->timeout * hz)/1000); 3927 ccbh = LIST_NEXT(ccbh, sim_links.le); 3928 } 3929 3930 /* 3931 * Ensure that we didn't put a second instance of this 3932 * SCB into the QINFIFO. 
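 *
 * The recovery SCB may have been requeued through the QINFIFO as
 * part of the timeout handling, so the search below strips any
 * duplicate before the tag is recycled; otherwise the sequencer
 * could start a second copy of an SCB we are about to free.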
3933 */ 3934 ahc_search_qinfifo(ahc, SCB_TARGET(scb), SCB_CHANNEL(scb), 3935 SCB_LUN(scb), scb->hscb->tag, 3936 ROLE_INITIATOR, /*status*/0, 3937 SEARCH_REMOVE); 3938 if (ahc_ccb_status(ccb) == CAM_BDR_SENT 3939 || ahc_ccb_status(ccb) == CAM_REQ_ABORTED) 3940 ahcsetccbstatus(ccb, CAM_CMD_TIMEOUT); 3941 xpt_print_path(ccb->ccb_h.path); 3942 printf("no longer in timeout, status = %x\n", 3943 ccb->ccb_h.status); 3944 } 3945 3946 /* Don't clobber any existing error state */ 3947 if (ahc_ccb_status(ccb) == CAM_REQ_INPROG) { 3948 ccb->ccb_h.status |= CAM_REQ_CMP; 3949 } else if ((scb->flags & SCB_SENSE) != 0) { 3950 /* 3951 * We performed autosense retrieval. 3952 * 3953 * bzero the sense data before having 3954 * the drive fill it. The SCSI spec mandates 3955 * that any untransfered data should be 3956 * assumed to be zero. Complete the 'bounce' 3957 * of sense information through buffers accessible 3958 * via bus-space by copying it into the clients 3959 * csio. 3960 */ 3961 bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data)); 3962 bcopy(&ahc->scb_data->sense[scb->hscb->tag], 3963 &ccb->csio.sense_data, scb->sg_list->len); 3964 scb->ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 3965 } 3966 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3967 ahcfreescb(ahc, scb); 3968 xpt_done(ccb); 3969 } 3970 3971 /* 3972 * Determine the number of SCBs available on the controller 3973 */ 3974 int 3975 ahc_probe_scbs(struct ahc_softc *ahc) { 3976 int i; 3977 3978 for (i = 0; i < AHC_SCB_MAX; i++) { 3979 ahc_outb(ahc, SCBPTR, i); 3980 ahc_outb(ahc, SCB_CONTROL, i); 3981 if (ahc_inb(ahc, SCB_CONTROL) != i) 3982 break; 3983 ahc_outb(ahc, SCBPTR, 0); 3984 if (ahc_inb(ahc, SCB_CONTROL) != 0) 3985 break; 3986 } 3987 return (i); 3988 } 3989 3990 /* 3991 * Start the board, ready for normal operation 3992 */ 3993 int 3994 ahc_init(struct ahc_softc *ahc) 3995 { 3996 int max_targ = 15; 3997 int i; 3998 int term; 3999 u_int scsi_conf; 4000 u_int scsiseq_template; 4001 u_int ultraenb; 4002 u_int discenable; 4003 u_int tagenable; 4004 size_t driver_data_size; 4005 u_int32_t physaddr; 4006 4007 #ifdef AHC_PRINT_SRAM 4008 printf("Scratch Ram:"); 4009 for (i = 0x20; i < 0x5f; i++) { 4010 if (((i % 8) == 0) && (i != 0)) { 4011 printf ("\n "); 4012 } 4013 printf (" 0x%x", ahc_inb(ahc, i)); 4014 } 4015 if ((ahc->features & AHC_MORE_SRAM) != 0) { 4016 for (i = 0x70; i < 0x7f; i++) { 4017 if (((i % 8) == 0) && (i != 0)) { 4018 printf ("\n "); 4019 } 4020 printf (" 0x%x", ahc_inb(ahc, i)); 4021 } 4022 } 4023 printf ("\n"); 4024 #endif 4025 4026 /* 4027 * Assume we have a board at this stage and it has been reset. 4028 */ 4029 if ((ahc->flags & AHC_USEDEFAULTS) != 0) 4030 ahc->our_id = ahc->our_id_b = 7; 4031 4032 /* 4033 * Default to allowing initiator operations. 4034 */ 4035 ahc->flags |= AHC_INITIATORMODE; 4036 4037 /* 4038 * XXX Would be better to use a per device flag, but PCI and EISA 4039 * devices don't have them yet. 4040 */ 4041 if ((AHC_TMODE_ENABLE & (0x01 << ahc->unit)) != 0) { 4042 ahc->flags |= AHC_TARGETMODE; 4043 /* 4044 * Although we have space for both the initiator and 4045 * target roles on ULTRA2 chips, we currently disable 4046 * the initiator role to allow multi-scsi-id target mode 4047 * configurations. We can only respond on the same SCSI 4048 * ID as our initiator role if we allow initiator operation. 4049 * At some point, we should add a configuration knob to 4050 * allow both roles to be loaded. 
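 * (AHC_TMODE_ENABLE is a per-unit bit mask; for example, a kernel built
 * with 'options AHC_TMODE_ENABLE=0x1' would enable the target role on
 * unit 0 only.)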
4051 */ 4052 ahc->flags &= ~AHC_INITIATORMODE; 4053 } 4054 4055 /* DMA tag for mapping buffers into device visible space. */ 4056 if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0, 4057 /*lowaddr*/BUS_SPACE_MAXADDR, 4058 /*highaddr*/BUS_SPACE_MAXADDR, 4059 /*filter*/NULL, /*filterarg*/NULL, 4060 /*maxsize*/MAXBSIZE, /*nsegments*/AHC_NSEG, 4061 /*maxsegsz*/AHC_MAXTRANSFER_SIZE, 4062 /*flags*/BUS_DMA_ALLOCNOW, 4063 &ahc->buffer_dmat) != 0) { 4064 return (ENOMEM); 4065 } 4066 4067 ahc->init_level++; 4068 4069 /* 4070 * DMA tag for our command fifos and other data in system memory 4071 * the card's sequencer must be able to access. For initiator 4072 * roles, we need to allocate space for the qinfifo, qoutfifo, 4073 * and untagged_scb arrays each of which are composed of 256 4074 * 1 byte elements. When providing for the target mode role, 4075 * we additionally must provide space for the incoming target 4076 * command fifo. 4077 */ 4078 driver_data_size = 3 * 256 * sizeof(u_int8_t); 4079 if ((ahc->flags & AHC_TARGETMODE) != 0) 4080 driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd) 4081 + /*DMA WideOdd Bug Buffer*/1; 4082 if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0, 4083 /*lowaddr*/BUS_SPACE_MAXADDR, 4084 /*highaddr*/BUS_SPACE_MAXADDR, 4085 /*filter*/NULL, /*filterarg*/NULL, 4086 driver_data_size, 4087 /*nsegments*/1, 4088 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 4089 /*flags*/0, &ahc->shared_data_dmat) != 0) { 4090 return (ENOMEM); 4091 } 4092 4093 ahc->init_level++; 4094 4095 /* Allocation of driver data */ 4096 if (bus_dmamem_alloc(ahc->shared_data_dmat, (void **)&ahc->qoutfifo, 4097 BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) { 4098 return (ENOMEM); 4099 } 4100 4101 ahc->init_level++; 4102 4103 /* And permanently map it in */ 4104 bus_dmamap_load(ahc->shared_data_dmat, ahc->shared_data_dmamap, 4105 ahc->qoutfifo, driver_data_size, 4106 ahcdmamapcb, &ahc->shared_data_busaddr, /*flags*/0); 4107 4108 ahc->init_level++; 4109 4110 /* Allocate SCB data now that buffer_dmat is initialized */ 4111 if (ahc->scb_data->maxhscbs == 0) 4112 if (ahcinitscbdata(ahc) != 0) 4113 return (ENOMEM); 4114 4115 ahc->qinfifo = &ahc->qoutfifo[256]; 4116 ahc->untagged_scbs = &ahc->qinfifo[256]; 4117 /* There are no untagged SCBs active yet. */ 4118 for (i = 0; i < 256; i++) 4119 ahc->untagged_scbs[i] = SCB_LIST_NULL; 4120 4121 /* All of our queues are empty */ 4122 for (i = 0; i < 256; i++) 4123 ahc->qoutfifo[i] = SCB_LIST_NULL; 4124 4125 if ((ahc->flags & AHC_TARGETMODE) != 0) { 4126 4127 ahc->targetcmds = (struct target_cmd *)&ahc->untagged_scbs[256]; 4128 ahc->dma_bug_buf = ahc->shared_data_busaddr 4129 + driver_data_size - 1; 4130 /* All target command blocks start out invalid. */ 4131 for (i = 0; i < AHC_TMODE_CMDS; i++) 4132 ahc->targetcmds[i].cmd_valid = 0; 4133 ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1); 4134 ahc_outb(ahc, TQINPOS, 0); 4135 } 4136 4137 /* 4138 * Allocate a tstate to house information for our 4139 * initiator presence on the bus as well as the user 4140 * data for any target mode initiator. 4141 */ 4142 if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) { 4143 printf("%s: unable to allocate tmode_tstate. " 4144 "Failing attach\n", ahc_name(ahc)); 4145 return (-1); 4146 } 4147 4148 if ((ahc->features & AHC_TWIN) != 0) { 4149 if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) { 4150 printf("%s: unable to allocate tmode_tstate. 
" 4151 "Failing attach\n", ahc_name(ahc)); 4152 return (-1); 4153 } 4154 printf("Twin Channel, A SCSI Id=%d, B SCSI Id=%d, primary %c, ", 4155 ahc->our_id, ahc->our_id_b, 4156 ahc->flags & AHC_CHANNEL_B_PRIMARY? 'B': 'A'); 4157 } else { 4158 if ((ahc->features & AHC_WIDE) != 0) { 4159 printf("Wide "); 4160 } else { 4161 printf("Single "); 4162 } 4163 printf("Channel %c, SCSI Id=%d, ", ahc->channel, ahc->our_id); 4164 } 4165 4166 ahc_outb(ahc, SEQ_FLAGS, 0); 4167 4168 if (ahc->scb_data->maxhscbs < AHC_SCB_MAX) { 4169 ahc->flags |= AHC_PAGESCBS; 4170 printf("%d/%d SCBs\n", ahc->scb_data->maxhscbs, AHC_SCB_MAX); 4171 } else { 4172 ahc->flags &= ~AHC_PAGESCBS; 4173 printf("%d SCBs\n", ahc->scb_data->maxhscbs); 4174 } 4175 4176 #ifdef AHC_DEBUG 4177 if (ahc_debug & AHC_SHOWMISC) { 4178 printf("%s: hardware scb %d bytes; kernel scb %d bytes; " 4179 "ahc_dma %d bytes\n", 4180 ahc_name(ahc), 4181 sizeof(struct hardware_scb), 4182 sizeof(struct scb), 4183 sizeof(struct ahc_dma_seg)); 4184 } 4185 #endif /* AHC_DEBUG */ 4186 4187 /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/ 4188 if (ahc->features & AHC_TWIN) { 4189 4190 /* 4191 * The device is gated to channel B after a chip reset, 4192 * so set those values first 4193 */ 4194 term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0; 4195 if ((ahc->features & AHC_ULTRA2) != 0) 4196 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id_b); 4197 else 4198 ahc_outb(ahc, SCSIID, ahc->our_id_b); 4199 scsi_conf = ahc_inb(ahc, SCSICONF + 1); 4200 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4201 |term|ENSTIMER|ACTNEGEN); 4202 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4203 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4204 4205 if ((scsi_conf & RESET_SCSI) != 0 4206 && (ahc->flags & AHC_INITIATORMODE) != 0) 4207 ahc->flags |= AHC_RESET_BUS_B; 4208 4209 /* Select Channel A */ 4210 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 4211 } 4212 term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0; 4213 if ((ahc->features & AHC_ULTRA2) != 0) 4214 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); 4215 else 4216 ahc_outb(ahc, SCSIID, ahc->our_id); 4217 scsi_conf = ahc_inb(ahc, SCSICONF); 4218 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4219 |term 4220 |ENSTIMER|ACTNEGEN); 4221 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4222 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4223 4224 if ((scsi_conf & RESET_SCSI) != 0 4225 && (ahc->flags & AHC_INITIATORMODE) != 0) 4226 ahc->flags |= AHC_RESET_BUS_A; 4227 4228 /* 4229 * Look at the information that board initialization or 4230 * the board bios has left us. 4231 */ 4232 ultraenb = 0; 4233 tagenable = ALL_TARGETS_MASK; 4234 4235 /* Grab the disconnection disable table and invert it for our needs */ 4236 if (ahc->flags & AHC_USEDEFAULTS) { 4237 printf("%s: Host Adapter Bios disabled. 
Using default SCSI " 4238 "device parameters\n", ahc_name(ahc)); 4239 ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B| 4240 AHC_TERM_ENB_A|AHC_TERM_ENB_B; 4241 discenable = ALL_TARGETS_MASK; 4242 if ((ahc->features & AHC_ULTRA) != 0) 4243 ultraenb = ALL_TARGETS_MASK; 4244 } else { 4245 discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8) 4246 | ahc_inb(ahc, DISC_DSB)); 4247 if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0) 4248 ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8) 4249 | ahc_inb(ahc, ULTRA_ENB); 4250 } 4251 4252 if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) 4253 max_targ = 7; 4254 4255 for (i = 0; i <= max_targ; i++) { 4256 struct ahc_initiator_tinfo *tinfo; 4257 struct tmode_tstate *tstate; 4258 u_int our_id; 4259 u_int target_id; 4260 char channel; 4261 4262 channel = 'A'; 4263 our_id = ahc->our_id; 4264 target_id = i; 4265 if (i > 7 && (ahc->features & AHC_TWIN) != 0) { 4266 channel = 'B'; 4267 our_id = ahc->our_id_b; 4268 target_id = i % 8; 4269 } 4270 tinfo = ahc_fetch_transinfo(ahc, channel, our_id, 4271 target_id, &tstate); 4272 /* Default to async narrow across the board */ 4273 bzero(tinfo, sizeof(*tinfo)); 4274 if (ahc->flags & AHC_USEDEFAULTS) { 4275 if ((ahc->features & AHC_WIDE) != 0) 4276 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4277 4278 /* 4279 * These will be truncated when we determine the 4280 * connection type we have with the target. 4281 */ 4282 tinfo->user.period = ahc_syncrates->period; 4283 tinfo->user.offset = ~0; 4284 } else { 4285 u_int scsirate; 4286 u_int16_t mask; 4287 4288 /* Take the settings leftover in scratch RAM. */ 4289 scsirate = ahc_inb(ahc, TARG_SCSIRATE + i); 4290 mask = (0x01 << i); 4291 if ((ahc->features & AHC_ULTRA2) != 0) { 4292 u_int offset; 4293 u_int maxsync; 4294 4295 if ((scsirate & SOFS) == 0x0F) { 4296 /* 4297 * Haven't negotiated yet, 4298 * so the format is different. 4299 */ 4300 scsirate = (scsirate & SXFR) >> 4 4301 | (ultraenb & mask) 4302 ? 0x08 : 0x0 4303 | (scsirate & WIDEXFER); 4304 offset = MAX_OFFSET_ULTRA2; 4305 } else 4306 offset = ahc_inb(ahc, TARG_OFFSET + i); 4307 maxsync = AHC_SYNCRATE_ULTRA2; 4308 if ((ahc->features & AHC_DT) != 0) 4309 maxsync = AHC_SYNCRATE_DT; 4310 tinfo->user.period = 4311 ahc_find_period(ahc, scsirate, maxsync); 4312 if (offset == 0) 4313 tinfo->user.period = 0; 4314 else 4315 tinfo->user.offset = ~0; 4316 } else if ((scsirate & SOFS) != 0) { 4317 tinfo->user.period = 4318 ahc_find_period(ahc, scsirate, 4319 (ultraenb & mask) 4320 ? AHC_SYNCRATE_ULTRA 4321 : AHC_SYNCRATE_FAST); 4322 if (tinfo->user.period != 0) 4323 tinfo->user.offset = ~0; 4324 } 4325 if ((scsirate & WIDEXFER) != 0 4326 && (ahc->features & AHC_WIDE) != 0) 4327 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4328 } 4329 tstate->ultraenb = ultraenb; 4330 tstate->discenable = discenable; 4331 tstate->tagenable = 0; /* Wait until the XPT says its okay */ 4332 } 4333 ahc->user_discenable = discenable; 4334 ahc->user_tagenable = tagenable; 4335 4336 /* 4337 * Tell the sequencer where it can find our arrays in memory. 
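 * The shared data area was laid out above as qoutfifo at offset 0,
 * qinfifo at offset 256, the untagged SCB array at offset 512 and, when
 * target mode is enabled, the incoming command fifo at offset 768, which
 * is why physaddr is advanced by 3 * 256 bytes before TMODE_CMDADDR is
 * programmed below.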
4338 */ 4339 physaddr = ahc->scb_data->hscb_busaddr; 4340 ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF); 4341 ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF); 4342 ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF); 4343 ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF); 4344 4345 physaddr = ahc->shared_data_busaddr; 4346 ahc_outb(ahc, SCBID_ADDR, physaddr & 0xFF); 4347 ahc_outb(ahc, SCBID_ADDR + 1, (physaddr >> 8) & 0xFF); 4348 ahc_outb(ahc, SCBID_ADDR + 2, (physaddr >> 16) & 0xFF); 4349 ahc_outb(ahc, SCBID_ADDR + 3, (physaddr >> 24) & 0xFF); 4350 4351 /* Target mode incoming command fifo */ 4352 physaddr += 3 * 256 * sizeof(u_int8_t); 4353 ahc_outb(ahc, TMODE_CMDADDR, physaddr & 0xFF); 4354 ahc_outb(ahc, TMODE_CMDADDR + 1, (physaddr >> 8) & 0xFF); 4355 ahc_outb(ahc, TMODE_CMDADDR + 2, (physaddr >> 16) & 0xFF); 4356 ahc_outb(ahc, TMODE_CMDADDR + 3, (physaddr >> 24) & 0xFF); 4357 4358 /* 4359 * Initialize the group code to command length table. 4360 * This overrides the values in TARG_SCSIRATE, so only 4361 * set up the table after we have processed that information. 4362 */ 4363 ahc_outb(ahc, CMDSIZE_TABLE, 5); 4364 ahc_outb(ahc, CMDSIZE_TABLE + 1, 9); 4365 ahc_outb(ahc, CMDSIZE_TABLE + 2, 9); 4366 ahc_outb(ahc, CMDSIZE_TABLE + 3, 0); 4367 ahc_outb(ahc, CMDSIZE_TABLE + 4, 15); 4368 ahc_outb(ahc, CMDSIZE_TABLE + 5, 11); 4369 ahc_outb(ahc, CMDSIZE_TABLE + 6, 0); 4370 ahc_outb(ahc, CMDSIZE_TABLE + 7, 0); 4371 4372 /* Tell the sequencer of our initial queue positions */ 4373 ahc_outb(ahc, KERNEL_QINPOS, 0); 4374 ahc_outb(ahc, QINPOS, 0); 4375 ahc_outb(ahc, QOUTPOS, 0); 4376 4377 #ifdef AHC_DEBUG 4378 if (ahc_debug & AHC_SHOWMISC) 4379 printf("NEEDSDTR == 0x%x\nNEEDWDTR == 0x%x\n" 4380 "DISCENABLE == 0x%x\nULTRAENB == 0x%x\n", 4381 ahc->needsdtr_orig, ahc->needwdtr_orig, 4382 discenable, ultraenb); 4383 #endif 4384 4385 /* Don't have any special messages to send to targets */ 4386 ahc_outb(ahc, TARGET_MSG_REQUEST, 0); 4387 ahc_outb(ahc, TARGET_MSG_REQUEST + 1, 0); 4388 4389 /* 4390 * Use the built-in queue management registers 4391 * if they are available. 4392 */ 4393 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 4394 ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256); 4395 ahc_outb(ahc, SDSCB_QOFF, 0); 4396 ahc_outb(ahc, SNSCB_QOFF, 0); 4397 ahc_outb(ahc, HNSCB_QOFF, 0); 4398 } 4399 4400 4401 /* We don't have any waiting selections */ 4402 ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL); 4403 4404 /* Our disconnection list is empty too */ 4405 ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL); 4406 4407 /* Message out buffer starts empty */ 4408 ahc_outb(ahc, MSG_OUT, MSG_NOOP); 4409 4410 /* 4411 * Set up the allowed SCSI sequences based on operational mode. 4412 * If we are a target, we'll enable select-in operations once 4413 * we've had a lun enabled. 4414 */ 4415 scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP; 4416 if ((ahc->flags & AHC_INITIATORMODE) != 0) 4417 scsiseq_template |= ENRSELI; 4418 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template); 4419 4420 /* 4421 * Load the sequencer program and enable the adapter 4422 * in "fast" mode. 4423 */ 4424 if (bootverbose) 4425 printf("%s: Downloading Sequencer Program...", 4426 ahc_name(ahc)); 4427 4428 ahc_loadseq(ahc); 4429 4430 /* We have to wait until after any system dumps...
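 * have had their chance to use the controller, so the reset hook below
 * is registered for the final stage of shutdown.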
*/ 4431 EVENTHANDLER_REGISTER(shutdown_final, ahc_shutdown, 4432 ahc, SHUTDOWN_PRI_DEFAULT); 4433 4434 return (0); 4435 } 4436 4437 static cam_status 4438 ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb, 4439 struct tmode_tstate **tstate, struct tmode_lstate **lstate, 4440 int notfound_failure) 4441 { 4442 int our_id; 4443 4444 /* 4445 * If we are not configured for target mode, someone 4446 * is really confused to be sending this to us. 4447 */ 4448 if ((ahc->flags & AHC_TARGETMODE) == 0) 4449 return (CAM_REQ_INVALID); 4450 4451 /* Range check target and lun */ 4452 4453 /* 4454 * Handle the 'black hole' device that sucks up 4455 * requests to unattached luns on enabled targets. 4456 */ 4457 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD 4458 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 4459 *tstate = NULL; 4460 *lstate = ahc->black_hole; 4461 } else { 4462 u_int max_id; 4463 4464 if (cam_sim_bus(sim) == 0) 4465 our_id = ahc->our_id; 4466 else 4467 our_id = ahc->our_id_b; 4468 4469 max_id = (ahc->features & AHC_WIDE) ? 15 : 7; 4470 if (ccb->ccb_h.target_id > max_id) 4471 return (CAM_TID_INVALID); 4472 4473 if (ccb->ccb_h.target_lun > 7) 4474 return (CAM_LUN_INVALID); 4475 4476 if (ccb->ccb_h.target_id != our_id) { 4477 if ((ahc->features & AHC_MULTI_TID) != 0) { 4478 /* 4479 * Only allow additional targets if 4480 * the initiator role is disabled. 4481 * The hardware cannot handle a re-select-in 4482 * on the initiator id during a re-select-out 4483 * on a different target id. 4484 */ 4485 if ((ahc->flags & AHC_INITIATORMODE) != 0) 4486 return (CAM_TID_INVALID); 4487 } else { 4488 /* 4489 * Only allow our target id to change 4490 * if the initiator role is not configured 4491 * and there are no enabled luns which 4492 * are attached to the currently registered 4493 * scsi id. 
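 * In other words, without AHC_MULTI_TID the chip answers selection on a
 * single hardware id, and that id can only be moved once neither the
 * initiator role nor an already enabled lun is still using it.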
4494 */ 4495 if ((ahc->flags & AHC_INITIATORMODE) != 0 4496 || ahc->enabled_luns > 0) 4497 return (CAM_TID_INVALID); 4498 } 4499 } 4500 4501 *tstate = ahc->enabled_targets[ccb->ccb_h.target_id]; 4502 *lstate = NULL; 4503 if (*tstate != NULL) 4504 *lstate = 4505 (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; 4506 } 4507 4508 if (notfound_failure != 0 && *lstate == NULL) 4509 return (CAM_PATH_INVALID); 4510 4511 return (CAM_REQ_CMP); 4512 } 4513 4514 static void 4515 ahc_action(struct cam_sim *sim, union ccb *ccb) 4516 { 4517 struct ahc_softc *ahc; 4518 struct tmode_lstate *lstate; 4519 u_int target_id; 4520 u_int our_id; 4521 int s; 4522 4523 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n")); 4524 4525 ahc = (struct ahc_softc *)cam_sim_softc(sim); 4526 4527 target_id = ccb->ccb_h.target_id; 4528 our_id = SIM_SCSI_ID(ahc, sim); 4529 4530 switch (ccb->ccb_h.func_code) { 4531 /* Common cases first */ 4532 case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ 4533 case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/ 4534 { 4535 struct tmode_tstate *tstate; 4536 cam_status status; 4537 4538 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, 4539 &lstate, TRUE); 4540 4541 if (status != CAM_REQ_CMP) { 4542 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { 4543 /* Response from the black hole device */ 4544 tstate = NULL; 4545 lstate = ahc->black_hole; 4546 } else { 4547 ccb->ccb_h.status = status; 4548 xpt_done(ccb); 4549 break; 4550 } 4551 } 4552 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 4553 int s; 4554 4555 s = splcam(); 4556 SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h, 4557 sim_links.sle); 4558 ccb->ccb_h.status = CAM_REQ_INPROG; 4559 if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0) 4560 ahc_run_tqinfifo(ahc, /*paused*/FALSE); 4561 splx(s); 4562 break; 4563 } 4564 4565 /* 4566 * The target_id represents the target we attempt to 4567 * select. In target mode, this is the initiator of 4568 * the original command. 4569 */ 4570 our_id = target_id; 4571 target_id = ccb->csio.init_id; 4572 /* FALLTHROUGH */ 4573 } 4574 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 4575 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 4576 { 4577 struct scb *scb; 4578 struct hardware_scb *hscb; 4579 struct ahc_initiator_tinfo *tinfo; 4580 struct tmode_tstate *tstate; 4581 u_int16_t mask; 4582 4583 /* 4584 * get an scb to use. 4585 */ 4586 if ((scb = ahcgetscb(ahc)) == NULL) { 4587 int s; 4588 4589 s = splcam(); 4590 ahc->flags |= AHC_RESOURCE_SHORTAGE; 4591 splx(s); 4592 xpt_freeze_simq(ahc->sim, /*count*/1); 4593 ahcsetccbstatus(ccb, CAM_REQUEUE_REQ); 4594 xpt_done(ccb); 4595 return; 4596 } 4597 4598 hscb = scb->hscb; 4599 4600 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE, 4601 ("start scb(%p)\n", scb)); 4602 scb->ccb = ccb; 4603 /* 4604 * So we can find the SCB when an abort is requested 4605 */ 4606 ccb->ccb_h.ccb_scb_ptr = scb; 4607 ccb->ccb_h.ccb_ahc_ptr = ahc; 4608 4609 /* 4610 * Put all the arguments for the xfer in the scb 4611 */ 4612 hscb->tcl = ((target_id << 4) & 0xF0) 4613 | (SIM_IS_SCSIBUS_B(ahc, sim) ? 
SELBUSB : 0) 4614 | (ccb->ccb_h.target_lun & 0x07); 4615 4616 mask = SCB_TARGET_MASK(scb); 4617 tinfo = ahc_fetch_transinfo(ahc, SIM_CHANNEL(ahc, sim), our_id, 4618 target_id, &tstate); 4619 4620 hscb->scsirate = tinfo->scsirate; 4621 hscb->scsioffset = tinfo->current.offset; 4622 if ((tstate->ultraenb & mask) != 0) 4623 hscb->control |= ULTRAENB; 4624 4625 if ((tstate->discenable & mask) != 0 4626 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0) 4627 hscb->control |= DISCENB; 4628 4629 if (ccb->ccb_h.func_code == XPT_RESET_DEV) { 4630 hscb->cmdpointer = NULL; 4631 scb->flags |= SCB_DEVICE_RESET; 4632 hscb->control |= MK_MESSAGE; 4633 ahc_execute_scb(scb, NULL, 0, 0); 4634 } else { 4635 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { 4636 if (ahc->pending_device == lstate) { 4637 scb->flags |= SCB_TARGET_IMMEDIATE; 4638 ahc->pending_device = NULL; 4639 } 4640 hscb->control |= TARGET_SCB; 4641 hscb->cmdpointer = IDENTIFY_SEEN; 4642 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { 4643 hscb->cmdpointer |= SPHASE_PENDING; 4644 hscb->status = ccb->csio.scsi_status; 4645 } 4646 4647 /* Overloaded with tag ID */ 4648 hscb->cmdlen = ccb->csio.tag_id; 4649 /* 4650 * Overloaded with the value to place 4651 * in SCSIID for reselection. 4652 */ 4653 hscb->cmdpointer |= 4654 (our_id|(hscb->tcl & 0xF0)) << 16; 4655 } 4656 if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) 4657 hscb->control |= ccb->csio.tag_action; 4658 4659 ahc_setup_data(ahc, &ccb->csio, scb); 4660 } 4661 break; 4662 } 4663 case XPT_NOTIFY_ACK: 4664 case XPT_IMMED_NOTIFY: 4665 { 4666 struct tmode_tstate *tstate; 4667 struct tmode_lstate *lstate; 4668 cam_status status; 4669 4670 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, 4671 &lstate, TRUE); 4672 4673 if (status != CAM_REQ_CMP) { 4674 ccb->ccb_h.status = status; 4675 xpt_done(ccb); 4676 break; 4677 } 4678 SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h, 4679 sim_links.sle); 4680 ccb->ccb_h.status = CAM_REQ_INPROG; 4681 ahc_send_lstate_events(ahc, lstate); 4682 break; 4683 } 4684 case XPT_EN_LUN: /* Enable LUN as a target */ 4685 ahc_handle_en_lun(ahc, sim, ccb); 4686 xpt_done(ccb); 4687 break; 4688 case XPT_ABORT: /* Abort the specified CCB */ 4689 { 4690 ahc_abort_ccb(ahc, sim, ccb); 4691 break; 4692 } 4693 case XPT_SET_TRAN_SETTINGS: 4694 { 4695 struct ahc_devinfo devinfo; 4696 struct ccb_trans_settings *cts; 4697 struct ahc_initiator_tinfo *tinfo; 4698 struct tmode_tstate *tstate; 4699 u_int16_t *discenable; 4700 u_int16_t *tagenable; 4701 u_int update_type; 4702 int s; 4703 4704 cts = &ccb->cts; 4705 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim), 4706 cts->ccb_h.target_id, 4707 cts->ccb_h.target_lun, 4708 SIM_CHANNEL(ahc, sim), 4709 ROLE_UNKNOWN); 4710 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, 4711 devinfo.our_scsiid, 4712 devinfo.target, &tstate); 4713 update_type = 0; 4714 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) { 4715 update_type |= AHC_TRANS_GOAL; 4716 discenable = &tstate->discenable; 4717 tagenable = &tstate->tagenable; 4718 } else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { 4719 update_type |= AHC_TRANS_USER; 4720 discenable = &ahc->user_discenable; 4721 tagenable = &ahc->user_tagenable; 4722 } else { 4723 ccb->ccb_h.status = CAM_REQ_INVALID; 4724 xpt_done(ccb); 4725 break; 4726 } 4727 4728 s = splcam(); 4729 4730 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { 4731 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) 4732 *discenable |= devinfo.target_mask; 4733 else 4734 *discenable &= ~devinfo.target_mask; 4735 } 4736 4737 if 
((cts->valid & CCB_TRANS_TQ_VALID) != 0) { 4738 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) 4739 *tagenable |= devinfo.target_mask; 4740 else 4741 *tagenable &= ~devinfo.target_mask; 4742 } 4743 4744 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { 4745 switch (cts->bus_width) { 4746 case MSG_EXT_WDTR_BUS_16_BIT: 4747 if ((ahc->features & AHC_WIDE) != 0) 4748 break; 4749 /* FALLTHROUGH to 8bit */ 4750 case MSG_EXT_WDTR_BUS_32_BIT: 4751 case MSG_EXT_WDTR_BUS_8_BIT: 4752 default: 4753 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 4754 break; 4755 } 4756 ahc_set_width(ahc, &devinfo, cts->ccb_h.path, 4757 cts->bus_width, update_type, 4758 /*paused*/FALSE); 4759 } 4760 4761 if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0) 4762 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) { 4763 struct ahc_syncrate *syncrate; 4764 u_int maxsync; 4765 4766 if ((ahc->features & AHC_ULTRA2) != 0) 4767 maxsync = AHC_SYNCRATE_ULTRA2; 4768 else if ((ahc->features & AHC_ULTRA) != 0) 4769 maxsync = AHC_SYNCRATE_ULTRA; 4770 else 4771 maxsync = AHC_SYNCRATE_FAST; 4772 4773 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) { 4774 if (update_type & AHC_TRANS_USER) 4775 cts->sync_offset = tinfo->user.offset; 4776 else 4777 cts->sync_offset = tinfo->goal.offset; 4778 } 4779 4780 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) { 4781 if (update_type & AHC_TRANS_USER) 4782 cts->sync_period = tinfo->user.period; 4783 else 4784 cts->sync_period = tinfo->goal.period; 4785 } 4786 4787 syncrate = ahc_find_syncrate(ahc, &cts->sync_period, 4788 maxsync); 4789 ahc_validate_offset(ahc, syncrate, &cts->sync_offset, 4790 MSG_EXT_WDTR_BUS_8_BIT); 4791 4792 /* We use a period of 0 to represent async */ 4793 if (cts->sync_offset == 0) 4794 cts->sync_period = 0; 4795 4796 ahc_set_syncrate(ahc, &devinfo, cts->ccb_h.path, 4797 syncrate, cts->sync_period, 4798 cts->sync_offset, update_type, 4799 /*paused*/FALSE); 4800 } 4801 splx(s); 4802 ccb->ccb_h.status = CAM_REQ_CMP; 4803 xpt_done(ccb); 4804 break; 4805 } 4806 case XPT_GET_TRAN_SETTINGS: 4807 /* Get default/user set transfer settings for the target */ 4808 { 4809 struct ahc_devinfo devinfo; 4810 struct ccb_trans_settings *cts; 4811 struct ahc_initiator_tinfo *targ_info; 4812 struct tmode_tstate *tstate; 4813 struct ahc_transinfo *tinfo; 4814 int s; 4815 4816 cts = &ccb->cts; 4817 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim), 4818 cts->ccb_h.target_id, 4819 cts->ccb_h.target_lun, 4820 SIM_CHANNEL(ahc, sim), 4821 ROLE_UNKNOWN); 4822 targ_info = ahc_fetch_transinfo(ahc, devinfo.channel, 4823 devinfo.our_scsiid, 4824 devinfo.target, &tstate); 4825 4826 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) 4827 tinfo = &targ_info->current; 4828 else 4829 tinfo = &targ_info->user; 4830 4831 s = splcam(); 4832 4833 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 4834 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { 4835 if ((ahc->user_discenable & devinfo.target_mask) != 0) 4836 cts->flags |= CCB_TRANS_DISC_ENB; 4837 4838 if ((ahc->user_tagenable & devinfo.target_mask) != 0) 4839 cts->flags |= CCB_TRANS_TAG_ENB; 4840 } else { 4841 if ((tstate->discenable & devinfo.target_mask) != 0) 4842 cts->flags |= CCB_TRANS_DISC_ENB; 4843 4844 if ((tstate->tagenable & devinfo.target_mask) != 0) 4845 cts->flags |= CCB_TRANS_TAG_ENB; 4846 } 4847 4848 cts->sync_period = tinfo->period; 4849 cts->sync_offset = tinfo->offset; 4850 cts->bus_width = tinfo->width; 4851 4852 splx(s); 4853 4854 cts->valid = CCB_TRANS_SYNC_RATE_VALID 4855 | CCB_TRANS_SYNC_OFFSET_VALID 4856 | 
CCB_TRANS_BUS_WIDTH_VALID 4857 | CCB_TRANS_DISC_VALID 4858 | CCB_TRANS_TQ_VALID; 4859 4860 ccb->ccb_h.status = CAM_REQ_CMP; 4861 xpt_done(ccb); 4862 break; 4863 } 4864 case XPT_CALC_GEOMETRY: 4865 { 4866 struct ccb_calc_geometry *ccg; 4867 u_int32_t size_mb; 4868 u_int32_t secs_per_cylinder; 4869 int extended; 4870 4871 ccg = &ccb->ccg; 4872 size_mb = ccg->volume_size 4873 / ((1024L * 1024L) / ccg->block_size); 4874 extended = SIM_IS_SCSIBUS_B(ahc, sim) 4875 ? ahc->flags & AHC_EXTENDED_TRANS_B 4876 : ahc->flags & AHC_EXTENDED_TRANS_A; 4877 4878 if (size_mb > 1024 && extended) { 4879 ccg->heads = 255; 4880 ccg->secs_per_track = 63; 4881 } else { 4882 ccg->heads = 64; 4883 ccg->secs_per_track = 32; 4884 } 4885 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 4886 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 4887 ccb->ccb_h.status = CAM_REQ_CMP; 4888 xpt_done(ccb); 4889 break; 4890 } 4891 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 4892 { 4893 int found; 4894 4895 s = splcam(); 4896 found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim), 4897 /*initiate reset*/TRUE); 4898 splx(s); 4899 if (bootverbose) { 4900 xpt_print_path(SIM_PATH(ahc, sim)); 4901 printf("SCSI bus reset delivered. " 4902 "%d SCBs aborted.\n", found); 4903 } 4904 ccb->ccb_h.status = CAM_REQ_CMP; 4905 xpt_done(ccb); 4906 break; 4907 } 4908 case XPT_TERM_IO: /* Terminate the I/O process */ 4909 /* XXX Implement */ 4910 ccb->ccb_h.status = CAM_REQ_INVALID; 4911 xpt_done(ccb); 4912 break; 4913 case XPT_PATH_INQ: /* Path routing inquiry */ 4914 { 4915 struct ccb_pathinq *cpi = &ccb->cpi; 4916 4917 cpi->version_num = 1; /* XXX??? */ 4918 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; 4919 if ((ahc->features & AHC_WIDE) != 0) 4920 cpi->hba_inquiry |= PI_WIDE_16; 4921 if ((ahc->flags & AHC_TARGETMODE) != 0) { 4922 cpi->target_sprt = PIT_PROCESSOR 4923 | PIT_DISCONNECT 4924 | PIT_TERM_IO; 4925 } else { 4926 cpi->target_sprt = 0; 4927 } 4928 cpi->hba_misc = (ahc->flags & AHC_INITIATORMODE) 4929 ? 0 : PIM_NOINITIATOR; 4930 cpi->hba_eng_cnt = 0; 4931 cpi->max_target = (ahc->features & AHC_WIDE) ? 15 : 7; 4932 cpi->max_lun = 7; 4933 if (SIM_IS_SCSIBUS_B(ahc, sim)) { 4934 cpi->initiator_id = ahc->our_id_b; 4935 if ((ahc->flags & AHC_RESET_BUS_B) == 0) 4936 cpi->hba_misc |= PIM_NOBUSRESET; 4937 } else { 4938 cpi->initiator_id = ahc->our_id; 4939 if ((ahc->flags & AHC_RESET_BUS_A) == 0) 4940 cpi->hba_misc |= PIM_NOBUSRESET; 4941 } 4942 cpi->bus_id = cam_sim_bus(sim); 4943 cpi->base_transfer_speed = 3300; 4944 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 4945 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN); 4946 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 4947 cpi->unit_number = cam_sim_unit(sim); 4948 cpi->ccb_h.status = CAM_REQ_CMP; 4949 xpt_done(ccb); 4950 break; 4951 } 4952 default: 4953 ccb->ccb_h.status = CAM_REQ_INVALID; 4954 xpt_done(ccb); 4955 break; 4956 } 4957 } 4958 4959 static void 4960 ahc_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) 4961 { 4962 struct ahc_softc *ahc; 4963 struct cam_sim *sim; 4964 4965 sim = (struct cam_sim *)callback_arg; 4966 ahc = (struct ahc_softc *)cam_sim_softc(sim); 4967 switch (code) { 4968 case AC_LOST_DEVICE: 4969 { 4970 struct ahc_devinfo devinfo; 4971 int s; 4972 4973 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim), 4974 xpt_path_target_id(path), 4975 xpt_path_lun_id(path), 4976 SIM_CHANNEL(ahc, sim), 4977 ROLE_UNKNOWN); 4978 4979 /* 4980 * Revert to async/narrow transfers 4981 * for the next device. 
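 * AC_LOST_DEVICE means the old device is gone, so both the goal and the
 * current settings are cleared here and any new device that appears at
 * this id will renegotiate from scratch.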
4982 */ 4983 s = splcam(); 4984 ahc_set_width(ahc, &devinfo, path, MSG_EXT_WDTR_BUS_8_BIT, 4985 AHC_TRANS_GOAL|AHC_TRANS_CUR, 4986 /*paused*/FALSE); 4987 ahc_set_syncrate(ahc, &devinfo, path, /*syncrate*/NULL, 4988 /*period*/0, /*offset*/0, 4989 AHC_TRANS_GOAL|AHC_TRANS_CUR, 4990 /*paused*/FALSE); 4991 splx(s); 4992 break; 4993 } 4994 default: 4995 break; 4996 } 4997 } 4998 4999 static void 5000 ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments, 5001 int error) 5002 { 5003 struct scb *scb; 5004 union ccb *ccb; 5005 struct ahc_softc *ahc; 5006 int s; 5007 5008 scb = (struct scb *)arg; 5009 ccb = scb->ccb; 5010 ahc = (struct ahc_softc *)ccb->ccb_h.ccb_ahc_ptr; 5011 5012 if (error != 0) { 5013 if (error == EFBIG) 5014 ahcsetccbstatus(scb->ccb, CAM_REQ_TOO_BIG); 5015 else 5016 ahcsetccbstatus(scb->ccb, CAM_REQ_CMP_ERR); 5017 if (nsegments != 0) 5018 bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap); 5019 ahcfreescb(ahc, scb); 5020 xpt_done(ccb); 5021 return; 5022 } 5023 if (nsegments != 0) { 5024 struct ahc_dma_seg *sg; 5025 bus_dma_segment_t *end_seg; 5026 bus_dmasync_op_t op; 5027 5028 end_seg = dm_segs + nsegments; 5029 5030 /* Copy the first SG into the data pointer area */ 5031 scb->hscb->data = dm_segs->ds_addr; 5032 scb->hscb->datalen = dm_segs->ds_len; 5033 5034 /* Copy the segments into our SG list */ 5035 sg = scb->sg_list; 5036 while (dm_segs < end_seg) { 5037 sg->addr = dm_segs->ds_addr; 5038 sg->len = dm_segs->ds_len; 5039 sg++; 5040 dm_segs++; 5041 } 5042 5043 /* Note where to find the SG entries in bus space */ 5044 scb->hscb->SG_pointer = scb->sg_list_phys; 5045 5046 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 5047 op = BUS_DMASYNC_PREREAD; 5048 else 5049 op = BUS_DMASYNC_PREWRITE; 5050 5051 bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op); 5052 5053 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { 5054 scb->hscb->cmdpointer |= DPHASE_PENDING; 5055 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 5056 scb->hscb->cmdpointer |= (TARGET_DATA_IN << 8); 5057 5058 /* 5059 * If the transfer is of an odd length and in the 5060 * "in" direction (scsi->HostBus), then it may 5061 * trigger a bug in the 'WideODD' feature of 5062 * non-Ultra2 chips. Force the total data-length 5063 * to be even by adding an extra, 1 byte, SG, 5064 * element. We do this even if we are not currently 5065 * negotiated wide as negotiation could occur before 5066 * this command is executed. 5067 */ 5068 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN 5069 && (ccb->csio.dxfer_len & 0x1) != 0 5070 && (ahc->features & AHC_TARG_DMABUG) != 0) { 5071 5072 nsegments++; 5073 if (nsegments > AHC_NSEG) { 5074 5075 ahcsetccbstatus(scb->ccb, 5076 CAM_REQ_TOO_BIG); 5077 bus_dmamap_unload(ahc->buffer_dmat, 5078 scb->dmamap); 5079 ahcfreescb(ahc, scb); 5080 xpt_done(ccb); 5081 return; 5082 } 5083 sg->addr = ahc->dma_bug_buf; 5084 sg->len = 1; 5085 } 5086 } 5087 } else { 5088 scb->hscb->SG_pointer = 0; 5089 scb->hscb->data = 0; 5090 scb->hscb->datalen = 0; 5091 } 5092 5093 scb->sg_count = scb->hscb->SG_count = nsegments; 5094 5095 s = splcam(); 5096 5097 /* 5098 * Last time we need to check if this SCB needs to 5099 * be aborted. 
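 * The ccb's status may no longer be CAM_REQ_INPROG if something (a
 * recovery action, for example) failed it while the dma mapping was
 * being set up, so check once more before committing the SCB to the card.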
5100 */ 5101 if (ahc_ccb_status(ccb) != CAM_REQ_INPROG) { 5102 if (nsegments != 0) 5103 bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap); 5104 ahcfreescb(ahc, scb); 5105 xpt_done(ccb); 5106 splx(s); 5107 return; 5108 } 5109 5110 /* Busy this tcl if we are untagged */ 5111 if ((scb->hscb->control & TAG_ENB) == 0) 5112 ahc_busy_tcl(ahc, scb); 5113 5114 LIST_INSERT_HEAD(&ahc->pending_ccbs, &ccb->ccb_h, 5115 sim_links.le); 5116 5117 scb->flags |= SCB_ACTIVE; 5118 ccb->ccb_h.status |= CAM_SIM_QUEUED; 5119 5120 ccb->ccb_h.timeout_ch = 5121 timeout(ahc_timeout, (caddr_t)scb, 5122 (ccb->ccb_h.timeout * hz) / 1000); 5123 5124 if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) { 5125 #if 0 5126 printf("Continueing Immediate Command %d:%d\n", 5127 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 5128 #endif 5129 pause_sequencer(ahc); 5130 if ((ahc->flags & AHC_PAGESCBS) == 0) 5131 ahc_outb(ahc, SCBPTR, scb->hscb->tag); 5132 ahc_outb(ahc, SCB_TAG, scb->hscb->tag); 5133 ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP); 5134 unpause_sequencer(ahc); 5135 } else { 5136 5137 ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag; 5138 5139 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 5140 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); 5141 } else { 5142 pause_sequencer(ahc); 5143 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); 5144 unpause_sequencer(ahc); 5145 } 5146 } 5147 5148 splx(s); 5149 } 5150 5151 static void 5152 ahc_poll(struct cam_sim *sim) 5153 { 5154 ahc_intr(cam_sim_softc(sim)); 5155 } 5156 5157 static void 5158 ahc_setup_data(struct ahc_softc *ahc, struct ccb_scsiio *csio, 5159 struct scb *scb) 5160 { 5161 struct hardware_scb *hscb; 5162 struct ccb_hdr *ccb_h; 5163 5164 hscb = scb->hscb; 5165 ccb_h = &csio->ccb_h; 5166 5167 if (ccb_h->func_code == XPT_SCSI_IO) { 5168 hscb->cmdlen = csio->cdb_len; 5169 if ((ccb_h->flags & CAM_CDB_POINTER) != 0) { 5170 if ((ccb_h->flags & CAM_CDB_PHYS) == 0) 5171 if (hscb->cmdlen <= 16) { 5172 memcpy(hscb->cmdstore, 5173 csio->cdb_io.cdb_ptr, 5174 hscb->cmdlen); 5175 hscb->cmdpointer = 5176 hscb->cmdstore_busaddr; 5177 } else { 5178 ahcsetccbstatus(scb->ccb, 5179 CAM_REQ_INVALID); 5180 xpt_done(scb->ccb); 5181 ahcfreescb(ahc, scb); 5182 return; 5183 } 5184 else 5185 hscb->cmdpointer = 5186 ((intptr_t)csio->cdb_io.cdb_ptr) & 0xffffffff; 5187 } else { 5188 /* 5189 * CCB CDB Data Storage area is only 16 bytes 5190 * so no additional testing is required 5191 */ 5192 memcpy(hscb->cmdstore, csio->cdb_io.cdb_bytes, 5193 hscb->cmdlen); 5194 hscb->cmdpointer = hscb->cmdstore_busaddr; 5195 } 5196 } 5197 5198 /* Only use S/G if there is a transfer */ 5199 if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 5200 if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) { 5201 /* We've been given a pointer to a single buffer */ 5202 if ((ccb_h->flags & CAM_DATA_PHYS) == 0) { 5203 int s; 5204 int error; 5205 5206 s = splsoftvm(); 5207 error = bus_dmamap_load(ahc->buffer_dmat, 5208 scb->dmamap, 5209 csio->data_ptr, 5210 csio->dxfer_len, 5211 ahc_execute_scb, 5212 scb, /*flags*/0); 5213 if (error == EINPROGRESS) { 5214 /* 5215 * So as to maintain ordering, 5216 * freeze the controller queue 5217 * until our mapping is 5218 * returned. 
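 * EINPROGRESS means bus_dmamap_load() will invoke ahc_execute_scb()
 * later, once resources for the mapping become available. Freezing the
 * simq keeps newer ccbs from being started ahead of this one, and the
 * CAM_RELEASE_SIMQ flag set below lets the queue thaw automatically when
 * this ccb finally completes.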
5219 */ 5220 xpt_freeze_simq(ahc->sim, 5221 /*count*/1); 5222 scb->ccb->ccb_h.status |= 5223 CAM_RELEASE_SIMQ; 5224 } 5225 splx(s); 5226 } else { 5227 struct bus_dma_segment seg; 5228 5229 /* Pointer to physical buffer */ 5230 if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE) 5231 panic("ahc_setup_data - Transfer size " 5232 "larger than can device max"); 5233 5234 seg.ds_addr = (bus_addr_t)csio->data_ptr; 5235 seg.ds_len = csio->dxfer_len; 5236 ahc_execute_scb(scb, &seg, 1, 0); 5237 } 5238 } else { 5239 struct bus_dma_segment *segs; 5240 5241 if ((ccb_h->flags & CAM_DATA_PHYS) != 0) 5242 panic("ahc_setup_data - Physical segment " 5243 "pointers unsupported"); 5244 5245 if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0) 5246 panic("ahc_setup_data - Virtual segment " 5247 "addresses unsupported"); 5248 5249 /* Just use the segments provided */ 5250 segs = (struct bus_dma_segment *)csio->data_ptr; 5251 ahc_execute_scb(scb, segs, csio->sglist_cnt, 0); 5252 } 5253 } else { 5254 ahc_execute_scb(scb, NULL, 0, 0); 5255 } 5256 } 5257 5258 static void 5259 ahc_freeze_devq(struct ahc_softc *ahc, struct cam_path *path) 5260 { 5261 int target; 5262 char channel; 5263 int lun; 5264 5265 target = xpt_path_target_id(path); 5266 lun = xpt_path_lun_id(path); 5267 channel = xpt_path_sim(path)->bus_id == 0 ? 'A' : 'B'; 5268 5269 ahc_search_qinfifo(ahc, target, channel, lun, 5270 /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN, 5271 CAM_REQUEUE_REQ, SEARCH_COMPLETE); 5272 } 5273 5274 static void 5275 ahcallocscbs(struct ahc_softc *ahc) 5276 { 5277 struct scb_data *scb_data; 5278 struct scb *next_scb; 5279 struct sg_map_node *sg_map; 5280 bus_addr_t physaddr; 5281 struct ahc_dma_seg *segs; 5282 int newcount; 5283 int i; 5284 5285 scb_data = ahc->scb_data; 5286 if (scb_data->numscbs >= AHC_SCB_MAX) 5287 /* Can't allocate any more */ 5288 return; 5289 5290 next_scb = &scb_data->scbarray[scb_data->numscbs]; 5291 5292 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); 5293 5294 if (sg_map == NULL) 5295 return; 5296 5297 /* Allocate S/G space for the next batch of SCBS */ 5298 if (bus_dmamem_alloc(scb_data->sg_dmat, (void **)&sg_map->sg_vaddr, 5299 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { 5300 free(sg_map, M_DEVBUF); 5301 return; 5302 } 5303 5304 SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); 5305 5306 bus_dmamap_load(scb_data->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr, 5307 PAGE_SIZE, ahcdmamapcb, &sg_map->sg_physaddr, 5308 /*flags*/0); 5309 5310 segs = sg_map->sg_vaddr; 5311 physaddr = sg_map->sg_physaddr; 5312 5313 newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg))); 5314 for (i = 0; scb_data->numscbs < AHC_SCB_MAX && i < newcount; i++) { 5315 int error; 5316 5317 next_scb->sg_list = segs; 5318 /* 5319 * The sequencer always starts with the second entry. 5320 * The first entry is embedded in the scb. 
5321 */ 5322 next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg); 5323 next_scb->flags = SCB_FREE; 5324 error = bus_dmamap_create(ahc->buffer_dmat, /*flags*/0, 5325 &next_scb->dmamap); 5326 if (error != 0) 5327 break; 5328 next_scb->hscb = &scb_data->hscbs[scb_data->numscbs]; 5329 next_scb->hscb->tag = ahc->scb_data->numscbs; 5330 next_scb->hscb->cmdstore_busaddr = 5331 ahc_hscb_busaddr(ahc, next_scb->hscb->tag) 5332 + offsetof(struct hardware_scb, cmdstore); 5333 SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, next_scb, links); 5334 segs += AHC_NSEG; 5335 physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg)); 5336 next_scb++; 5337 ahc->scb_data->numscbs++; 5338 } 5339 } 5340 5341 #ifdef AHC_DUMP_SEQ 5342 static void 5343 ahc_dumpseq(struct ahc_softc* ahc) 5344 { 5345 int i; 5346 int max_prog; 5347 5348 if ((ahc->chip & AHC_BUS_MASK) < AHC_PCI) 5349 max_prog = 448; 5350 else if ((ahc->features & AHC_ULTRA2) != 0) 5351 max_prog = 768; 5352 else 5353 max_prog = 512; 5354 5355 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); 5356 ahc_outb(ahc, SEQADDR0, 0); 5357 ahc_outb(ahc, SEQADDR1, 0); 5358 for (i = 0; i < max_prog; i++) { 5359 u_int8_t ins_bytes[4]; 5360 5361 ahc_insb(ahc, SEQRAM, ins_bytes, 4); 5362 printf("0x%08x\n", ins_bytes[0] << 24 5363 | ins_bytes[1] << 16 5364 | ins_bytes[2] << 8 5365 | ins_bytes[3]); 5366 } 5367 } 5368 #endif 5369 5370 static void 5371 ahc_loadseq(struct ahc_softc *ahc) 5372 { 5373 struct patch *cur_patch; 5374 int i; 5375 int downloaded; 5376 int skip_addr; 5377 u_int8_t download_consts[4]; 5378 5379 /* Setup downloadable constant table */ 5380 #if 0 5381 /* No downloaded constants are currently defined. */ 5382 download_consts[TMODE_NUMCMDS] = ahc->num_targetcmds; 5383 #endif 5384 5385 cur_patch = patches; 5386 downloaded = 0; 5387 skip_addr = 0; 5388 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); 5389 ahc_outb(ahc, SEQADDR0, 0); 5390 ahc_outb(ahc, SEQADDR1, 0); 5391 5392 for (i = 0; i < sizeof(seqprog)/4; i++) { 5393 if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) { 5394 /* 5395 * Don't download this instruction as it 5396 * is in a patch that was removed. 5397 */ 5398 continue; 5399 } 5400 ahc_download_instr(ahc, i, download_consts); 5401 downloaded++; 5402 } 5403 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE); 5404 restart_sequencer(ahc); 5405 5406 if (bootverbose) 5407 printf(" %d instructions downloaded\n", downloaded); 5408 } 5409 5410 static int 5411 ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch, 5412 int start_instr, int *skip_addr) 5413 { 5414 struct patch *cur_patch; 5415 struct patch *last_patch; 5416 int num_patches; 5417 5418 num_patches = sizeof(patches)/sizeof(struct patch); 5419 last_patch = &patches[num_patches]; 5420 cur_patch = *start_patch; 5421 5422 while (cur_patch < last_patch && start_instr == cur_patch->begin) { 5423 5424 if (cur_patch->patch_func(ahc) == 0) { 5425 5426 /* Start rejecting code */ 5427 *skip_addr = start_instr + cur_patch->skip_instr; 5428 cur_patch += cur_patch->skip_patch; 5429 } else { 5430 /* Accepted this patch. Advance to the next 5431 * one and wait for our intruction pointer to 5432 * hit this point. 
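 * Each patch entry describes the instruction range [begin, begin +
 * skip_instr); when its patch_func rejects it, that range is skipped and
 * skip_patch steps over the patch entries that fall inside the rejected
 * region.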
5433 */ 5434 cur_patch++; 5435 } 5436 } 5437 5438 *start_patch = cur_patch; 5439 if (start_instr < *skip_addr) 5440 /* Still skipping */ 5441 return (0); 5442 5443 return (1); 5444 } 5445 5446 static void 5447 ahc_download_instr(struct ahc_softc *ahc, int instrptr, u_int8_t *dconsts) 5448 { 5449 union ins_formats instr; 5450 struct ins_format1 *fmt1_ins; 5451 struct ins_format3 *fmt3_ins; 5452 u_int opcode; 5453 5454 /* Structure copy */ 5455 instr = *(union ins_formats*)&seqprog[instrptr * 4]; 5456 5457 fmt1_ins = &instr.format1; 5458 fmt3_ins = NULL; 5459 5460 /* Pull the opcode */ 5461 opcode = instr.format1.opcode; 5462 switch (opcode) { 5463 case AIC_OP_JMP: 5464 case AIC_OP_JC: 5465 case AIC_OP_JNC: 5466 case AIC_OP_CALL: 5467 case AIC_OP_JNE: 5468 case AIC_OP_JNZ: 5469 case AIC_OP_JE: 5470 case AIC_OP_JZ: 5471 { 5472 struct patch *cur_patch; 5473 int address_offset; 5474 u_int address; 5475 int skip_addr; 5476 int i; 5477 5478 fmt3_ins = &instr.format3; 5479 address_offset = 0; 5480 address = fmt3_ins->address; 5481 cur_patch = patches; 5482 skip_addr = 0; 5483 5484 for (i = 0; i < address;) { 5485 5486 ahc_check_patch(ahc, &cur_patch, i, &skip_addr); 5487 5488 if (skip_addr > i) { 5489 int end_addr; 5490 5491 end_addr = MIN(address, skip_addr); 5492 address_offset += end_addr - i; 5493 i = skip_addr; 5494 } else { 5495 i++; 5496 } 5497 } 5498 address -= address_offset; 5499 fmt3_ins->address = address; 5500 /* FALLTHROUGH */ 5501 } 5502 case AIC_OP_OR: 5503 case AIC_OP_AND: 5504 case AIC_OP_XOR: 5505 case AIC_OP_ADD: 5506 case AIC_OP_ADC: 5507 case AIC_OP_BMOV: 5508 if (fmt1_ins->parity != 0) { 5509 fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; 5510 } 5511 fmt1_ins->parity = 0; 5512 /* FALLTHROUGH */ 5513 case AIC_OP_ROL: 5514 if ((ahc->features & AHC_ULTRA2) != 0) { 5515 int i, count; 5516 5517 /* Calculate odd parity for the instruction */ 5518 for (i = 0, count = 0; i < 31; i++) { 5519 u_int32_t mask; 5520 5521 mask = 0x01 << i; 5522 if ((instr.integer & mask) != 0) 5523 count++; 5524 } 5525 if ((count & 0x01) == 0) 5526 instr.format1.parity = 1; 5527 } else { 5528 /* Compress the instruction for older sequencers */ 5529 if (fmt3_ins != NULL) { 5530 instr.integer = 5531 fmt3_ins->immediate 5532 | (fmt3_ins->source << 8) 5533 | (fmt3_ins->address << 16) 5534 | (fmt3_ins->opcode << 25); 5535 } else { 5536 instr.integer = 5537 fmt1_ins->immediate 5538 | (fmt1_ins->source << 8) 5539 | (fmt1_ins->destination << 16) 5540 | (fmt1_ins->ret << 24) 5541 | (fmt1_ins->opcode << 25); 5542 } 5543 } 5544 ahc_outsb(ahc, SEQRAM, instr.bytes, 4); 5545 break; 5546 default: 5547 panic("Unknown opcode encountered in seq program"); 5548 break; 5549 } 5550 } 5551 5552 static void 5553 ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) { 5554 5555 if ((scb->flags & SCB_RECOVERY_SCB) == 0) { 5556 struct ccb_hdr *ccbh; 5557 5558 scb->flags |= SCB_RECOVERY_SCB; 5559 5560 /* 5561 * Take all queued, but not sent SCBs out of the equation. 5562 * Also ensure that no new CCBs are queued to us while we 5563 * try to fix this problem. 5564 */ 5565 if ((scb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { 5566 xpt_freeze_simq(ahc->sim, /*count*/1); 5567 scb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5568 } 5569 5570 /* 5571 * Go through all of our pending SCBs and remove 5572 * any scheduled timeouts for them. We will reschedule 5573 * them after we've successfully fixed this problem. 
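 * (The rescheduling happens in ahc_done(): when the recovery SCB
 * completes, the timeouts for everything still on the pending_ccbs list
 * are re-armed there.)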
5574 */ 5575 ccbh = ahc->pending_ccbs.lh_first; 5576 while (ccbh != NULL) { 5577 struct scb *pending_scb; 5578 5579 pending_scb = (struct scb *)ccbh->ccb_scb_ptr; 5580 untimeout(ahc_timeout, pending_scb, ccbh->timeout_ch); 5581 ccbh = ccbh->sim_links.le.le_next; 5582 } 5583 } 5584 } 5585 5586 static void 5587 ahc_timeout(void *arg) 5588 { 5589 struct scb *scb; 5590 struct ahc_softc *ahc; 5591 int s, found; 5592 u_int last_phase; 5593 int target; 5594 int lun; 5595 int i; 5596 char channel; 5597 5598 scb = (struct scb *)arg; 5599 ahc = (struct ahc_softc *)scb->ccb->ccb_h.ccb_ahc_ptr; 5600 5601 s = splcam(); 5602 5603 /* 5604 * Ensure that the card doesn't do anything 5605 * behind our back. Also make sure that we 5606 * didn't "just" miss an interrupt that would 5607 * affect this timeout. 5608 */ 5609 do { 5610 ahc_intr(ahc); 5611 pause_sequencer(ahc); 5612 } while (ahc_inb(ahc, INTSTAT) & INT_PEND); 5613 5614 if ((scb->flags & SCB_ACTIVE) == 0) { 5615 /* Previous timeout took care of me already */ 5616 printf("Timedout SCB handled by another timeout\n"); 5617 unpause_sequencer(ahc); 5618 splx(s); 5619 return; 5620 } 5621 5622 target = SCB_TARGET(scb); 5623 channel = SCB_CHANNEL(scb); 5624 lun = SCB_LUN(scb); 5625 5626 xpt_print_path(scb->ccb->ccb_h.path); 5627 printf("SCB 0x%x - timed out ", scb->hscb->tag); 5628 /* 5629 * Take a snapshot of the bus state and print out 5630 * some information so we can track down driver bugs. 5631 */ 5632 last_phase = ahc_inb(ahc, LASTPHASE); 5633 5634 for (i = 0; i < num_phases; i++) { 5635 if (last_phase == phase_table[i].phase) 5636 break; 5637 } 5638 printf("%s", phase_table[i].phasemsg); 5639 5640 printf(", SEQADDR == 0x%x\n", 5641 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); 5642 5643 #if 0 5644 printf("SSTAT1 == 0x%x\n", ahc_inb(ahc, SSTAT1)); 5645 printf("SSTAT3 == 0x%x\n", ahc_inb(ahc, SSTAT3)); 5646 printf("SCSIPHASE == 0x%x\n", ahc_inb(ahc, SCSIPHASE)); 5647 printf("SCSIRATE == 0x%x\n", ahc_inb(ahc, SCSIRATE)); 5648 printf("SCSIOFFSET == 0x%x\n", ahc_inb(ahc, SCSIOFFSET)); 5649 printf("SEQ_FLAGS == 0x%x\n", ahc_inb(ahc, SEQ_FLAGS)); 5650 printf("SCB_DATAPTR == 0x%x\n", ahc_inb(ahc, SCB_DATAPTR) 5651 | ahc_inb(ahc, SCB_DATAPTR + 1) << 8 5652 | ahc_inb(ahc, SCB_DATAPTR + 2) << 16 5653 | ahc_inb(ahc, SCB_DATAPTR + 3) << 24); 5654 printf("SCB_DATACNT == 0x%x\n", ahc_inb(ahc, SCB_DATACNT) 5655 | ahc_inb(ahc, SCB_DATACNT + 1) << 8 5656 | ahc_inb(ahc, SCB_DATACNT + 2) << 16); 5657 printf("SCB_SGCOUNT == 0x%x\n", ahc_inb(ahc, SCB_SGCOUNT)); 5658 printf("CCSCBCTL == 0x%x\n", ahc_inb(ahc, CCSCBCTL)); 5659 printf("CCSCBCNT == 0x%x\n", ahc_inb(ahc, CCSCBCNT)); 5660 printf("DFCNTRL == 0x%x\n", ahc_inb(ahc, DFCNTRL)); 5661 printf("DFSTATUS == 0x%x\n", ahc_inb(ahc, DFSTATUS)); 5662 printf("CCHCNT == 0x%x\n", ahc_inb(ahc, CCHCNT)); 5663 if (scb->sg_count > 0) { 5664 for (i = 0; i < scb->sg_count; i++) { 5665 printf("sg[%d] - Addr 0x%x : Length %d\n", 5666 i, 5667 scb->sg_list[i].addr, 5668 scb->sg_list[i].len); 5669 } 5670 } 5671 #endif 5672 if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) { 5673 /* 5674 * Been down this road before. 5675 * Do a full bus reset. 5676 */ 5677 bus_reset: 5678 ahcsetccbstatus(scb->ccb, CAM_CMD_TIMEOUT); 5679 found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE); 5680 printf("%s: Issued Channel %c Bus Reset. " 5681 "%d SCBs aborted\n", ahc_name(ahc), channel, found); 5682 } else { 5683 /* 5684 * If we are a target, transition to bus free and report 5685 * the timeout. 
5686 * 5687 * The target/initiator that is holding up the bus may not 5688 * be the same as the one that triggered this timeout 5689 * (different commands have different timeout lengths). 5690 * If the bus is idle and we are acting as the initiator 5691 * for this request, queue a BDR message to the timed out 5692 * target. Otherwise, if the timed out transaction is 5693 * active: 5694 * Initiator transaction: 5695 * Stuff the message buffer with a BDR message and assert 5696 * ATN in the hopes that the target will let go of the bus 5697 * and go to the message-out phase. If this fails, we'll 5698 * get another timeout 2 seconds later which will attempt 5699 * a bus reset. 5700 * 5701 * Target transaction: 5702 * Transition to BUS FREE and report the error. 5703 * It's good to be the target! 5704 */ 5705 u_int active_scb_index; 5706 5707 active_scb_index = ahc_inb(ahc, SCB_TAG); 5708 5709 if (last_phase != P_BUSFREE 5710 && (active_scb_index < ahc->scb_data->numscbs)) { 5711 struct scb *active_scb; 5712 5713 /* 5714 * If the active SCB is not from our device, 5715 * assume that another device is hogging the bus 5716 * and wait for its timeout to expire before 5717 * taking additional action. 5718 */ 5719 active_scb = &ahc->scb_data->scbarray[active_scb_index]; 5720 if (active_scb->hscb->tcl != scb->hscb->tcl) { 5721 struct ccb_hdr *ccbh; 5722 u_int newtimeout; 5723 5724 xpt_print_path(scb->ccb->ccb_h.path); 5725 printf("Other SCB Timeout%s", 5726 (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0 5727 ? " again\n" : "\n"); 5728 scb->flags |= SCB_OTHERTCL_TIMEOUT; 5729 newtimeout = MAX(active_scb->ccb->ccb_h.timeout, 5730 scb->ccb->ccb_h.timeout); 5731 ccbh = &scb->ccb->ccb_h; 5732 scb->ccb->ccb_h.timeout_ch = 5733 timeout(ahc_timeout, scb, 5734 (newtimeout * hz) / 1000); 5735 splx(s); 5736 return; 5737 } 5738 5739 /* It's us */ 5740 if ((scb->hscb->control & TARGET_SCB) != 0) { 5741 5742 /* 5743 * Send back any queued up transactions 5744 * and properly record the error condition. 5745 */ 5746 ahc_freeze_devq(ahc, scb->ccb->ccb_h.path); 5747 ahcsetccbstatus(scb->ccb, CAM_CMD_TIMEOUT); 5748 ahc_freeze_ccb(scb->ccb); 5749 ahc_done(ahc, scb); 5750 5751 /* Will clear us from the bus */ 5752 restart_sequencer(ahc); 5753 return; 5754 } 5755 5756 ahc_set_recoveryscb(ahc, active_scb); 5757 ahc_outb(ahc, MSG_OUT, MSG_BUS_DEV_RESET); 5758 ahc_outb(ahc, SCSISIGO, last_phase|ATNO); 5759 xpt_print_path(active_scb->ccb->ccb_h.path); 5760 printf("BDR message in message buffer\n"); 5761 active_scb->flags |= SCB_DEVICE_RESET; 5762 active_scb->ccb->ccb_h.timeout_ch = 5763 timeout(ahc_timeout, (caddr_t)active_scb, 2 * hz); 5764 unpause_sequencer(ahc); 5765 } else { 5766 int disconnected; 5767 5768 /* XXX Shouldn't panic. Just punt instead */ 5769 if ((scb->hscb->control & TARGET_SCB) != 0) 5770 panic("Timed-out target SCB but bus idle"); 5771 5772 if (last_phase != P_BUSFREE 5773 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) { 5774 /* XXX What happened to the SCB? */ 5775 /* Hung target selection. Goto busfree */ 5776 printf("%s: Hung target selection\n", 5777 ahc_name(ahc)); 5778 restart_sequencer(ahc); 5779 return; 5780 } 5781 5782 if (ahc_search_qinfifo(ahc, target, channel, lun, 5783 scb->hscb->tag, ROLE_INITIATOR, 5784 /*status*/0, SEARCH_COUNT) > 0) { 5785 disconnected = FALSE; 5786 } else { 5787 disconnected = TRUE; 5788 } 5789 5790 if (disconnected) { 5791 u_int active_scb; 5792 5793 ahc_set_recoveryscb(ahc, scb); 5794 /* 5795 * Simply set the MK_MESSAGE control bit.
5796 */ 5797 scb->hscb->control |= MK_MESSAGE; 5798 scb->flags |= SCB_QUEUED_MSG 5799 | SCB_DEVICE_RESET; 5800 5801 /* 5802 * Mark the cached copy of this SCB in the 5803 * disconnected list too, so that a reconnect 5804 * at this point causes a BDR or abort. 5805 */ 5806 active_scb = ahc_inb(ahc, SCBPTR); 5807 if (ahc_search_disc_list(ahc, target, 5808 channel, lun, 5809 scb->hscb->tag, 5810 /*stop_on_first*/TRUE, 5811 /*remove*/FALSE, 5812 /*save_state*/FALSE)) { 5813 u_int scb_control; 5814 5815 scb_control = ahc_inb(ahc, SCB_CONTROL); 5816 scb_control |= MK_MESSAGE; 5817 ahc_outb(ahc, SCB_CONTROL, scb_control); 5818 } 5819 ahc_outb(ahc, SCBPTR, active_scb); 5820 ahc_index_busy_tcl(ahc, scb->hscb->tcl, 5821 /*unbusy*/TRUE); 5822 5823 /* 5824 * Actually re-queue this SCB in case we can 5825 * select the device before it reconnects. 5826 * Clear out any entries in the QINFIFO first 5827 * so we are the next SCB for this target 5828 * to run. 5829 */ 5830 ahc_search_qinfifo(ahc, SCB_TARGET(scb), 5831 channel, SCB_LUN(scb), 5832 SCB_LIST_NULL, 5833 ROLE_INITIATOR, 5834 CAM_REQUEUE_REQ, 5835 SEARCH_COMPLETE); 5836 xpt_print_path(scb->ccb->ccb_h.path); 5837 printf("Queuing a BDR SCB\n"); 5838 ahc->qinfifo[ahc->qinfifonext++] = 5839 scb->hscb->tag; 5840 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 5841 ahc_outb(ahc, HNSCB_QOFF, 5842 ahc->qinfifonext); 5843 } else { 5844 ahc_outb(ahc, KERNEL_QINPOS, 5845 ahc->qinfifonext); 5846 } 5847 scb->ccb->ccb_h.timeout_ch = 5848 timeout(ahc_timeout, (caddr_t)scb, 2 * hz); 5849 unpause_sequencer(ahc); 5850 } else { 5851 /* Go "immediatly" to the bus reset */ 5852 /* This shouldn't happen */ 5853 ahc_set_recoveryscb(ahc, scb); 5854 xpt_print_path(scb->ccb->ccb_h.path); 5855 printf("SCB %d: Immediate reset. " 5856 "Flags = 0x%x\n", scb->hscb->tag, 5857 scb->flags); 5858 goto bus_reset; 5859 } 5860 } 5861 } 5862 splx(s); 5863 } 5864 5865 static int 5866 ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel, 5867 int lun, u_int tag, role_t role, u_int32_t status, 5868 ahc_search_action action) 5869 { 5870 struct scb *scbp; 5871 u_int8_t qinpos; 5872 u_int8_t qintail; 5873 int found; 5874 5875 qinpos = ahc_inb(ahc, QINPOS); 5876 qintail = ahc->qinfifonext; 5877 found = 0; 5878 5879 /* 5880 * Start with an empty queue. Entries that are not chosen 5881 * for removal will be re-added to the queue as we go. 5882 */ 5883 ahc->qinfifonext = qinpos; 5884 5885 while (qinpos != qintail) { 5886 scbp = &ahc->scb_data->scbarray[ahc->qinfifo[qinpos]]; 5887 if (ahc_match_scb(scbp, target, channel, lun, tag, role)) { 5888 /* 5889 * We found an scb that needs to be removed. 
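 * What "removed" means depends on the action: SEARCH_COMPLETE finishes
 * the ccb with the caller's status, SEARCH_COUNT only tallies the match
 * and puts the entry back on the queue, and SEARCH_REMOVE drops it from
 * the qinfifo without completing it.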
5890 */ 5891 switch (action) { 5892 case SEARCH_COMPLETE: 5893 if (ahc_ccb_status(scbp->ccb) == CAM_REQ_INPROG) 5894 ahcsetccbstatus(scbp->ccb, status); 5895 ahc_freeze_ccb(scbp->ccb); 5896 ahc_done(ahc, scbp); 5897 break; 5898 case SEARCH_COUNT: 5899 ahc->qinfifo[ahc->qinfifonext++] = 5900 scbp->hscb->tag; 5901 break; 5902 case SEARCH_REMOVE: 5903 break; 5904 } 5905 found++; 5906 } else { 5907 ahc->qinfifo[ahc->qinfifonext++] = scbp->hscb->tag; 5908 } 5909 qinpos++; 5910 } 5911 5912 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 5913 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); 5914 } else { 5915 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); 5916 } 5917 5918 return (found); 5919 } 5920 5921 5922 static void 5923 ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) 5924 { 5925 union ccb *abort_ccb; 5926 5927 abort_ccb = ccb->cab.abort_ccb; 5928 switch (abort_ccb->ccb_h.func_code) { 5929 case XPT_ACCEPT_TARGET_IO: 5930 case XPT_IMMED_NOTIFY: 5931 case XPT_CONT_TARGET_IO: 5932 { 5933 struct tmode_tstate *tstate; 5934 struct tmode_lstate *lstate; 5935 struct ccb_hdr_slist *list; 5936 cam_status status; 5937 5938 status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate, 5939 &lstate, TRUE); 5940 5941 if (status != CAM_REQ_CMP) { 5942 ccb->ccb_h.status = status; 5943 break; 5944 } 5945 5946 if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) 5947 list = &lstate->accept_tios; 5948 else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) 5949 list = &lstate->immed_notifies; 5950 else 5951 list = NULL; 5952 5953 if (list != NULL) { 5954 struct ccb_hdr *curelm; 5955 int found; 5956 5957 curelm = SLIST_FIRST(list); 5958 found = 0; 5959 if (curelm == &abort_ccb->ccb_h) { 5960 found = 1; 5961 SLIST_REMOVE_HEAD(list, sim_links.sle); 5962 } else { 5963 while(curelm != NULL) { 5964 struct ccb_hdr *nextelm; 5965 5966 nextelm = 5967 SLIST_NEXT(curelm, sim_links.sle); 5968 5969 if (nextelm == &abort_ccb->ccb_h) { 5970 found = 1; 5971 SLIST_NEXT(curelm, 5972 sim_links.sle) = 5973 SLIST_NEXT(nextelm, 5974 sim_links.sle); 5975 break; 5976 } 5977 curelm = nextelm; 5978 } 5979 } 5980 5981 if (found) { 5982 abort_ccb->ccb_h.status = CAM_REQ_ABORTED; 5983 xpt_done(abort_ccb); 5984 ccb->ccb_h.status = CAM_REQ_CMP; 5985 } else { 5986 printf("Not found\n"); 5987 ccb->ccb_h.status = CAM_PATH_INVALID; 5988 } 5989 break; 5990 } 5991 /* FALLTHROUGH */ 5992 } 5993 case XPT_SCSI_IO: 5994 /* XXX Fully implement the hard ones */ 5995 ccb->ccb_h.status = CAM_UA_ABORT; 5996 break; 5997 default: 5998 ccb->ccb_h.status = CAM_REQ_INVALID; 5999 break; 6000 } 6001 xpt_done(ccb); 6002 } 6003 6004 /* 6005 * Abort all SCBs that match the given description (target/channel/lun/tag), 6006 * setting their status to the passed in status if the status has not already 6007 * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer 6008 * is paused before it is called. 6009 */ 6010 static int 6011 ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, 6012 int lun, u_int tag, role_t role, u_int32_t status) 6013 { 6014 struct scb *scbp; 6015 u_int active_scb; 6016 int i; 6017 int found; 6018 6019 /* restore this when we're done */ 6020 active_scb = ahc_inb(ahc, SCBPTR); 6021 6022 found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL, 6023 role, CAM_REQUEUE_REQ, SEARCH_COMPLETE); 6024 6025 /* 6026 * Search waiting for selection list. 6027 */ 6028 { 6029 u_int8_t next, prev; 6030 6031 next = ahc_inb(ahc, WAITING_SCBH); /* Start at head of list. 
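The list is singly linked through each SCB's SCB_NEXT field, so prev is carried along to let ahc_abort_wscb() unlink a matching entry.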
*/ 6032 prev = SCB_LIST_NULL; 6033 6034 while (next != SCB_LIST_NULL) { 6035 u_int8_t scb_index; 6036 6037 ahc_outb(ahc, SCBPTR, next); 6038 scb_index = ahc_inb(ahc, SCB_TAG); 6039 if (scb_index >= ahc->scb_data->numscbs) { 6040 panic("Waiting List inconsistency. " 6041 "SCB index == %d, yet numscbs == %d.", 6042 scb_index, ahc->scb_data->numscbs); 6043 } 6044 scbp = &ahc->scb_data->scbarray[scb_index]; 6045 if (ahc_match_scb(scbp, target, channel, 6046 lun, SCB_LIST_NULL, role)) { 6047 6048 next = ahc_abort_wscb(ahc, next, prev); 6049 } else { 6050 6051 prev = next; 6052 next = ahc_inb(ahc, SCB_NEXT); 6053 } 6054 } 6055 } 6056 /* 6057 * Go through the disconnected list and remove any entries we 6058 * have queued for completion, 0'ing their control byte too. 6059 * We save the active SCB and restore it ourselves, so there 6060 * is no reason for this search to restore it too. 6061 */ 6062 ahc_search_disc_list(ahc, target, channel, lun, tag, 6063 /*stop_on_first*/FALSE, /*remove*/TRUE, 6064 /*save_state*/FALSE); 6065 6066 /* 6067 * Go through the hardware SCB array looking for commands that 6068 * were active but not on any list. 6069 */ 6070 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 6071 u_int scbid; 6072 6073 ahc_outb(ahc, SCBPTR, i); 6074 scbid = ahc_inb(ahc, SCB_TAG); 6075 scbp = &ahc->scb_data->scbarray[scbid]; 6076 if (scbid < ahc->scb_data->numscbs 6077 && ahc_match_scb(scbp, target, channel, lun, tag, role)) 6078 ahc_add_curscb_to_free_list(ahc); 6079 } 6080 6081 /* 6082 * Go through the pending CCB list and look for 6083 * commands for this target that are still active. 6084 * These are other tagged commands that were 6085 * disconnected when the reset occurred. 6086 */ 6087 { 6088 struct ccb_hdr *ccb_h; 6089 6090 ccb_h = ahc->pending_ccbs.lh_first; 6091 while (ccb_h != NULL) { 6092 scbp = (struct scb *)ccb_h->ccb_scb_ptr; 6093 ccb_h = ccb_h->sim_links.le.le_next; 6094 if (ahc_match_scb(scbp, target, channel, 6095 lun, tag, role)) { 6096 if (ahc_ccb_status(scbp->ccb) == CAM_REQ_INPROG) 6097 ahcsetccbstatus(scbp->ccb, status); 6098 ahc_freeze_ccb(scbp->ccb); 6099 ahc_done(ahc, scbp); 6100 found++; 6101 } 6102 } 6103 } 6104 ahc_outb(ahc, SCBPTR, active_scb); 6105 return found; 6106 } 6107 6108 static int 6109 ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel, 6110 int lun, u_int tag, int stop_on_first, int remove, 6111 int save_state) 6112 { 6113 struct scb *scbp; 6114 u_int next; 6115 u_int prev; 6116 u_int count; 6117 u_int active_scb; 6118 6119 count = 0; 6120 next = ahc_inb(ahc, DISCONNECTED_SCBH); 6121 prev = SCB_LIST_NULL; 6122 6123 if (save_state) { 6124 /* restore this when we're done */ 6125 active_scb = ahc_inb(ahc, SCBPTR); 6126 } else 6127 /* Silence compiler */ 6128 active_scb = SCB_LIST_NULL; 6129 6130 while (next != SCB_LIST_NULL) { 6131 u_int scb_index; 6132 6133 ahc_outb(ahc, SCBPTR, next); 6134 scb_index = ahc_inb(ahc, SCB_TAG); 6135 if (scb_index >= ahc->scb_data->numscbs) { 6136 panic("Disconnected List inconsistency.
" 6137 "SCB index == %d, yet numscbs == %d.", 6138 scb_index, ahc->scb_data->numscbs); 6139 } 6140 scbp = &ahc->scb_data->scbarray[scb_index]; 6141 if (ahc_match_scb(scbp, target, channel, lun, 6142 tag, ROLE_INITIATOR)) { 6143 count++; 6144 if (remove) { 6145 next = 6146 ahc_rem_scb_from_disc_list(ahc, prev, next); 6147 } else { 6148 prev = next; 6149 next = ahc_inb(ahc, SCB_NEXT); 6150 } 6151 if (stop_on_first) 6152 break; 6153 } else { 6154 prev = next; 6155 next = ahc_inb(ahc, SCB_NEXT); 6156 } 6157 } 6158 if (save_state) 6159 ahc_outb(ahc, SCBPTR, active_scb); 6160 return (count); 6161 } 6162 6163 static u_int 6164 ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr) 6165 { 6166 u_int next; 6167 6168 ahc_outb(ahc, SCBPTR, scbptr); 6169 next = ahc_inb(ahc, SCB_NEXT); 6170 6171 ahc_outb(ahc, SCB_CONTROL, 0); 6172 6173 ahc_add_curscb_to_free_list(ahc); 6174 6175 if (prev != SCB_LIST_NULL) { 6176 ahc_outb(ahc, SCBPTR, prev); 6177 ahc_outb(ahc, SCB_NEXT, next); 6178 } else 6179 ahc_outb(ahc, DISCONNECTED_SCBH, next); 6180 6181 return (next); 6182 } 6183 6184 static void 6185 ahc_add_curscb_to_free_list(struct ahc_softc *ahc) 6186 { 6187 /* Invalidate the tag so that ahc_find_scb doesn't think it's active */ 6188 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); 6189 6190 ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH)); 6191 ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR)); 6192 } 6193 6194 /* 6195 * Manipulate the waiting for selection list and return the 6196 * scb that follows the one that we remove. 6197 */ 6198 static u_int 6199 ahc_abort_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev) 6200 { 6201 u_int curscb, next; 6202 6203 /* 6204 * Select the SCB we want to abort and 6205 * pull the next pointer out of it. 6206 */ 6207 curscb = ahc_inb(ahc, SCBPTR); 6208 ahc_outb(ahc, SCBPTR, scbpos); 6209 next = ahc_inb(ahc, SCB_NEXT); 6210 6211 /* Clear the necessary fields */ 6212 ahc_outb(ahc, SCB_CONTROL, 0); 6213 6214 ahc_add_curscb_to_free_list(ahc); 6215 6216 /* update the waiting list */ 6217 if (prev == SCB_LIST_NULL) { 6218 /* First in the list */ 6219 ahc_outb(ahc, WAITING_SCBH, next); 6220 6221 /* 6222 * Ensure we aren't attempting to perform 6223 * selection for this entry. 6224 */ 6225 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); 6226 } else { 6227 /* 6228 * Select the scb that pointed to us 6229 * and update its next pointer. 6230 */ 6231 ahc_outb(ahc, SCBPTR, prev); 6232 ahc_outb(ahc, SCB_NEXT, next); 6233 } 6234 6235 /* 6236 * Point us back at the original scb position. 
6237 */ 6238 ahc_outb(ahc, SCBPTR, curscb); 6239 return next; 6240 } 6241 6242 static void 6243 ahc_clear_intstat(struct ahc_softc *ahc) 6244 { 6245 /* Clear any interrupt conditions this may have caused */ 6246 ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO); 6247 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI 6248 |CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG| 6249 CLRREQINIT); 6250 ahc_outb(ahc, CLRINT, CLRSCSIINT); 6251 } 6252 6253 static void 6254 ahc_reset_current_bus(struct ahc_softc *ahc) 6255 { 6256 u_int8_t scsiseq; 6257 6258 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST); 6259 scsiseq = ahc_inb(ahc, SCSISEQ); 6260 ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO); 6261 DELAY(AHC_BUSRESET_DELAY); 6262 /* Turn off the bus reset */ 6263 ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO); 6264 6265 ahc_clear_intstat(ahc); 6266 6267 /* Re-enable reset interrupts */ 6268 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST); 6269 } 6270 6271 static int 6272 ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset) 6273 { 6274 struct cam_path *path; 6275 u_int initiator, target, max_scsiid; 6276 u_int sblkctl; 6277 u_int our_id; 6278 int found; 6279 int restart_needed; 6280 char cur_channel; 6281 6282 ahc->pending_device = NULL; 6283 6284 pause_sequencer(ahc); 6285 6286 /* 6287 * Run our command complete fifos to ensure that we perform 6288 * completion processing on any commands that 'completed' 6289 * before the reset occurred. 6290 */ 6291 ahc_run_qoutfifo(ahc); 6292 if ((ahc->flags & AHC_TARGETMODE) != 0) { 6293 ahc_run_tqinfifo(ahc, /*paused*/TRUE); 6294 } 6295 6296 /* 6297 * Reset the bus if we are initiating this reset 6298 */ 6299 sblkctl = ahc_inb(ahc, SBLKCTL); 6300 cur_channel = 'A'; 6301 if ((ahc->features & AHC_TWIN) != 0 6302 && ((sblkctl & SELBUSB) != 0)) 6303 cur_channel = 'B'; 6304 if (cur_channel != channel) { 6305 /* Case 1: Command for another bus is active 6306 * Stealthily reset the other bus without 6307 * upsetting the current bus. 6308 */ 6309 ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB); 6310 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 6311 ahc_outb(ahc, SCSISEQ, 6312 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 6313 if (initiate_reset) 6314 ahc_reset_current_bus(ahc); 6315 ahc_clear_intstat(ahc); 6316 ahc_outb(ahc, SBLKCTL, sblkctl); 6317 restart_needed = FALSE; 6318 } else { 6319 /* Case 2: A command from this bus is active or we're idle */ 6320 ahc_clear_msg_state(ahc); 6321 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 6322 ahc_outb(ahc, SCSISEQ, 6323 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 6324 if (initiate_reset) 6325 ahc_reset_current_bus(ahc); 6326 ahc_clear_intstat(ahc); 6327 6328 /* 6329 * Since we are going to restart the sequencer, avoid 6330 * a race in the sequencer that could cause corruption 6331 * of our Q pointers by starting over from index 0. 6332 */ 6333 ahc->qoutfifonext = 0; 6334 if ((ahc->features & AHC_QUEUE_REGS) != 0) 6335 ahc_outb(ahc, SDSCB_QOFF, 0); 6336 else 6337 ahc_outb(ahc, QOUTPOS, 0); 6338 if ((ahc->flags & AHC_TARGETMODE) != 0) { 6339 ahc->tqinfifonext = 0; 6340 ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1); 6341 ahc_outb(ahc, TQINPOS, 0); 6342 } 6343 restart_needed = TRUE; 6344 } 6345 6346 /* 6347 * Clean up all the state information for the 6348 * pending transactions on this bus. 
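* Wildcard target and lun values are passed so that every SCB on this channel matches, and any associated CCB still in progress is completed with CAM_SCSI_BUS_RESET status.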
6349 */ 6350 found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel, 6351 CAM_LUN_WILDCARD, SCB_LIST_NULL, 6352 ROLE_UNKNOWN, CAM_SCSI_BUS_RESET); 6353 if (channel == 'B') { 6354 path = ahc->path_b; 6355 our_id = ahc->our_id_b; 6356 } else { 6357 path = ahc->path; 6358 our_id = ahc->our_id; 6359 } 6360 6361 max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7; 6362 6363 /* 6364 * Send an immediate notify ccb to all target mode peripheral 6365 * drivers affected by this action. 6366 */ 6367 for (target = 0; target <= max_scsiid; target++) { 6368 struct tmode_tstate* tstate; 6369 u_int lun; 6370 6371 tstate = ahc->enabled_targets[target]; 6372 if (tstate == NULL) 6373 continue; 6374 for (lun = 0; lun <= 7; lun++) { 6375 struct tmode_lstate* lstate; 6376 6377 lstate = tstate->enabled_luns[lun]; 6378 if (lstate == NULL) 6379 continue; 6380 6381 ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD, 6382 EVENT_TYPE_BUS_RESET, /*arg*/0); 6383 ahc_send_lstate_events(ahc, lstate); 6384 } 6385 } 6386 6387 /* Notify the XPT that a bus reset occurred */ 6388 xpt_async(AC_BUS_RESET, path, NULL); 6389 6390 /* 6391 * Revert to async/narrow transfers until we renegotiate. 6392 */ 6393 for (target = 0; target <= max_scsiid; target++) { 6394 6395 if (ahc->enabled_targets[target] == NULL) 6396 continue; 6397 for (initiator = 0; initiator <= max_scsiid; initiator++) { 6398 struct ahc_devinfo devinfo; 6399 6400 ahc_compile_devinfo(&devinfo, target, initiator, 6401 CAM_LUN_WILDCARD, 6402 channel, ROLE_UNKNOWN); 6403 ahc_set_width(ahc, &devinfo, path, 6404 MSG_EXT_WDTR_BUS_8_BIT, 6405 AHC_TRANS_CUR, /*paused*/TRUE); 6406 ahc_set_syncrate(ahc, &devinfo, path, 6407 /*syncrate*/NULL, /*period*/0, 6408 /*offset*/0, AHC_TRANS_CUR, 6409 /*paused*/TRUE); 6410 } 6411 } 6412 6413 if (restart_needed) 6414 restart_sequencer(ahc); 6415 else 6416 unpause_sequencer(ahc); 6417 return found; 6418 } 6419 6420 static int 6421 ahc_match_scb(struct scb *scb, int target, char channel, 6422 int lun, u_int tag, role_t role) 6423 { 6424 int targ = SCB_TARGET(scb); 6425 char chan = SCB_CHANNEL(scb); 6426 int slun = SCB_LUN(scb); 6427 int match; 6428 6429 match = ((chan == channel) || (channel == ALL_CHANNELS)); 6430 if (match != 0) 6431 match = ((targ == target) || (target == CAM_TARGET_WILDCARD)); 6432 if (match != 0) 6433 match = ((lun == slun) || (lun == CAM_LUN_WILDCARD)); 6434 if (match != 0) { 6435 int group; 6436 6437 group = XPT_FC_GROUP(scb->ccb->ccb_h.func_code); 6438 if (role == ROLE_INITIATOR) { 6439 match = (group == XPT_FC_GROUP_COMMON) 6440 && ((tag == scb->hscb->tag) 6441 || (tag == SCB_LIST_NULL)); 6442 } else if (role == ROLE_TARGET) { 6443 match = (group == XPT_FC_GROUP_TMODE) 6444 && ((tag == scb->ccb->csio.tag_id) 6445 || (tag == SCB_LIST_NULL)); 6446 } 6447 } 6448 6449 return match; 6450 } 6451 6452 static void 6453 ahc_construct_sdtr(struct ahc_softc *ahc, u_int period, u_int offset) 6454 { 6455 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 6456 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN; 6457 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR; 6458 ahc->msgout_buf[ahc->msgout_index++] = period; 6459 ahc->msgout_buf[ahc->msgout_index++] = offset; 6460 ahc->msgout_len += 5; 6461 } 6462 6463 static void 6464 ahc_construct_wdtr(struct ahc_softc *ahc, u_int bus_width) 6465 { 6466 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 6467 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN; 6468 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR; 6469 ahc->msgout_buf[ahc->msgout_index++] =
bus_width; 6470 ahc->msgout_len += 4; 6471 } 6472 6473 static void 6474 ahc_calc_residual(struct scb *scb) 6475 { 6476 struct hardware_scb *hscb; 6477 6478 hscb = scb->hscb; 6479 6480 /* 6481 * If the disconnected flag is still set, this is bogus 6482 * residual information left over from a sequencer 6483 * page-in/page-out, so ignore this case. 6484 */ 6485 if ((scb->hscb->control & DISCONNECTED) == 0) { 6486 u_int32_t resid; 6487 int resid_sgs; 6488 int sg; 6489 6490 /* 6491 * Remainder of the SG where the transfer 6492 * stopped. 6493 */ 6494 resid = (hscb->residual_data_count[2] << 16) 6495 | (hscb->residual_data_count[1] << 8) 6496 | (hscb->residual_data_count[0]); 6497 6498 /* 6499 * Add up the contents of all residual 6500 * SG segments that are after the SG where 6501 * the transfer stopped. 6502 */ 6503 resid_sgs = scb->hscb->residual_SG_count - 1/*current*/; 6504 sg = scb->sg_count - resid_sgs; 6505 while (resid_sgs > 0) { 6506 6507 resid += scb->sg_list[sg].len; 6508 sg++; 6509 resid_sgs--; 6510 } 6511 if ((scb->flags & SCB_SENSE) == 0) { 6512 6513 scb->ccb->csio.resid = resid; 6514 } else { 6515 6516 scb->ccb->csio.sense_resid = resid; 6517 } 6518 } 6519 6520 /* 6521 * Clean out the residual information in this SCB for its 6522 * next consumer. 6523 */ 6524 hscb->residual_SG_count = 0; 6525 6526 #ifdef AHC_DEBUG 6527 if (ahc_debug & AHC_SHOWMISC) { 6528 xpt_print_path(scb->ccb->ccb_h.path); 6529 printf("Handled Residual of %u bytes\n", scb->ccb->csio.resid); 6530 } 6531 #endif 6532 } 6533 6534 static void 6535 ahc_update_pending_syncrates(struct ahc_softc *ahc) 6536 { 6537 struct ccb_hdr *ccbh; 6538 int pending_ccb_count; 6539 int i; 6540 u_int saved_scbptr; 6541 6542 /* 6543 * Traverse the pending SCB list and ensure that all of the 6544 * SCBs there have the proper settings. 6545 */ 6546 ccbh = LIST_FIRST(&ahc->pending_ccbs); 6547 pending_ccb_count = 0; 6548 while (ccbh != NULL) { 6549 struct ahc_devinfo devinfo; 6550 union ccb *ccb; 6551 struct scb *pending_scb; 6552 struct hardware_scb *pending_hscb; 6553 struct ahc_initiator_tinfo *tinfo; 6554 struct tmode_tstate *tstate; 6555 u_int our_id, remote_id; 6556 6557 ccb = (union ccb*)ccbh; 6558 pending_scb = (struct scb *)ccbh->ccb_scb_ptr; 6559 pending_hscb = pending_scb->hscb; 6560 if (ccbh->func_code == XPT_CONT_TARGET_IO) { 6561 our_id = ccb->ccb_h.target_id; 6562 remote_id = ccb->ctio.init_id; 6563 } else { 6564 our_id = SCB_IS_SCSIBUS_B(pending_scb) 6565 ?
ahc->our_id_b : ahc->our_id; 6566 remote_id = ccb->ccb_h.target_id; 6567 } 6568 ahc_compile_devinfo(&devinfo, our_id, remote_id, 6569 SCB_LUN(pending_scb), 6570 SCB_CHANNEL(pending_scb), 6571 ROLE_UNKNOWN); 6572 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, 6573 our_id, remote_id, &tstate); 6574 pending_hscb->control &= ~ULTRAENB; 6575 if ((tstate->ultraenb & devinfo.target_mask) != 0) 6576 pending_hscb->control |= ULTRAENB; 6577 pending_hscb->scsirate = tinfo->scsirate; 6578 pending_hscb->scsioffset = tinfo->current.offset; 6579 pending_ccb_count++; 6580 ccbh = LIST_NEXT(ccbh, sim_links.le); 6581 } 6582 6583 if (pending_ccb_count == 0) 6584 return; 6585 6586 saved_scbptr = ahc_inb(ahc, SCBPTR); 6587 /* Ensure that the hscbs down on the card match the new information */ 6588 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 6589 u_int scb_tag; 6590 6591 ahc_outb(ahc, SCBPTR, i); 6592 scb_tag = ahc_inb(ahc, SCB_TAG); 6593 if (scb_tag != SCB_LIST_NULL) { 6594 struct ahc_devinfo devinfo; 6595 union ccb *ccb; 6596 struct scb *pending_scb; 6597 struct hardware_scb *pending_hscb; 6598 struct ahc_initiator_tinfo *tinfo; 6599 struct tmode_tstate *tstate; 6600 u_int our_id, remote_id; 6601 u_int control; 6602 6603 pending_scb = &ahc->scb_data->scbarray[scb_tag]; 6604 if (pending_scb->flags == SCB_FREE) 6605 continue; 6606 pending_hscb = pending_scb->hscb; 6607 ccb = pending_scb->ccb; 6608 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { 6609 our_id = ccb->ccb_h.target_id; 6610 remote_id = ccb->ctio.init_id; 6611 } else { 6612 our_id = SCB_IS_SCSIBUS_B(pending_scb) 6613 ? ahc->our_id_b : ahc->our_id; 6614 remote_id = ccb->ccb_h.target_id; 6615 } 6616 ahc_compile_devinfo(&devinfo, our_id, remote_id, 6617 SCB_LUN(pending_scb), 6618 SCB_CHANNEL(pending_scb), 6619 ROLE_UNKNOWN); 6620 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, 6621 our_id, remote_id, &tstate); 6622 control = ahc_inb(ahc, SCB_CONTROL); 6623 control &= ~ULTRAENB; 6624 if ((tstate->ultraenb & devinfo.target_mask) != 0) 6625 control |= ULTRAENB; 6626 ahc_outb(ahc, SCB_CONTROL, control); 6627 ahc_outb(ahc, SCB_SCSIRATE, tinfo->scsirate); 6628 ahc_outb(ahc, SCB_SCSIOFFSET, tinfo->current.offset); 6629 } 6630 } 6631 ahc_outb(ahc, SCBPTR, saved_scbptr); 6632 } 6633 6634 #if UNUSED 6635 static void 6636 ahc_dump_targcmd(struct target_cmd *cmd) 6637 { 6638 u_int8_t *byte; 6639 u_int8_t *last_byte; 6640 int i; 6641 6642 byte = &cmd->initiator_channel; 6643 /* Debugging info for received commands */ 6644 last_byte = &cmd[1].initiator_channel; 6645 6646 i = 0; 6647 while (byte < last_byte) { 6648 if (i == 0) 6649 printf("\t"); 6650 printf("%#x", *byte++); 6651 i++; 6652 if (i == 8) { 6653 printf("\n"); 6654 i = 0; 6655 } else { 6656 printf(", "); 6657 } 6658 } 6659 } 6660 #endif 6661 6662 static void 6663 ahc_shutdown(void *arg, int howto) 6664 { 6665 struct ahc_softc *ahc; 6666 int i; 6667 u_int sxfrctl1_a, sxfrctl1_b; 6668 6669 ahc = (struct ahc_softc *)arg; 6670 6671 pause_sequencer(ahc); 6672 6673 /* 6674 * Preserve the value of the SXFRCTL1 register for all channels. 6675 * It contains settings that affect termination and we don't want 6676 * to disturb the integrity of the bus during shutdown in case 6677 * we are in a multi-initiator setup. 
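* The register is sampled for each channel before ahc_reset() wipes the chip and is written back afterwards so the termination settings survive the shutdown.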
6678 */ 6679 sxfrctl1_b = 0; 6680 if ((ahc->features & AHC_TWIN) != 0) { 6681 u_int sblkctl; 6682 6683 sblkctl = ahc_inb(ahc, SBLKCTL); 6684 ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); 6685 sxfrctl1_b = ahc_inb(ahc, SXFRCTL1); 6686 ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); 6687 } 6688 6689 sxfrctl1_a = ahc_inb(ahc, SXFRCTL1); 6690 6691 /* This will reset most registers to 0, but not all */ 6692 ahc_reset(ahc); 6693 6694 if ((ahc->features & AHC_TWIN) != 0) { 6695 u_int sblkctl; 6696 6697 sblkctl = ahc_inb(ahc, SBLKCTL); 6698 ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); 6699 ahc_outb(ahc, SXFRCTL1, sxfrctl1_b); 6700 ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); 6701 } 6702 ahc_outb(ahc, SXFRCTL1, sxfrctl1_a); 6703 6704 ahc_outb(ahc, SCSISEQ, 0); 6705 ahc_outb(ahc, SXFRCTL0, 0); 6706 ahc_outb(ahc, DSPCISTATUS, 0); 6707 6708 for (i = TARG_SCSIRATE; i < HA_274_BIOSCTRL; i++) 6709 ahc_outb(ahc, i, 0); 6710 } 6711 6712 /* 6713 * Add a target mode event to this lun's queue 6714 */ 6715 static void 6716 ahc_queue_lstate_event(struct ahc_softc *ahc, struct tmode_lstate *lstate, 6717 u_int initiator_id, u_int event_type, u_int event_arg) 6718 { 6719 struct ahc_tmode_event *event; 6720 int pending; 6721 6722 xpt_freeze_devq(lstate->path, /*count*/1); 6723 if (lstate->event_w_idx >= lstate->event_r_idx) 6724 pending = lstate->event_w_idx - lstate->event_r_idx; 6725 else 6726 pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1 6727 - (lstate->event_r_idx - lstate->event_w_idx); 6728 6729 if (event_type == EVENT_TYPE_BUS_RESET 6730 || event_type == MSG_BUS_DEV_RESET) { 6731 /* 6732 * Any earlier events are irrelevant, so reset our buffer. 6733 * This has the effect of allowing us to deal with reset 6734 * floods (an external device holding down the reset line) 6735 * without losing the event that is really interesting. 6736 */ 6737 lstate->event_r_idx = 0; 6738 lstate->event_w_idx = 0; 6739 xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); 6740 } 6741 6742 if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) { 6743 xpt_print_path(lstate->path); 6744 printf("immediate event %x:%x lost\n", 6745 lstate->event_buffer[lstate->event_r_idx].event_type, 6746 lstate->event_buffer[lstate->event_r_idx].event_arg); 6747 lstate->event_r_idx++; 6748 if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) 6749 lstate->event_r_idx = 0; 6750 xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); 6751 } 6752 6753 event = &lstate->event_buffer[lstate->event_w_idx]; 6754 event->initiator_id = initiator_id; 6755 event->event_type = event_type; 6756 event->event_arg = event_arg; 6757 lstate->event_w_idx++; 6758 if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE) 6759 lstate->event_w_idx = 0; 6760 } 6761 6762 /* 6763 * Send any target mode events queued up waiting 6764 * for immediate notify resources. 
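* Events are drained from the ring buffer in order, one per available immediate notify CCB. Bus reset events complete the CCB with CAM_SCSI_BUS_RESET status; all other events are reported as received messages with the event type and argument placed in message_args[].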
6765 */ 6766 static void 6767 ahc_send_lstate_events(struct ahc_softc *ahc, struct tmode_lstate *lstate) 6768 { 6769 struct ccb_hdr *ccbh; 6770 struct ccb_immed_notify *inot; 6771 6772 while (lstate->event_r_idx != lstate->event_w_idx 6773 && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) { 6774 struct ahc_tmode_event *event; 6775 6776 event = &lstate->event_buffer[lstate->event_r_idx]; 6777 SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle); 6778 inot = (struct ccb_immed_notify *)ccbh; 6779 switch (event->event_type) { 6780 case EVENT_TYPE_BUS_RESET: 6781 ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN; 6782 break; 6783 default: 6784 ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 6785 inot->message_args[0] = event->event_type; 6786 inot->message_args[1] = event->event_arg; 6787 break; 6788 } 6789 inot->initiator_id = event->initiator_id; 6790 inot->sense_len = 0; 6791 xpt_done((union ccb *)inot); 6792 lstate->event_r_idx++; 6793 if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) 6794 lstate->event_r_idx = 0; 6795 } 6796 } 6797