/*-
 * Inline routines shareable across OS platforms.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1994-2001 Justin T. Gibbs.
 * Copyright (c) 2000-2001 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_inline.h#47 $
 *
 * $FreeBSD$
 */

#ifndef _AIC7XXX_INLINE_H_
#define _AIC7XXX_INLINE_H_

/************************* Sequencer Execution Control ************************/
static __inline void	ahc_pause_bug_fix(struct ahc_softc *ahc);
static __inline int	ahc_is_paused(struct ahc_softc *ahc);
static __inline void	ahc_pause(struct ahc_softc *ahc);
static __inline void	ahc_unpause(struct ahc_softc *ahc);

/*
 * Work around any chip bugs related to halting sequencer execution.
 * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
 * reading a register that will set this signal and deassert it.
 * Without this workaround, if the chip is paused, by an interrupt or
 * manual pause, while accessing SCB RAM, accesses to certain registers
 * will hang the system (infinite PCI retries).
 */
static __inline void
ahc_pause_bug_fix(struct ahc_softc *ahc)
{
	if ((ahc->features & AHC_ULTRA2) != 0)
		(void)ahc_inb(ahc, CCSCBCTL);
}

/*
 * Determine whether the sequencer has halted code execution.
 * Returns non-zero status if the sequencer is stopped.
 */
static __inline int
ahc_is_paused(struct ahc_softc *ahc)
{
	return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
}

/*
 * Request that the sequencer stop and wait, indefinitely, for it
 * to stop.  The sequencer will only acknowledge that it is paused
 * once it has reached an instruction boundary and PAUSEDIS is
 * cleared in the SEQCTL register.  The sequencer may use PAUSEDIS
 * for critical sections.
 */
static __inline void
ahc_pause(struct ahc_softc *ahc)
{
	ahc_outb(ahc, HCNTRL, ahc->pause);

	/*
	 * Since the sequencer can disable pausing in a critical section, we
	 * must loop until it actually stops.
	 */
	while (ahc_is_paused(ahc) == 0)
		;

	ahc_pause_bug_fix(ahc);
}

/*
 * Allow the sequencer to continue program execution.
 * We check here to ensure that no additional interrupt
 * sources that would cause the sequencer to halt have been
 * asserted.  If, for example, a SCSI bus reset is detected
 * while we are fielding a different, pausing, interrupt type,
 * we don't want to release the sequencer before going back
 * into our interrupt handler and dealing with this new
 * condition.
 */
static __inline void
ahc_unpause(struct ahc_softc *ahc)
{
	if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
		ahc_outb(ahc, HCNTRL, ahc->unpause);
}
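
/*
 * Usage sketch (hypothetical; not part of the driver): callers that must
 * touch sequencer-owned registers bracket the access with a pause/unpause
 * pair, pausing only if the sequencer was running when they arrived.
 *
 *	int was_paused = ahc_is_paused(ahc);
 *
 *	if (!was_paused)
 *		ahc_pause(ahc);
 *	... access registers owned by the sequencer ...
 *	if (!was_paused)
 *		ahc_unpause(ahc);
 */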

/*********************** Untagged Transaction Routines ************************/
static __inline void	ahc_freeze_untagged_queues(struct ahc_softc *ahc);
static __inline void	ahc_release_untagged_queues(struct ahc_softc *ahc);

/*
 * Block our completion routine from starting the next untagged
 * transaction for this target or target lun.
 */
static __inline void
ahc_freeze_untagged_queues(struct ahc_softc *ahc)
{
	if ((ahc->flags & AHC_SCB_BTT) == 0)
		ahc->untagged_queue_lock++;
}

/*
 * Allow the next untagged transaction for this target or target lun
 * to be executed.  We use a counting semaphore to allow the lock
 * to be acquired recursively.  Once the count drops to zero, the
 * transaction queues will be run.
 */
static __inline void
ahc_release_untagged_queues(struct ahc_softc *ahc)
{
	if ((ahc->flags & AHC_SCB_BTT) == 0) {
		ahc->untagged_queue_lock--;
		if (ahc->untagged_queue_lock == 0)
			ahc_run_untagged_queues(ahc);
	}
}

/************************** Memory mapping routines ***************************/
static __inline struct ahc_dma_seg *
			ahc_sg_bus_to_virt(struct scb *scb,
					   uint32_t sg_busaddr);
static __inline uint32_t
			ahc_sg_virt_to_bus(struct scb *scb,
					   struct ahc_dma_seg *sg);
static __inline uint32_t
			ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index);
static __inline void	ahc_sync_scb(struct ahc_softc *ahc,
				     struct scb *scb, int op);
static __inline void	ahc_sync_sglist(struct ahc_softc *ahc,
					struct scb *scb, int op);
static __inline uint32_t
			ahc_targetcmd_offset(struct ahc_softc *ahc,
					     u_int index);

static __inline struct ahc_dma_seg *
ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
{
	int sg_index;

	sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
	/* sg_list_phys points to entry 1, not 0 */
	sg_index++;

	return (&scb->sg_list[sg_index]);
}

static __inline uint32_t
ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
{
	int sg_index;

	/* sg_list_phys points to entry 1, not 0 */
	sg_index = sg - &scb->sg_list[1];

	return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
}
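
/*
 * Note that sg_list_phys corresponds to sg_list[1] rather than
 * sg_list[0], so the two conversions above are inverses over the
 * segments the sequencer actually addresses.  A sanity-check sketch
 * (hypothetical):
 *
 *	struct ahc_dma_seg *sg = &scb->sg_list[1];
 *
 *	assert(ahc_sg_bus_to_virt(scb, ahc_sg_virt_to_bus(scb, sg)) == sg);
 */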

static __inline uint32_t
ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
{
	return (ahc->scb_data->hscb_busaddr
		+ (sizeof(struct hardware_scb) * index));
}

static __inline void
ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op)
{
	aic_dmamap_sync(ahc, ahc->scb_data->hscb_dmat,
			ahc->scb_data->hscb_dmamap,
			/*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb),
			/*len*/sizeof(*scb->hscb), op);
}

static __inline void
ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op)
{
	if (scb->sg_count == 0)
		return;

	aic_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap,
			/*offset*/(scb->sg_list - scb->sg_map->sg_vaddr)
				* sizeof(struct ahc_dma_seg),
			/*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op);
}

static __inline uint32_t
ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index)
{
	return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo);
}

/******************************** Debugging ***********************************/
static __inline char *ahc_name(struct ahc_softc *ahc);

static __inline char *
ahc_name(struct ahc_softc *ahc)
{
	return (ahc->name);
}

/********************** Miscellaneous Support Functions ***********************/

static __inline void	ahc_update_residual(struct ahc_softc *ahc,
					    struct scb *scb);
static __inline struct ahc_initiator_tinfo *
			ahc_fetch_transinfo(struct ahc_softc *ahc,
					    char channel, u_int our_id,
					    u_int remote_id,
					    struct ahc_tmode_tstate **tstate);
static __inline uint16_t
			ahc_inw(struct ahc_softc *ahc, u_int port);
static __inline void	ahc_outw(struct ahc_softc *ahc, u_int port,
				 u_int value);
static __inline uint32_t
			ahc_inl(struct ahc_softc *ahc, u_int port);
static __inline void	ahc_outl(struct ahc_softc *ahc, u_int port,
				 uint32_t value);
static __inline uint64_t
			ahc_inq(struct ahc_softc *ahc, u_int port);
static __inline void	ahc_outq(struct ahc_softc *ahc, u_int port,
				 uint64_t value);
static __inline struct scb*
			ahc_get_scb(struct ahc_softc *ahc);
static __inline void	ahc_free_scb(struct ahc_softc *ahc, struct scb *scb);
static __inline struct scb *
			ahc_lookup_scb(struct ahc_softc *ahc, u_int tag);
static __inline void	ahc_swap_with_next_hscb(struct ahc_softc *ahc,
						struct scb *scb);
static __inline void	ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb);
static __inline struct scsi_sense_data *
			ahc_get_sense_buf(struct ahc_softc *ahc,
					  struct scb *scb);
static __inline uint32_t
			ahc_get_sense_bufaddr(struct ahc_softc *ahc,
					      struct scb *scb);

/*
 * Determine whether the sequencer reported a residual
 * for this SCB/transaction and, if so, calculate it.
 */
static __inline void
ahc_update_residual(struct ahc_softc *ahc, struct scb *scb)
{
	uint32_t sgptr;

	sgptr = aic_le32toh(scb->hscb->sgptr);
	if ((sgptr & SG_RESID_VALID) != 0)
		ahc_calc_residual(ahc, scb);
}

/*
 * Return pointers to the transfer negotiation information
 * for the specified our_id/remote_id pair.
 */
static __inline struct ahc_initiator_tinfo *
ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
		    u_int remote_id, struct ahc_tmode_tstate **tstate)
{
	/*
	 * Transfer data structures are stored from the perspective
	 * of the target role.  Since the parameters for a connection
	 * in the initiator role to a given target are the same as
	 * when the roles are reversed, we pretend we are the target.
	 */
	if (channel == 'B')
		our_id += 8;
	*tstate = ahc->enabled_targets[our_id];
	return (&(*tstate)->transinfo[remote_id]);
}
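
/*
 * Usage sketch (hypothetical; "target_id" stands in for a real target
 * number): look up the negotiation state for a target on channel A from
 * the perspective of our own initiator ID.
 *
 *	struct ahc_tmode_tstate *tstate;
 *	struct ahc_initiator_tinfo *tinfo;
 *
 *	tinfo = ahc_fetch_transinfo(ahc, 'A', ahc->our_id,
 *				    target_id, &tstate);
 */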

static __inline uint16_t
ahc_inw(struct ahc_softc *ahc, u_int port)
{
	return ((ahc_inb(ahc, port+1) << 8) | ahc_inb(ahc, port));
}

static __inline void
ahc_outw(struct ahc_softc *ahc, u_int port, u_int value)
{
	ahc_outb(ahc, port, value & 0xFF);
	ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
}

static __inline uint32_t
ahc_inl(struct ahc_softc *ahc, u_int port)
{
	return ((ahc_inb(ahc, port))
	      | (ahc_inb(ahc, port+1) << 8)
	      | (ahc_inb(ahc, port+2) << 16)
	      | (ahc_inb(ahc, port+3) << 24));
}

static __inline void
ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value)
{
	ahc_outb(ahc, port, (value) & 0xFF);
	ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF);
	ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF);
	ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF);
}

static __inline uint64_t
ahc_inq(struct ahc_softc *ahc, u_int port)
{
	return ((ahc_inb(ahc, port))
	      | (ahc_inb(ahc, port+1) << 8)
	      | (ahc_inb(ahc, port+2) << 16)
	      | (ahc_inb(ahc, port+3) << 24)
	      | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
	      | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
	      | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
	      | (((uint64_t)ahc_inb(ahc, port+7)) << 56));
}

static __inline void
ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value)
{
	ahc_outb(ahc, port, value & 0xFF);
	ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
	ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
	ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
	ahc_outb(ahc, port+4, (value >> 32) & 0xFF);
	ahc_outb(ahc, port+5, (value >> 40) & 0xFF);
	ahc_outb(ahc, port+6, (value >> 48) & 0xFF);
	ahc_outb(ahc, port+7, (value >> 56) & 0xFF);
}

/*
 * Get a free SCB.  If there are none, see if we can allocate a new one.
 */
static __inline struct scb *
ahc_get_scb(struct ahc_softc *ahc)
{
	struct scb *scb;

	if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) {
		if (ahc_alloc_scbs(ahc) == 0)
			return (NULL);
		scb = SLIST_FIRST(&ahc->scb_data->free_scbs);
		if (scb == NULL)
			return (NULL);
	}
	SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
	return (scb);
}

/*
 * Return an SCB resource to the free list.
 */
static __inline void
ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *hscb;

	hscb = scb->hscb;
	/* Clean up for the next user */
	ahc->scb_data->scbindex[hscb->tag] = NULL;
	scb->flags = SCB_FLAG_NONE;
	hscb->control = 0;

	SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);

	/* Notify the OSM that a resource is now available. */
	aic_platform_scb_free(ahc, scb);
}

static __inline struct scb *
ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
{
	struct scb *scb;

	scb = ahc->scb_data->scbindex[tag];
	if (scb != NULL)
		ahc_sync_scb(ahc, scb,
			     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	return (scb);
}

static __inline void
ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *q_hscb;
	u_int  saved_tag;

	/*
	 * Our queuing method is a bit tricky.  The card
	 * knows in advance which HSCB to download, and we
	 * can't disappoint it.  To achieve this, the next
	 * SCB to download is saved off in ahc->next_queued_scb.
	 * When we are called to queue "an arbitrary scb",
	 * we copy the contents of the incoming HSCB to the one
	 * the sequencer knows about, swap HSCB pointers and
	 * finally assign the SCB to the tag indexed location
	 * in the scb_array.  This makes sure that we can still
	 * locate the correct SCB by SCB_TAG.
	 */
	q_hscb = ahc->next_queued_scb->hscb;
	saved_tag = q_hscb->tag;
	memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
	if ((scb->flags & SCB_CDB32_PTR) != 0) {
		q_hscb->shared_data.cdb_ptr =
		    aic_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
			      + offsetof(struct hardware_scb, cdb32));
	}
	q_hscb->tag = saved_tag;
	q_hscb->next = scb->hscb->tag;

	/* Now swap HSCB pointers. */
	ahc->next_queued_scb->hscb = scb->hscb;
	scb->hscb = q_hscb;

	/* Now define the mapping from tag to SCB in the scbindex */
	ahc->scb_data->scbindex[scb->hscb->tag] = scb;
}

/*
 * Tell the sequencer about a new transaction to execute.
 */
static __inline void
ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
{
	ahc_swap_with_next_hscb(ahc, scb);

	if (scb->hscb->tag == SCB_LIST_NULL
	 || scb->hscb->next == SCB_LIST_NULL)
		panic("Attempt to queue invalid SCB tag %x:%x\n",
		      scb->hscb->tag, scb->hscb->next);

	/*
	 * Set up data "oddness".
	 */
	scb->hscb->lun &= LID;
	if (aic_get_transfer_length(scb) & 0x1)
		scb->hscb->lun |= SCB_XFERLEN_ODD;

	/*
	 * Keep a history of SCBs we've downloaded in the qinfifo.
	 */
	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;

	/*
	 * Make sure our data is consistent from the
	 * perspective of the adapter.
	 */
	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Tell the adapter about the newly queued SCB */
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		if ((ahc->features & AHC_AUTOPAUSE) == 0)
			ahc_pause(ahc);
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
		if ((ahc->features & AHC_AUTOPAUSE) == 0)
			ahc_unpause(ahc);
	}
}

static __inline struct scsi_sense_data *
ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
{
	int offset;

	offset = scb - ahc->scb_data->scbarray;
	return (&ahc->scb_data->sense[offset]);
}

static __inline uint32_t
ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
{
	int offset;

	offset = scb - ahc->scb_data->scbarray;
	return (ahc->scb_data->sense_busaddr
	       + (offset * sizeof(struct scsi_sense_data)));
}
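
/*
 * SCB lifecycle sketch (hypothetical; the real submission paths live in
 * the OSM front ends): allocate an SCB, fill in its hardware SCB, hand
 * it to the sequencer, and return it to the free list once it completes.
 *
 *	struct scb *scb;
 *
 *	if ((scb = ahc_get_scb(ahc)) == NULL)
 *		... no SCBs available; requeue the request ...
 *	... initialize scb->hscb and any S/G list ...
 *	ahc_queue_scb(ahc, scb);
 *
 *	... in the completion path, given the completed tag ...
 *	scb = ahc_lookup_scb(ahc, tag);
 *	ahc_free_scb(ahc, scb);
 */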

/************************** Interrupt Processing ******************************/
static __inline void	ahc_sync_qoutfifo(struct ahc_softc *ahc, int op);
static __inline void	ahc_sync_tqinfifo(struct ahc_softc *ahc, int op);
static __inline u_int	ahc_check_cmdcmpltqueues(struct ahc_softc *ahc);
static __inline int	ahc_intr(struct ahc_softc *ahc);

static __inline void
ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
{
	aic_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
			/*offset*/0, /*len*/256, op);
}

static __inline void
ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
{
#ifdef AHC_TARGET_MODE
	if ((ahc->flags & AHC_TARGETROLE) != 0) {
		aic_dmamap_sync(ahc, ahc->shared_data_dmat,
				ahc->shared_data_dmamap,
				ahc_targetcmd_offset(ahc, 0),
				sizeof(struct target_cmd) * AHC_TMODE_CMDS,
				op);
	}
#endif
}

/*
 * See if the firmware has posted any completed commands
 * into our in-core command-complete FIFOs.
 */
#define AHC_RUN_QOUTFIFO 0x1
#define AHC_RUN_TQINFIFO 0x2
static __inline u_int
ahc_check_cmdcmpltqueues(struct ahc_softc *ahc)
{
	u_int retval;

	retval = 0;
	aic_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
			/*offset*/ahc->qoutfifonext, /*len*/1,
			BUS_DMASYNC_POSTREAD);
	if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL)
		retval |= AHC_RUN_QOUTFIFO;
#ifdef AHC_TARGET_MODE
	if ((ahc->flags & AHC_TARGETROLE) != 0
	 && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) {
		aic_dmamap_sync(ahc, ahc->shared_data_dmat,
				ahc->shared_data_dmamap,
				ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
				/*len*/sizeof(struct target_cmd),
				BUS_DMASYNC_POSTREAD);
		if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0)
			retval |= AHC_RUN_TQINFIFO;
	}
#endif
	return (retval);
}
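
/*
 * Sketch of consuming the returned flags (hypothetical; the real
 * dispatch is in ahc_intr() below, which folds this test into its
 * INTSTAT handling):
 *
 *	u_int queues = ahc_check_cmdcmpltqueues(ahc);
 *
 *	if ((queues & AHC_RUN_QOUTFIFO) != 0)
 *		ahc_run_qoutfifo(ahc);
 *	if ((queues & AHC_RUN_TQINFIFO) != 0)
 *		ahc_run_tqinfifo(ahc, FALSE);	(paused == FALSE)
 */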

/*
 * Catch an interrupt from the adapter
 */
static __inline int
ahc_intr(struct ahc_softc *ahc)
{
	u_int intstat;

	if ((ahc->pause & INTEN) == 0) {
		/*
		 * Our interrupt is not enabled on the chip
		 * and may be disabled for re-entrancy reasons,
		 * so just return.  This is likely just a shared
		 * interrupt.
		 */
		return (0);
	}
	/*
	 * Instead of directly reading the interrupt status register,
	 * infer the cause of the interrupt by checking our in-core
	 * completion queues.  This avoids a costly PCI bus read in
	 * most cases.
	 */
	if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
	 && (ahc_check_cmdcmpltqueues(ahc) != 0))
		intstat = CMDCMPLT;
	else {
		intstat = ahc_inb(ahc, INTSTAT);
	}

	if ((intstat & INT_PEND) == 0) {
#if AIC_PCI_CONFIG > 0
		if (ahc->unsolicited_ints > 500) {
			ahc->unsolicited_ints = 0;
			if ((ahc->chip & AHC_PCI) != 0
			 && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
				ahc->bus_intr(ahc);
		}
#endif
		ahc->unsolicited_ints++;
		return (0);
	}
	ahc->unsolicited_ints = 0;

	if (intstat & CMDCMPLT) {
		ahc_outb(ahc, CLRINT, CLRCMDINT);

		/*
		 * Ensure that the chip sees that we've cleared
		 * this interrupt before we walk the output fifo.
		 * Otherwise, we may, due to posted bus writes,
		 * clear the interrupt after we finish the scan,
		 * and after the sequencer has added new entries
		 * and asserted the interrupt again.
		 */
		ahc_flush_device_writes(ahc);
		ahc_run_qoutfifo(ahc);
#ifdef AHC_TARGET_MODE
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			ahc_run_tqinfifo(ahc, /*paused*/FALSE);
#endif
	}

	/*
	 * Handle statuses that may invalidate our cached
	 * copy of INTSTAT separately.
	 */
	if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
		/* Hot eject.  Do nothing */
	} else if (intstat & BRKADRINT) {
		ahc_handle_brkadrint(ahc);
	} else if ((intstat & (SEQINT|SCSIINT)) != 0) {
		ahc_pause_bug_fix(ahc);

		if ((intstat & SEQINT) != 0)
			ahc_handle_seqint(ahc, intstat);

		if ((intstat & SCSIINT) != 0)
			ahc_handle_scsiint(ahc, intstat);
	}
	return (1);
}

#endif /* _AIC7XXX_INLINE_H_ */