1 /* 2 * Inline routines shareable across OS platforms. 3 * 4 * Copyright (c) 1994-2001 Justin T. Gibbs. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions, and the following disclaimer, 12 * without modification. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * Alternatively, this software may be distributed under the terms of the 17 * GNU Public License ("GPL"). 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 * 31 * $Id: //depot/src/aic7xxx/aic7xxx_inline.h#27 $ 32 * 33 * $FreeBSD$ 34 */ 35 36 #ifndef _AIC7XXX_INLINE_H_ 37 #define _AIC7XXX_INLINE_H_ 38 39 /************************* Sequencer Execution Control ************************/ 40 static __inline void ahc_pause_bug_fix(struct ahc_softc *ahc); 41 static __inline int ahc_is_paused(struct ahc_softc *ahc); 42 static __inline void ahc_pause(struct ahc_softc *ahc); 43 static __inline void ahc_unpause(struct ahc_softc *ahc); 44 45 /* 46 * Work around any chip bugs related to halting sequencer execution. 47 * On Ultra2 controllers, we must clear the CIOBUS stretch signal by 48 * reading a register that will set this signal and deassert it. 49 * Without this workaround, if the chip is paused, by an interrupt or 50 * manual pause while accessing scb ram, accesses to certain registers 51 * will hang the system (infinite pci retries). 52 */ 53 static __inline void 54 ahc_pause_bug_fix(struct ahc_softc *ahc) 55 { 56 if ((ahc->features & AHC_ULTRA2) != 0) 57 (void)ahc_inb(ahc, CCSCBCTL); 58 } 59 60 /* 61 * Determine whether the sequencer has halted code execution. 62 * Returns non-zero status if the sequencer is stopped. 63 */ 64 static __inline int 65 ahc_is_paused(struct ahc_softc *ahc) 66 { 67 return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0); 68 } 69 70 /* 71 * Request that the sequencer stop and wait, indefinitely, for it 72 * to stop. The sequencer will only acknowledge that it is paused 73 * once it has reached an instruction boundary and PAUSEDIS is 74 * cleared in the SEQCTL register. The sequencer may use PAUSEDIS 75 * for critical sections. 76 */ 77 static __inline void 78 ahc_pause(struct ahc_softc *ahc) 79 { 80 ahc_outb(ahc, HCNTRL, ahc->pause); 81 82 /* 83 * Since the sequencer can disable pausing in a critical section, we 84 * must loop until it actually stops. 85 */ 86 while (ahc_is_paused(ahc) == 0) 87 ; 88 89 ahc_pause_bug_fix(ahc); 90 } 91 92 /* 93 * Allow the sequencer to continue program execution. 94 * We check here to ensure that no additional interrupt 95 * sources that would cause the sequencer to halt have been 96 * asserted. If, for example, a SCSI bus reset is detected 97 * while we are fielding a different, pausing, interrupt type, 98 * we don't want to release the sequencer before going back 99 * into our interrupt handler and dealing with this new 100 * condition. 101 */ 102 static __inline void 103 ahc_unpause(struct ahc_softc *ahc) 104 { 105 if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0) 106 ahc_outb(ahc, HCNTRL, ahc->unpause); 107 } 108 109 /*********************** Untagged Transaction Routines ************************/ 110 static __inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc); 111 static __inline void ahc_release_untagged_queues(struct ahc_softc *ahc); 112 113 /* 114 * Block our completion routine from starting the next untagged 115 * transaction for this target or target lun. 116 */ 117 static __inline void 118 ahc_freeze_untagged_queues(struct ahc_softc *ahc) 119 { 120 if ((ahc->flags & AHC_SCB_BTT) == 0) 121 ahc->untagged_queue_lock++; 122 } 123 124 /* 125 * Allow the next untagged transaction for this target or target lun 126 * to be executed. We use a counting semaphore to allow the lock 127 * to be acquired recursively. Once the count drops to zero, the 128 * transaction queues will be run. 129 */ 130 static __inline void 131 ahc_release_untagged_queues(struct ahc_softc *ahc) 132 { 133 if ((ahc->flags & AHC_SCB_BTT) == 0) { 134 ahc->untagged_queue_lock--; 135 if (ahc->untagged_queue_lock == 0) 136 ahc_run_untagged_queues(ahc); 137 } 138 } 139 140 /************************** Memory mapping routines ***************************/ 141 static __inline struct ahc_dma_seg * 142 ahc_sg_bus_to_virt(struct scb *scb, 143 uint32_t sg_busaddr); 144 static __inline uint32_t 145 ahc_sg_virt_to_bus(struct scb *scb, 146 struct ahc_dma_seg *sg); 147 static __inline uint32_t 148 ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index); 149 static __inline void ahc_sync_scb(struct ahc_softc *ahc, 150 struct scb *scb, int op); 151 static __inline void ahc_sync_sglist(struct ahc_softc *ahc, 152 struct scb *scb, int op); 153 static __inline uint32_t 154 ahc_targetcmd_offset(struct ahc_softc *ahc, 155 u_int index); 156 157 static __inline struct ahc_dma_seg * 158 ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr) 159 { 160 int sg_index; 161 162 sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg); 163 /* sg_list_phys points to entry 1, not 0 */ 164 sg_index++; 165 166 return (&scb->sg_list[sg_index]); 167 } 168 169 static __inline uint32_t 170 ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg) 171 { 172 int sg_index; 173 174 /* sg_list_phys points to entry 1, not 0 */ 175 sg_index = sg - &scb->sg_list[1]; 176 177 return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list))); 178 } 179 180 static __inline uint32_t 181 ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index) 182 { 183 return (ahc->scb_data->hscb_busaddr 184 + (sizeof(struct hardware_scb) * index)); 185 } 186 187 static __inline void 188 ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op) 189 { 190 ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat, 191 ahc->scb_data->hscb_dmamap, 192 /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb), 193 /*len*/sizeof(*scb->hscb), op); 194 } 195 196 static __inline void 197 ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op) 198 { 199 if (scb->sg_count == 0) 200 return; 201 202 ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap, 203 /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr) 204 * sizeof(struct ahc_dma_seg), 205 /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op); 206 } 207 208 static __inline uint32_t 209 ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index) 210 { 211 return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo); 212 } 213 214 /******************************** Debugging ***********************************/ 215 static __inline char *ahc_name(struct ahc_softc *ahc); 216 217 static __inline char * 218 ahc_name(struct ahc_softc *ahc) 219 { 220 return (ahc->name); 221 } 222 223 /*********************** Miscelaneous Support Functions ***********************/ 224 225 static __inline void ahc_update_residual(struct scb *scb); 226 static __inline struct ahc_initiator_tinfo * 227 ahc_fetch_transinfo(struct ahc_softc *ahc, 228 char channel, u_int our_id, 229 u_int remote_id, 230 struct ahc_tmode_tstate **tstate); 231 static __inline struct scb* 232 ahc_get_scb(struct ahc_softc *ahc); 233 static __inline void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb); 234 static __inline void ahc_swap_with_next_hscb(struct ahc_softc *ahc, 235 struct scb *scb); 236 static __inline void ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb); 237 static __inline struct scsi_sense_data * 238 ahc_get_sense_buf(struct ahc_softc *ahc, 239 struct scb *scb); 240 static __inline uint32_t 241 ahc_get_sense_bufaddr(struct ahc_softc *ahc, 242 struct scb *scb); 243 244 /* 245 * Determine whether the sequencer reported a residual 246 * for this SCB/transaction. 247 */ 248 static __inline void 249 ahc_update_residual(struct scb *scb) 250 { 251 uint32_t sgptr; 252 253 sgptr = ahc_le32toh(scb->hscb->sgptr); 254 if ((sgptr & SG_RESID_VALID) != 0) 255 ahc_calc_residual(scb); 256 } 257 258 /* 259 * Return pointers to the transfer negotiation information 260 * for the specified our_id/remote_id pair. 261 */ 262 static __inline struct ahc_initiator_tinfo * 263 ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id, 264 u_int remote_id, struct ahc_tmode_tstate **tstate) 265 { 266 /* 267 * Transfer data structures are stored from the perspective 268 * of the target role. Since the parameters for a connection 269 * in the initiator role to a given target are the same as 270 * when the roles are reversed, we pretend we are the target. 271 */ 272 if (channel == 'B') 273 our_id += 8; 274 *tstate = ahc->enabled_targets[our_id]; 275 return (&(*tstate)->transinfo[remote_id]); 276 } 277 278 /* 279 * Get a free scb. If there are none, see if we can allocate a new SCB. 280 */ 281 static __inline struct scb * 282 ahc_get_scb(struct ahc_softc *ahc) 283 { 284 struct scb *scb; 285 286 if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) { 287 ahc_alloc_scbs(ahc); 288 scb = SLIST_FIRST(&ahc->scb_data->free_scbs); 289 if (scb == NULL) 290 return (NULL); 291 } 292 SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle); 293 return (scb); 294 } 295 296 /* 297 * Return an SCB resource to the free list. 298 */ 299 static __inline void 300 ahc_free_scb(struct ahc_softc *ahc, struct scb *scb) 301 { 302 struct hardware_scb *hscb; 303 304 hscb = scb->hscb; 305 /* Clean up for the next user */ 306 ahc->scb_data->scbindex[hscb->tag] = NULL; 307 scb->flags = SCB_FREE; 308 hscb->control = 0; 309 310 SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle); 311 312 /* Notify the OSM that a resource is now available. */ 313 ahc_platform_scb_free(ahc, scb); 314 } 315 316 static __inline struct scb * 317 ahc_lookup_scb(struct ahc_softc *ahc, u_int tag) 318 { 319 struct scb* scb; 320 321 scb = ahc->scb_data->scbindex[tag]; 322 if (scb != NULL) 323 ahc_sync_scb(ahc, scb, 324 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 325 return (scb); 326 } 327 328 static __inline void 329 ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb) 330 { 331 struct hardware_scb *q_hscb; 332 u_int saved_tag; 333 334 /* 335 * Our queuing method is a bit tricky. The card 336 * knows in advance which HSCB to download, and we 337 * can't disappoint it. To achieve this, the next 338 * SCB to download is saved off in ahc->next_queued_scb. 339 * When we are called to queue "an arbitrary scb", 340 * we copy the contents of the incoming HSCB to the one 341 * the sequencer knows about, swap HSCB pointers and 342 * finally assign the SCB to the tag indexed location 343 * in the scb_array. This makes sure that we can still 344 * locate the correct SCB by SCB_TAG. 345 */ 346 q_hscb = ahc->next_queued_scb->hscb; 347 saved_tag = q_hscb->tag; 348 memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb)); 349 if ((scb->flags & SCB_CDB32_PTR) != 0) { 350 q_hscb->shared_data.cdb_ptr = 351 ahc_hscb_busaddr(ahc, q_hscb->tag) 352 + offsetof(struct hardware_scb, cdb32); 353 } 354 q_hscb->tag = saved_tag; 355 q_hscb->next = scb->hscb->tag; 356 357 /* Now swap HSCB pointers. */ 358 ahc->next_queued_scb->hscb = scb->hscb; 359 scb->hscb = q_hscb; 360 361 /* Now define the mapping from tag to SCB in the scbindex */ 362 ahc->scb_data->scbindex[scb->hscb->tag] = scb; 363 } 364 365 /* 366 * Tell the sequencer about a new transaction to execute. 367 */ 368 static __inline void 369 ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb) 370 { 371 ahc_swap_with_next_hscb(ahc, scb); 372 373 if (scb->hscb->tag == SCB_LIST_NULL 374 || scb->hscb->next == SCB_LIST_NULL) 375 panic("Attempt to queue invalid SCB tag %x:%x\n", 376 scb->hscb->tag, scb->hscb->next); 377 378 /* 379 * Keep a history of SCBs we've downloaded in the qinfifo. 380 */ 381 ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag; 382 383 /* 384 * Make sure our data is consistant from the 385 * perspective of the adapter. 386 */ 387 ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 388 389 /* Tell the adapter about the newly queued SCB */ 390 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 391 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); 392 } else { 393 if ((ahc->features & AHC_AUTOPAUSE) == 0) 394 ahc_pause(ahc); 395 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); 396 if ((ahc->features & AHC_AUTOPAUSE) == 0) 397 ahc_unpause(ahc); 398 } 399 } 400 401 static __inline struct scsi_sense_data * 402 ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb) 403 { 404 int offset; 405 406 offset = scb - ahc->scb_data->scbarray; 407 return (&ahc->scb_data->sense[offset]); 408 } 409 410 static __inline uint32_t 411 ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb) 412 { 413 int offset; 414 415 offset = scb - ahc->scb_data->scbarray; 416 return (ahc->scb_data->sense_busaddr 417 + (offset * sizeof(struct scsi_sense_data))); 418 } 419 420 /************************** Interrupt Processing ******************************/ 421 static __inline void ahc_sync_qoutfifo(struct ahc_softc *ahc, int op); 422 static __inline void ahc_sync_tqinfifo(struct ahc_softc *ahc, int op); 423 static __inline u_int ahc_check_cmdcmpltqueues(struct ahc_softc *ahc); 424 static __inline void ahc_intr(struct ahc_softc *ahc); 425 426 static __inline void 427 ahc_sync_qoutfifo(struct ahc_softc *ahc, int op) 428 { 429 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, 430 /*offset*/0, /*len*/256, op); 431 } 432 433 static __inline void 434 ahc_sync_tqinfifo(struct ahc_softc *ahc, int op) 435 { 436 #ifdef AHC_TARGET_MODE 437 if ((ahc->flags & AHC_TARGETROLE) != 0) { 438 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, 439 ahc->shared_data_dmamap, 440 ahc_targetcmd_offset(ahc, 0), 441 sizeof(struct target_cmd) * AHC_TMODE_CMDS, 442 op); 443 } 444 #endif 445 } 446 447 /* 448 * See if the firmware has posted any completed commands 449 * into our in-core command complete fifos. 450 */ 451 #define AHC_RUN_QOUTFIFO 0x1 452 #define AHC_RUN_TQINFIFO 0x2 453 static __inline u_int 454 ahc_check_cmdcmpltqueues(struct ahc_softc *ahc) 455 { 456 u_int retval; 457 458 retval = 0; 459 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, 460 /*offset*/ahc->qoutfifonext, /*len*/1, 461 BUS_DMASYNC_POSTREAD); 462 if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) 463 retval |= AHC_RUN_QOUTFIFO; 464 #ifdef AHC_TARGET_MODE 465 if ((ahc->flags & AHC_TARGETROLE) != 0) { 466 ahc_dmamap_sync(ahc, ahc->shared_data_dmat, 467 ahc->shared_data_dmamap, 468 ahc_targetcmd_offset(ahc, ahc->tqinfifofnext), 469 /*len*/sizeof(struct target_cmd), 470 BUS_DMASYNC_POSTREAD); 471 if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0) 472 retval |= AHC_RUN_TQINFIFO; 473 } 474 #endif 475 return (retval); 476 } 477 478 /* 479 * Catch an interrupt from the adapter 480 */ 481 static __inline void 482 ahc_intr(struct ahc_softc *ahc) 483 { 484 u_int intstat; 485 u_int queuestat; 486 487 /* 488 * Instead of directly reading the interrupt status register, 489 * infer the cause of the interrupt by checking our in-core 490 * completion queues. This avoids a costly PCI bus read in 491 * most cases. 492 */ 493 if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0 494 && (queuestat = ahc_check_cmdcmpltqueues(ahc)) != 0) 495 intstat = CMDCMPLT; 496 else { 497 intstat = ahc_inb(ahc, INTSTAT); 498 queuestat = AHC_RUN_QOUTFIFO; 499 #ifdef AHC_TARGET_MODE 500 if ((ahc->flags & AHC_TARGETROLE) != 0) 501 queuestat |= AHC_RUN_TQINFIFO; 502 #endif 503 } 504 505 if (intstat & CMDCMPLT) { 506 ahc_outb(ahc, CLRINT, CLRCMDINT); 507 508 /* 509 * Ensure that the chip sees that we've cleared 510 * this interrupt before we walk the output fifo. 511 * Otherwise, we may, due to posted bus writes, 512 * clear the interrupt after we finish the scan, 513 * and after the sequencer has added new entries 514 * and asserted the interrupt again. 515 */ 516 ahc_flush_device_writes(ahc); 517 #ifdef AHC_TARGET_MODE 518 if ((queuestat & AHC_RUN_QOUTFIFO) != 0) 519 #endif 520 ahc_run_qoutfifo(ahc); 521 #ifdef AHC_TARGET_MODE 522 if ((queuestat & AHC_RUN_TQINFIFO) != 0) 523 ahc_run_tqinfifo(ahc, /*paused*/FALSE); 524 #endif 525 } 526 527 if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) 528 /* Hot eject */ 529 return; 530 531 if ((intstat & INT_PEND) == 0) { 532 #if AHC_PCI_CONFIG > 0 533 if (ahc->unsolicited_ints > 500) { 534 ahc->unsolicited_ints = 0; 535 if ((ahc->chip & AHC_PCI) != 0 536 && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0) 537 ahc->bus_intr(ahc); 538 } 539 #endif 540 ahc->unsolicited_ints++; 541 return; 542 } 543 ahc->unsolicited_ints = 0; 544 545 if (intstat & BRKADRINT) { 546 ahc_handle_brkadrint(ahc); 547 /* Fatal error, no more interrupts to handle. */ 548 return; 549 } 550 551 if ((intstat & (SEQINT|SCSIINT)) != 0) 552 ahc_pause_bug_fix(ahc); 553 554 if ((intstat & SEQINT) != 0) 555 ahc_handle_seqint(ahc, intstat); 556 557 if ((intstat & SCSIINT) != 0) 558 ahc_handle_scsiint(ahc, intstat); 559 } 560 561 #endif /* _AIC7XXX_INLINE_H_ */ 562