/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/scsi/scsi.h>

#include "ghd.h"

/* ghd_poll() function codes: */
typedef enum {
        GHD_POLL_REQUEST,       /* wait for a specific request */
        GHD_POLL_DEVICE,        /* wait for a specific device to idle */
        GHD_POLL_ALL            /* wait for the whole bus to idle */
} gpoll_t;

/*
 * Local functions:
 */
static gcmd_t   *ghd_doneq_get(ccc_t *cccp);
static void     ghd_doneq_pollmode_enter(ccc_t *cccp);
static void     ghd_doneq_pollmode_exit(ccc_t *cccp);
static uint_t   ghd_doneq_process(caddr_t arg);
static void     ghd_do_reset_notify_callbacks(ccc_t *cccp);

static int      ghd_poll(ccc_t *cccp, gpoll_t polltype, ulong_t polltime,
                    gcmd_t *poll_gcmdp, gtgt_t *gtgtp, void *intr_status);


/*
 * Local configuration variables
 */

ulong_t ghd_tran_abort_timeout = 5;
ulong_t ghd_tran_abort_lun_timeout = 5;
ulong_t ghd_tran_reset_target_timeout = 5;
ulong_t ghd_tran_reset_bus_timeout = 5;

static int
ghd_doneq_init(ccc_t *cccp)
{
        ddi_iblock_cookie_t iblock;

        L2_INIT(&cccp->ccc_doneq);
        cccp->ccc_hba_pollmode = TRUE;

        if (ddi_add_softintr(cccp->ccc_hba_dip, DDI_SOFTINT_LOW,
            &cccp->ccc_doneq_softid, &iblock, NULL,
            ghd_doneq_process, (caddr_t)cccp) != DDI_SUCCESS) {
                GDBG_ERROR(("ghd_doneq_init: add softintr failed cccp 0x%p\n",
                    (void *)cccp));
                return (FALSE);
        }

        mutex_init(&cccp->ccc_doneq_mutex, NULL, MUTEX_DRIVER, iblock);
        ghd_doneq_pollmode_exit(cccp);
        return (TRUE);
}

/*
 * ghd_complete():
 *
 *      The HBA driver calls this entry point when it's completely
 *      done processing a request.
 *
 *      See the GHD_COMPLETE_INLINE() macro in ghd.h for the actual code.
 */

void
ghd_complete(ccc_t *cccp, gcmd_t *gcmdp)
{
        ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
        GHD_COMPLETE_INLINE(cccp, gcmdp);
}
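
/*
 * Example (hypothetical): an HBA driver's hardware-completion path,
 * usually reached from its process_intr callback with the HBA mutex
 * already held, hands each finished request back to GHD like this:
 *
 *      static void
 *      xhba_hw_done(struct xhba *hba, struct xhba_ccb *ccbp)
 *      {
 *              ASSERT(mutex_owned(&hba->x_ccc.ccc_hba_mutex));
 *              ghd_complete(&hba->x_ccc, ccbp->ccb_gcmdp);
 *      }
 *
 * The xhba, xhba_ccb, x_ccc, and ccb_gcmdp names are illustrative only;
 * the sketch assumes the driver embeds a ccc_t in its soft state and a
 * gcmd_t pointer in its per-command CCB.
 */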

/*
 * ghd_doneq_put_head():
 *
 *      Mark the request done and prepend it to the doneq.
 *      See the GHD_DONEQ_PUT_HEAD_INLINE() macro in ghd.h for
 *      the actual code.
 */
void
ghd_doneq_put_head(ccc_t *cccp, gcmd_t *gcmdp)
{
        GHD_DONEQ_PUT_HEAD_INLINE(cccp, gcmdp)
}

/*
 * ghd_doneq_put_tail():
 *
 *      Mark the request done and append it to the doneq.
 *      See the GHD_DONEQ_PUT_TAIL_INLINE() macro in ghd.h for
 *      the actual code.
 */
void
ghd_doneq_put_tail(ccc_t *cccp, gcmd_t *gcmdp)
{
        GHD_DONEQ_PUT_TAIL_INLINE(cccp, gcmdp)
}

static gcmd_t *
ghd_doneq_get(ccc_t *cccp)
{
        kmutex_t *doneq_mutexp = &cccp->ccc_doneq_mutex;
        gcmd_t   *gcmdp;

        mutex_enter(doneq_mutexp);
        if ((gcmdp = L2_next(&cccp->ccc_doneq)) != NULL)
                L2_delete(&gcmdp->cmd_q);
        mutex_exit(doneq_mutexp);
        return (gcmdp);
}


static void
ghd_doneq_pollmode_enter(ccc_t *cccp)
{
        kmutex_t *doneq_mutexp = &cccp->ccc_doneq_mutex;

        mutex_enter(doneq_mutexp);
        cccp->ccc_hba_pollmode = TRUE;
        mutex_exit(doneq_mutexp);
}


static void
ghd_doneq_pollmode_exit(ccc_t *cccp)
{
        kmutex_t *doneq_mutexp = &cccp->ccc_doneq_mutex;

        mutex_enter(doneq_mutexp);
        cccp->ccc_hba_pollmode = FALSE;
        mutex_exit(doneq_mutexp);

        /* trigger software interrupt for the completion callbacks */
        if (!L2_EMPTY(&cccp->ccc_doneq)) {
                /*
                 * If we are panicking we should just call the completion
                 * function directly, since we cannot use soft interrupts
                 * or timeouts during panic.
                 */
                if (!ddi_in_panic())
                        ddi_trigger_softintr(cccp->ccc_doneq_softid);
                else
                        (void) ghd_doneq_process((caddr_t)cccp);
        }
}


/* ***************************************************************** */

/*
 *
 * ghd_doneq_process()
 *
 *      This function is called directly from the software interrupt
 *      handler.
 *
 *      The doneq is protected by a mutex separate from the
 *      HBA mutex in order to avoid mutex contention on MP systems.
 *
 */

static uint_t
ghd_doneq_process(caddr_t arg)
{
        ccc_t    *cccp = (ccc_t *)arg;
        kmutex_t *doneq_mutexp;
        gcmd_t   *gcmdp;
        int       rc = DDI_INTR_UNCLAIMED;

        doneq_mutexp = &cccp->ccc_doneq_mutex;

        for (;;) {
                mutex_enter(doneq_mutexp);
                /* skip if FLAG_NOINTR request in progress */
                if (cccp->ccc_hba_pollmode)
                        break;
                /* pop the first one from the done Q */
                if ((gcmdp = L2_next(&cccp->ccc_doneq)) == NULL)
                        break;
                L2_delete(&gcmdp->cmd_q);

                if (gcmdp->cmd_flags & GCMDFLG_RESET_NOTIFY) {
                        /* special request; processed here and discarded */
                        ghd_do_reset_notify_callbacks(cccp);
                        ghd_gcmd_free(gcmdp);
                        mutex_exit(doneq_mutexp);
                        continue;
                }

                /*
                 * drop the mutex since the completion
                 * function can re-enter the top half via
                 * ghd_transport()
                 */
                mutex_exit(doneq_mutexp);
                gcmdp->cmd_state = GCMD_STATE_IDLE;
                (*cccp->ccc_hba_complete)(cccp->ccc_hba_handle, gcmdp, TRUE);
#ifdef notyet
                /* I don't think this is ever necessary */
                rc = DDI_INTR_CLAIMED;
#endif
        }
        mutex_exit(doneq_mutexp);
        return (rc);
}

static void
ghd_do_reset_notify_callbacks(ccc_t *cccp)
{
        ghd_reset_notify_list_t *rnp;
        L2el_t *rnl = &cccp->ccc_reset_notify_list;

        ASSERT(mutex_owned(&cccp->ccc_doneq_mutex));

        /* lock the reset notify list while we operate on it */
        mutex_enter(&cccp->ccc_reset_notify_mutex);

        for (rnp = (ghd_reset_notify_list_t *)L2_next(rnl);
            rnp != NULL;
            rnp = (ghd_reset_notify_list_t *)L2_next(&rnp->l2_link)) {

                /* don't call if the HBA driver didn't set it */
                if (cccp->ccc_hba_reset_notify_callback) {
                        (*cccp->ccc_hba_reset_notify_callback)(rnp->gtgtp,
                            rnp->callback, rnp->arg);
                }
        }
        mutex_exit(&cccp->ccc_reset_notify_mutex);
}
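
/*
 * Example (hypothetical): the hba_complete callback invoked by
 * ghd_doneq_process() above is the one supplied to ghd_register().
 * A minimal sketch for a SCSA HBA might be:
 *
 *      static void
 *      xhba_hba_complete(void *hba_handle, gcmd_t *gcmdp, int do_callback)
 *      {
 *              struct scsi_pkt *pktp = GCMDP2PKTP(gcmdp);
 *
 *              xhba_decode_status(hba_handle, gcmdp, pktp);
 *              if (do_callback && pktp->pkt_comp != NULL)
 *                      (*pktp->pkt_comp)(pktp);
 *      }
 *
 * xhba_hba_complete() and xhba_decode_status() are illustrative names,
 * and a GCMDP2PKTP()-style gcmd_t-to-scsi_pkt mapping is assumed.
 * do_callback is FALSE when ghd_transport() completes a polled
 * (FLAG_NOINTR) request, as seen later in this file.
 */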
/* ***************************************************************** */


/*
 * ghd_register()
 *
 *      Do the usual interrupt handler setup stuff.
 *
 *      Also, set up the HBA mutex, the wait queue mutex, the done
 *      queue mutex, and the reset notify mutex. The permitted locking
 *      orders are:
 *
 *      1. enter(waitq)
 *      2. enter(activel)
 *      3. enter(doneq)
 *      4. enter(HBA) then enter(activel)
 *      5. enter(HBA) then enter(doneq)
 *      6. enter(HBA) then enter(waitq)
 *      7. enter(waitq) then tryenter(HBA)
 *
 *      Note: cases 6 and 7 won't deadlock because case 7 is always
 *      a mutex_tryenter() call.
 *
 */


int
ghd_register(char *labelp,
        ccc_t   *cccp,
        dev_info_t *dip,
        int     inumber,
        void    *hba_handle,
        int     (*ccballoc)(gtgt_t *, gcmd_t *, int, int, int, int),
        void    (*ccbfree)(gcmd_t *),
        void    (*sg_func)(gcmd_t *, ddi_dma_cookie_t *, int, int),
        int     (*hba_start)(void *, gcmd_t *),
        void    (*hba_complete)(void *, gcmd_t *, int),
        uint_t  (*int_handler)(caddr_t),
        int     (*get_status)(void *, void *),
        void    (*process_intr)(void *, void *),
        int     (*timeout_func)(void *, gcmd_t *, gtgt_t *, gact_t, int),
        tmr_t   *tmrp,
        void    (*hba_reset_notify_callback)(gtgt_t *,
                    void (*)(caddr_t), caddr_t))
{

        cccp->ccc_label = labelp;
        cccp->ccc_hba_dip = dip;
        cccp->ccc_ccballoc = ccballoc;
        cccp->ccc_ccbfree = ccbfree;
        cccp->ccc_sg_func = sg_func;
        cccp->ccc_hba_start = hba_start;
        cccp->ccc_hba_complete = hba_complete;
        cccp->ccc_process_intr = process_intr;
        cccp->ccc_get_status = get_status;
        cccp->ccc_hba_handle = hba_handle;
        cccp->ccc_hba_reset_notify_callback = hba_reset_notify_callback;

        /* initialize the HBA's list headers */
        CCCP_INIT(cccp);

        if (ddi_get_iblock_cookie(dip, inumber, &cccp->ccc_iblock)
            != DDI_SUCCESS) {

                return (FALSE);
        }

        mutex_init(&cccp->ccc_hba_mutex, NULL, MUTEX_DRIVER, cccp->ccc_iblock);

        mutex_init(&cccp->ccc_waitq_mutex, NULL, MUTEX_DRIVER,
            cccp->ccc_iblock);

        mutex_init(&cccp->ccc_reset_notify_mutex, NULL, MUTEX_DRIVER,
            cccp->ccc_iblock);

        /* Establish interrupt handler */
        if (ddi_add_intr(dip, inumber, &cccp->ccc_iblock, NULL,
            int_handler, (caddr_t)hba_handle) != DDI_SUCCESS) {
                mutex_destroy(&cccp->ccc_hba_mutex);
                mutex_destroy(&cccp->ccc_waitq_mutex);
                mutex_destroy(&cccp->ccc_reset_notify_mutex);

                return (FALSE);
        }

        if (ghd_timer_attach(cccp, tmrp, timeout_func) == FALSE) {
                ddi_remove_intr(cccp->ccc_hba_dip, 0, cccp->ccc_iblock);
                mutex_destroy(&cccp->ccc_hba_mutex);
                mutex_destroy(&cccp->ccc_waitq_mutex);
                mutex_destroy(&cccp->ccc_reset_notify_mutex);

                return (FALSE);
        }

        if (ghd_doneq_init(cccp)) {

                return (TRUE);
        }

        /*
         * ghd_doneq_init() returned an error:
         */

        ghd_timer_detach(cccp);
        ddi_remove_intr(cccp->ccc_hba_dip, 0, cccp->ccc_iblock);
        mutex_destroy(&cccp->ccc_hba_mutex);
        mutex_destroy(&cccp->ccc_waitq_mutex);
        mutex_destroy(&cccp->ccc_reset_notify_mutex);

        return (FALSE);

}


void
ghd_unregister(ccc_t *cccp)
{
        ghd_timer_detach(cccp);
        ddi_remove_intr(cccp->ccc_hba_dip, 0, cccp->ccc_iblock);
        ddi_remove_softintr(cccp->ccc_doneq_softid);
        mutex_destroy(&cccp->ccc_hba_mutex);
        mutex_destroy(&cccp->ccc_waitq_mutex);
        mutex_destroy(&cccp->ccc_doneq_mutex);
        /* also tear down the reset notify mutex set up by ghd_register() */
        mutex_destroy(&cccp->ccc_reset_notify_mutex);
}
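
/*
 * Example (hypothetical): an HBA driver typically calls ghd_register()
 * once from its attach(9E) entry point, e.g.:
 *
 *      if (!ghd_register("xhba", &hba->x_ccc, dip, 0, hba,
 *          xhba_ccballoc, xhba_ccbfree, xhba_sg_func, xhba_start,
 *          xhba_complete, xhba_intr, xhba_get_status, xhba_process_intr,
 *          xhba_timeout, &xhba_timer, NULL))
 *              return (DDI_FAILURE);
 *
 * All the xhba_* names and the xhba_timer tmr_t are illustrative.
 * Passing NULL for the reset-notify callback is tolerated; see
 * ghd_do_reset_notify_callbacks() above, which checks for it.
 */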
int
ghd_intr(ccc_t *cccp, void *intr_status)
{
        int (*statfunc)(void *, void *) = cccp->ccc_get_status;
        void (*processfunc)(void *, void *) = cccp->ccc_process_intr;
        kmutex_t *waitq_mutexp = &cccp->ccc_waitq_mutex;
        kmutex_t *hba_mutexp = &cccp->ccc_hba_mutex;
        void     *handle = cccp->ccc_hba_handle;
        int       rc = DDI_INTR_UNCLAIMED;
        int       more;


        mutex_enter(hba_mutexp);

        GDBG_INTR(("ghd_intr(): cccp=0x%p status=0x%p\n",
            (void *)cccp, intr_status));

        for (;;) {
                more = FALSE;

                /* process the interrupt status */
                while ((*statfunc)(handle, intr_status)) {
                        (*processfunc)(handle, intr_status);
                        rc = DDI_INTR_CLAIMED;
                        more = TRUE;
                }
                mutex_enter(waitq_mutexp);
                if (ghd_waitq_process_and_mutex_hold(cccp)) {
                        ASSERT(mutex_owned(hba_mutexp));
                        mutex_exit(waitq_mutexp);
                        continue;
                }
                if (more) {
                        mutex_exit(waitq_mutexp);
                        continue;
                }
                GDBG_INTR(("ghd_intr(): done cccp=0x%p status=0x%p rc %d\n",
                    (void *)cccp, intr_status, rc));
                /*
                 * Release the HBA mutex first and the wait queue mutex
                 * second, so that requests queued by ghd_transport()
                 * (which does tryenter(HBA) while holding the wait
                 * queue mutex) don't get hung up in the wait queue.
                 */
                mutex_exit(hba_mutexp);
                mutex_exit(waitq_mutexp);
                return (rc);
        }
}
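
/*
 * Example (hypothetical): the int_handler passed to ghd_register() is
 * usually a thin wrapper that lets ghd_intr() drive the processing
 * loop above, e.g.:
 *
 *      static uint_t
 *      xhba_intr(caddr_t arg)
 *      {
 *              struct xhba *hba = (struct xhba *)arg;
 *              struct xhba_intr_status status;
 *
 *              return ((uint_t)ghd_intr(&hba->x_ccc, &status));
 *      }
 *
 * xhba and xhba_intr_status are illustrative names.  The intr_status
 * buffer is opaque to GHD; it is simply passed through to the driver's
 * get_status and process_intr callbacks.
 */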
static int
ghd_poll(ccc_t  *cccp,
        gpoll_t  polltype,
        ulong_t  polltime,
        gcmd_t  *poll_gcmdp,
        gtgt_t  *gtgtp,
        void    *intr_status)
{
        gcmd_t  *gcmdp;
        L2el_t   gcmd_hold_queue;
        int      got_it = FALSE;
        clock_t  start_lbolt;
        clock_t  current_lbolt;


        ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
        L2_INIT(&gcmd_hold_queue);

        /* what time is it? */
        start_lbolt = ddi_get_lbolt();

        /* unqueue and save all CMD/CCBs until I find the right one */
        while (!got_it) {

                /* Give up yet? */
                current_lbolt = ddi_get_lbolt();
                if (polltime && (current_lbolt - start_lbolt >= polltime))
                        break;

                /*
                 * Delay 1 msec each time around the loop.  The delay
                 * value is arbitrary; any non-zero value should work.
                 * It's non-zero because some devices don't like being
                 * polled too fast, and polling flat out saturates the
                 * bus on an MP system.
                 */
                drv_usecwait(1000);

                /*
                 * check for any new device status
                 */
                if ((*cccp->ccc_get_status)(cccp->ccc_hba_handle, intr_status))
                        (*cccp->ccc_process_intr)(cccp->ccc_hba_handle,
                            intr_status);

                /*
                 * If something completed then try to start the
                 * next request from the wait queue. Don't release
                 * the HBA mutex because I don't know whether my
                 * request(s) is/are on the done queue yet.
                 */
                mutex_enter(&cccp->ccc_waitq_mutex);
                (void) ghd_waitq_process_and_mutex_hold(cccp);
                mutex_exit(&cccp->ccc_waitq_mutex);

                /*
                 * Process the first of any timed-out requests.
                 */
                ghd_timer_poll(cccp, GHD_TIMER_POLL_ONE);

                /*
                 * Unqueue all the completed requests, look for mine
                 */
                while (gcmdp = ghd_doneq_get(cccp)) {
                        /*
                         * If we got one and it's my request, then
                         * we're done.
                         */
                        if (gcmdp == poll_gcmdp) {
                                poll_gcmdp->cmd_state = GCMD_STATE_IDLE;
                                got_it = TRUE;
                                continue;
                        }
                        /* fifo queue the other cmds on my local list */
                        L2_add(&gcmd_hold_queue, &gcmdp->cmd_q, gcmdp);
                }


                /*
                 * Check whether we're done yet.
                 */
                switch (polltype) {
                case GHD_POLL_DEVICE:
                        /*
                         * wait for everything queued on a specific device
                         */
                        if (GDEV_NACTIVE(gtgtp->gt_gdevp) == 0)
                                got_it = TRUE;
                        break;

                case GHD_POLL_ALL:
                        /*
                         * if waiting for all outstanding requests and
                         * the active list is now empty, then exit
                         */
                        if (GHBA_NACTIVE(cccp) == 0)
                                got_it = TRUE;
                        break;

                case GHD_POLL_REQUEST:
                        break;

                }
        }

        if (L2_EMPTY(&gcmd_hold_queue)) {
                ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));
                ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
                return (got_it);
        }

        /*
         * copy the local gcmd_hold_queue back to the doneq so
         * that the order of completion callbacks is preserved
         */
        while (gcmdp = L2_next(&gcmd_hold_queue)) {
                L2_delete(&gcmdp->cmd_q);
                GHD_DONEQ_PUT_TAIL(cccp, gcmdp);
        }

        ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));
        ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
        return (got_it);
}


/*
 * ghd_tran_abort()
 *
 *      Abort a specific command on a target.
 *
 */

int
ghd_tran_abort(ccc_t *cccp, gcmd_t *gcmdp, gtgt_t *gtgtp, void *intr_status)
{
        gact_t  action;
        int     rc;

        /*
         * call the driver's abort_cmd function
         */

        mutex_enter(&cccp->ccc_hba_mutex);
        ghd_doneq_pollmode_enter(cccp);

        switch (gcmdp->cmd_state) {
        case GCMD_STATE_WAITQ:
                /* not yet started */
                action = GACTION_EARLY_ABORT;
                break;

        case GCMD_STATE_ACTIVE:
                /* in progress */
                action = GACTION_ABORT_CMD;
                break;

        default:
                /* everything else, probably already being aborted */
                rc = FALSE;
                goto exit;
        }

        /* stop the timer and remove it from the active list */
        GHD_TIMER_STOP(cccp, gcmdp);

        /* start a new timer and send out the abort command */
        ghd_timer_newstate(cccp, gcmdp, gtgtp, action, GHD_TGTREQ);

        /* wait for the abort to complete */
        if (rc = ghd_poll(cccp, GHD_POLL_REQUEST, ghd_tran_abort_timeout,
            gcmdp, gtgtp, intr_status)) {
                gcmdp->cmd_state = GCMD_STATE_DONEQ;
                GHD_DONEQ_PUT_TAIL(cccp, gcmdp);
        }

exit:
        ghd_doneq_pollmode_exit(cccp);

        mutex_enter(&cccp->ccc_waitq_mutex);
        ghd_waitq_process_and_mutex_exit(cccp);

        return (rc);
}
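
/*
 * Example (hypothetical): a SCSA HBA driver's tran_abort(9E) entry
 * point maps onto ghd_tran_abort() and ghd_tran_abort_lun() below,
 * using the usual SCSA convention that a NULL pkt means "abort
 * everything outstanding on this address":
 *
 *      static int
 *      xhba_tran_abort(struct scsi_address *ap, struct scsi_pkt *pktp)
 *      {
 *              gtgt_t *gtgtp = ADDR2GTGTP(ap);
 *
 *              if (pktp != NULL)
 *                      return (ghd_tran_abort(&xhba_ccc,
 *                          PKTP2GCMDP(pktp), gtgtp, NULL));
 *              return (ghd_tran_abort_lun(&xhba_ccc, gtgtp, NULL));
 *      }
 *
 * ADDR2GTGTP(), PKTP2GCMDP(), and xhba_ccc are illustrative; a real
 * driver may also need to pass a controller-specific intr_status
 * buffer instead of NULL.
 */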

/*
 * ghd_tran_abort_lun()
 *
 *      Abort all commands on a specific logical unit.
 *
 */

int
ghd_tran_abort_lun(ccc_t *cccp, gtgt_t *gtgtp, void *intr_status)
{
        int     rc;

        /*
         * call the HBA driver's abort_device function
         */

        mutex_enter(&cccp->ccc_hba_mutex);
        ghd_doneq_pollmode_enter(cccp);

        /* send out the abort device request */
        ghd_timer_newstate(cccp, NULL, gtgtp, GACTION_ABORT_DEV, GHD_TGTREQ);

        /* wait for the device to go idle */
        rc = ghd_poll(cccp, GHD_POLL_DEVICE, ghd_tran_abort_lun_timeout,
            NULL, gtgtp, intr_status);

        ghd_doneq_pollmode_exit(cccp);

        mutex_enter(&cccp->ccc_waitq_mutex);
        ghd_waitq_process_and_mutex_exit(cccp);

        return (rc);
}



/*
 * ghd_tran_reset_target()
 *
 *      reset the target device
 *
 *
 */

int
ghd_tran_reset_target(ccc_t *cccp, gtgt_t *gtgtp, void *intr_status)
{
        int rc = TRUE;


        mutex_enter(&cccp->ccc_hba_mutex);
        ghd_doneq_pollmode_enter(cccp);

        /* send out the device reset request */
        ghd_timer_newstate(cccp, NULL, gtgtp, GACTION_RESET_TARGET, GHD_TGTREQ);

        /* wait for the device to reset */
        rc = ghd_poll(cccp, GHD_POLL_DEVICE, ghd_tran_reset_target_timeout,
            NULL, gtgtp, intr_status);

        ghd_doneq_pollmode_exit(cccp);

        mutex_enter(&cccp->ccc_waitq_mutex);
        ghd_waitq_process_and_mutex_exit(cccp);

        return (rc);
}



/*
 * ghd_tran_reset_bus()
 *
 *      reset the SCSI bus
 *
 */

int
ghd_tran_reset_bus(ccc_t *cccp, gtgt_t *gtgtp, void *intr_status)
{
        int     rc;

        mutex_enter(&cccp->ccc_hba_mutex);
        ghd_doneq_pollmode_enter(cccp);

        /* send out the bus reset request */
        ghd_timer_newstate(cccp, NULL, gtgtp, GACTION_RESET_BUS, GHD_TGTREQ);

        /*
         * Wait for all active requests on this HBA to complete
         */
        rc = ghd_poll(cccp, GHD_POLL_ALL, ghd_tran_reset_bus_timeout,
            NULL, NULL, intr_status);


        ghd_doneq_pollmode_exit(cccp);

        mutex_enter(&cccp->ccc_waitq_mutex);
        ghd_waitq_process_and_mutex_exit(cccp);

        return (rc);
}
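
/*
 * Example (hypothetical): tran_reset(9E) maps the RESET_TARGET and
 * RESET_ALL levels onto the two functions above, e.g.:
 *
 *      static int
 *      xhba_tran_reset(struct scsi_address *ap, int level)
 *      {
 *              if (level == RESET_TARGET)
 *                      return (ghd_tran_reset_target(&xhba_ccc,
 *                          ADDR2GTGTP(ap), NULL));
 *              return (ghd_tran_reset_bus(&xhba_ccc, ADDR2GTGTP(ap),
 *                  NULL));
 *      }
 *
 * xhba_ccc and ADDR2GTGTP() are illustrative names, as before.
 */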

int
ghd_transport(ccc_t     *cccp,
        gcmd_t  *gcmdp,
        gtgt_t  *gtgtp,
        ulong_t  timeout,
        int      polled,
        void    *intr_status)
{
        gdev_t  *gdevp = gtgtp->gt_gdevp;

        ASSERT(!mutex_owned(&cccp->ccc_hba_mutex));
        ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));

        if (polled) {
                /*
                 * Grab the HBA mutex so no other requests are started
                 * until after this one completes.
                 */
                mutex_enter(&cccp->ccc_hba_mutex);

                GDBG_START(("ghd_transport: polled"
                    " cccp 0x%p gdevp 0x%p gtgtp 0x%p gcmdp 0x%p\n",
                    (void *)cccp, (void *)gdevp, (void *)gtgtp,
                    (void *)gcmdp));

                /*
                 * Lock the doneq so no other thread flushes the Q.
                 */
                ghd_doneq_pollmode_enter(cccp);
        }
#if defined(GHD_DEBUG) || defined(__lint)
        else {
                GDBG_START(("ghd_transport: non-polled"
                    " cccp 0x%p gdevp 0x%p gtgtp 0x%p gcmdp 0x%p\n",
                    (void *)cccp, (void *)gdevp, (void *)gtgtp,
                    (void *)gcmdp));
        }
#endif
        /*
         * add this request to the tail of the waitq
         */
        gcmdp->cmd_waitq_level = 1;
        mutex_enter(&cccp->ccc_waitq_mutex);
        L2_add(&GDEV_QHEAD(gdevp), &gcmdp->cmd_q, gcmdp);

        /*
         * Add this request to the packet timer active list and start its
         * abort timer.
         */
        gcmdp->cmd_state = GCMD_STATE_WAITQ;
        ghd_timer_start(cccp, gcmdp, timeout);


        /*
         * Check the device wait queue throttle and perhaps move
         * some requests to the end of the HBA wait queue.
         */
        ghd_waitq_shuffle_up(cccp, gdevp);

        if (!polled) {
                /*
                 * See if the HBA mutex is available, using tryenter
                 * so we don't deadlock (locking order case 7 above).
                 */
                if (!mutex_tryenter(&cccp->ccc_hba_mutex)) {
                        /* The HBA mutex isn't available */
                        GDBG_START(("ghd_transport: !mutex cccp 0x%p\n",
                            (void *)cccp));
                        mutex_exit(&cccp->ccc_waitq_mutex);
                        return (TRAN_ACCEPT);
                }
                GDBG_START(("ghd_transport: got mutex cccp 0x%p\n",
                    (void *)cccp));

                /*
                 * start as many requests as possible from the head
                 * of the HBA wait queue
                 */

                ghd_waitq_process_and_mutex_exit(cccp);

                ASSERT(!mutex_owned(&cccp->ccc_hba_mutex));
                ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));

                return (TRAN_ACCEPT);
        }


        /*
         * If polled mode (FLAG_NOINTR specified in the scsi_pkt flags),
         * then ghd_poll() waits until the request completes or times out
         * before returning.
         */

        mutex_exit(&cccp->ccc_waitq_mutex);
        (void) ghd_poll(cccp, GHD_POLL_REQUEST, 0, gcmdp, gtgtp, intr_status);
        ghd_doneq_pollmode_exit(cccp);

        mutex_enter(&cccp->ccc_waitq_mutex);
        ghd_waitq_process_and_mutex_exit(cccp);

        /* call HBA's completion function but don't do callback to target */
        (*cccp->ccc_hba_complete)(cccp->ccc_hba_handle, gcmdp, FALSE);

        GDBG_START(("ghd_transport: polled done cccp 0x%p\n", (void *)cccp));
        return (TRAN_ACCEPT);
}

int
ghd_reset_notify(ccc_t  *cccp,
        gtgt_t  *gtgtp,
        int      flag,
        void    (*callback)(caddr_t),
        caddr_t  arg)
{
        ghd_reset_notify_list_t *rnp;
        ghd_reset_notify_list_t *nrnp;
        int     rc = FALSE;

        switch (flag) {

        case SCSI_RESET_NOTIFY:

                rnp = (ghd_reset_notify_list_t *)kmem_zalloc(sizeof (*rnp),
                    KM_SLEEP);
                rnp->gtgtp = gtgtp;
                rnp->callback = callback;
                rnp->arg = arg;

                mutex_enter(&cccp->ccc_reset_notify_mutex);
                L2_add(&cccp->ccc_reset_notify_list, &rnp->l2_link,
                    (void *)rnp);
                mutex_exit(&cccp->ccc_reset_notify_mutex);

                rc = TRUE;

                break;

        case SCSI_RESET_CANCEL:

                mutex_enter(&cccp->ccc_reset_notify_mutex);
                for (rnp = (ghd_reset_notify_list_t *)
                    L2_next(&cccp->ccc_reset_notify_list);
                    rnp != NULL;
                    rnp = nrnp) {
                        /* save the next element before possibly freeing rnp */
                        nrnp = (ghd_reset_notify_list_t *)
                            L2_next(&rnp->l2_link);
                        if (rnp->gtgtp == gtgtp &&
                            rnp->callback == callback &&
                            rnp->arg == arg) {
                                L2_delete(&rnp->l2_link);
                                kmem_free(rnp, sizeof (*rnp));
                                rc = TRUE;
                        }
                }
                mutex_exit(&cccp->ccc_reset_notify_mutex);
                break;

        default:
                rc = FALSE;
                break;
        }

        return (rc);
}
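
/*
 * Example (hypothetical): an HBA driver's tran_reset_notify(9E) entry
 * point can forward directly to ghd_reset_notify(), e.g.:
 *
 *      static int
 *      xhba_tran_reset_notify(struct scsi_address *ap, int flag,
 *          void (*callback)(caddr_t), caddr_t arg)
 *      {
 *              return (ghd_reset_notify(&xhba_ccc, ADDR2GTGTP(ap),
 *                  flag, callback, arg));
 *      }
 *
 * flag is SCSI_RESET_NOTIFY or SCSI_RESET_CANCEL, as handled above;
 * xhba_ccc and ADDR2GTGTP() are illustrative names.
 */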
/*
 * freeze the HBA waitq output (see ghd_waitq_process_and_mutex_hold()),
 * presumably because of a SCSI reset, for delay milliseconds.
 */

void
ghd_freeze_waitq(ccc_t *cccp, int delay)
{
        ASSERT(mutex_owned(&cccp->ccc_hba_mutex));

        /* freeze the waitq for delay milliseconds */

        mutex_enter(&cccp->ccc_waitq_mutex);
        cccp->ccc_waitq_freezetime = ddi_get_lbolt();
        cccp->ccc_waitq_freezedelay = delay;
        cccp->ccc_waitq_frozen = 1;
        mutex_exit(&cccp->ccc_waitq_mutex);
}

void
ghd_queue_hold(ccc_t *cccp)
{
        ASSERT(mutex_owned(&cccp->ccc_hba_mutex));

        mutex_enter(&cccp->ccc_waitq_mutex);
        cccp->ccc_waitq_held = 1;
        mutex_exit(&cccp->ccc_waitq_mutex);
}

void
ghd_queue_unhold(ccc_t *cccp)
{
        ASSERT(mutex_owned(&cccp->ccc_hba_mutex));

        mutex_enter(&cccp->ccc_waitq_mutex);
        cccp->ccc_waitq_held = 0;
        mutex_exit(&cccp->ccc_waitq_mutex);
}



/*
 * Trigger previously-registered reset notifications
 */

void
ghd_trigger_reset_notify(ccc_t *cccp)
{
        gcmd_t *gcmdp;

        ASSERT(mutex_owned(&cccp->ccc_hba_mutex));

        /* create magic doneq entry */

        gcmdp = ghd_gcmd_alloc((gtgt_t *)NULL, 0, TRUE);
        gcmdp->cmd_flags = GCMDFLG_RESET_NOTIFY;

        /* put at head of doneq so it's processed ASAP */

        GHD_DONEQ_PUT_HEAD(cccp, gcmdp);
}
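
/*
 * Example (hypothetical): an HBA driver would typically call
 * ghd_trigger_reset_notify() from its process_intr callback, which
 * runs with the HBA mutex held, when the controller reports an
 * unexpected bus reset:
 *
 *      if (status->xs_bus_reset) {
 *              ghd_freeze_waitq(&hba->x_ccc, xhba_reset_settle_ms);
 *              ghd_trigger_reset_notify(&hba->x_ccc);
 *      }
 *
 * xs_bus_reset and xhba_reset_settle_ms are illustrative names; both
 * calls require the HBA mutex, per the ASSERTs above.
 */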