/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/scsi/scsi.h>
#include <sys/vtrace.h>


#define	A_TO_TRAN(ap)	((ap)->a_hba_tran)
#define	P_TO_TRAN(pkt)	((pkt)->pkt_address.a_hba_tran)
#define	P_TO_ADDR(pkt)	(&((pkt)->pkt_address))

/*
 * Callback id
 */
uintptr_t scsi_callback_id = 0;

extern ddi_dma_attr_t scsi_alloc_attr;

struct buf *
scsi_alloc_consistent_buf(struct scsi_address *ap,
    struct buf *in_bp, size_t datalen, uint_t bflags,
    int (*callback)(caddr_t), caddr_t callback_arg)
{
	dev_info_t	*pdip;
	struct buf	*bp;
	int		kmflag;
	size_t		rlen;

	TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_ALLOC_CONSISTENT_BUF_START,
	    "scsi_alloc_consistent_buf_start");

	if (!in_bp) {
		kmflag = (callback == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP;
		if ((bp = getrbuf(kmflag)) == NULL) {
			goto no_resource;
		}
	} else {
		bp = in_bp;

		/* we are establishing a new buffer memory association */
		bp->b_flags &= ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW);
		bp->b_proc = NULL;
		bp->b_pages = NULL;
		bp->b_shadow = NULL;
	}

	/* limit bits that can be set by bflags argument */
	ASSERT(!(bflags & ~(B_READ | B_WRITE)));
	bflags &= (B_READ | B_WRITE);
	bp->b_un.b_addr = 0;

	if (datalen) {
		pdip = (A_TO_TRAN(ap))->tran_hba_dip;

		/*
		 * use i_ddi_mem_alloc() for now until we have an interface to
		 * allocate memory for DMA which doesn't require a DMA handle.
		 * ddi_iopb_alloc() is obsolete and we want more flexibility in
		 * controlling the DMA address constraints.
		 */
		while (i_ddi_mem_alloc(pdip, &scsi_alloc_attr, datalen,
		    ((callback == SLEEP_FUNC) ? 1 : 0), 0, NULL,
		    &bp->b_un.b_addr, &rlen, NULL) != DDI_SUCCESS) {
			if (callback == SLEEP_FUNC) {
				delay(drv_usectohz(10000));
			} else {
				if (!in_bp)
					freerbuf(bp);
				goto no_resource;
			}
		}
		bp->b_flags |= bflags;
	}
	bp->b_bcount = datalen;
	bp->b_resid = 0;

	TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_ALLOC_CONSISTENT_BUF_END,
	    "scsi_alloc_consistent_buf_end");
	return (bp);

no_resource:

	if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
		ddi_set_callback(callback, callback_arg,
		    &scsi_callback_id);
	}
	TRACE_0(TR_FAC_SCSI_RES,
	    TR_SCSI_ALLOC_CONSISTENT_BUF_RETURN1_END,
	    "scsi_alloc_consistent_buf_end (return1)");
	return (NULL);
}
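/*
 * Illustrative sketch (not part of the original file): a target driver
 * would typically pair scsi_alloc_consistent_buf() with
 * scsi_free_consistent_buf() below around a request-sense style
 * transfer; devp is a placeholder scsi_device pointer.
 *
 *	struct buf *bp;
 *
 *	bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
 *	    SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
 *	if (bp == NULL)
 *		return (ENOMEM);
 *	... build and transport a packet that DMAs into bp ...
 *	scsi_free_consistent_buf(bp);
 */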
void
scsi_free_consistent_buf(struct buf *bp)
{
	TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_FREE_CONSISTENT_BUF_START,
	    "scsi_free_consistent_buf_start");
	if (!bp)
		return;
	if (bp->b_un.b_addr)
		i_ddi_mem_free((caddr_t)bp->b_un.b_addr, NULL);
	freerbuf(bp);
	if (scsi_callback_id != 0) {
		ddi_run_callback(&scsi_callback_id);
	}
	TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_FREE_CONSISTENT_BUF_END,
	    "scsi_free_consistent_buf_end");
}

void
scsi_dmafree_attr(struct scsi_pkt *pktp)
{
	struct scsi_pkt_cache_wrapper *pktw =
	    (struct scsi_pkt_cache_wrapper *)pktp;

	if (pktw->pcw_flags & PCW_BOUND) {
		if (ddi_dma_unbind_handle(pktp->pkt_handle) !=
		    DDI_SUCCESS)
			cmn_err(CE_WARN, "scsi_dmafree_attr: "
			    "unbind handle failed");
		pktw->pcw_flags &= ~PCW_BOUND;
	}
	pktp->pkt_numcookies = 0;
}

struct buf *
scsi_pkt2bp(struct scsi_pkt *pkt)
{
	return (((struct scsi_pkt_cache_wrapper *)pkt)->pcw_bp);
}

int
scsi_dma_buf_bind_attr(struct scsi_pkt_cache_wrapper *pktw,
    struct buf	*bp,
    int		dma_flags,
    int		(*callback)(),
    caddr_t	arg)
{
	struct scsi_pkt *pktp = &(pktw->pcw_pkt);
	int	status;

	/*
	 * First time, need to establish the handle.
	 */

	ASSERT(pktp->pkt_numcookies == 0);
	ASSERT(pktw->pcw_totalwin == 0);

	status = ddi_dma_buf_bind_handle(pktp->pkt_handle, bp, dma_flags,
	    callback, arg, &pktw->pcw_cookie,
	    &pktp->pkt_numcookies);

	switch (status) {
	case DDI_DMA_MAPPED:
		pktw->pcw_totalwin = 1;
		break;

	case DDI_DMA_PARTIAL_MAP:
		/* enable first call to ddi_dma_getwin */
		if (ddi_dma_numwin(pktp->pkt_handle,
		    &pktw->pcw_totalwin) != DDI_SUCCESS) {
			bp->b_error = 0;
			return (0);
		}
		break;

	case DDI_DMA_NORESOURCES:
		bp->b_error = 0;
		return (0);

	case DDI_DMA_TOOBIG:
		bioerror(bp, EINVAL);
		return (0);

	case DDI_DMA_NOMAPPING:
	case DDI_DMA_INUSE:
	default:
		bioerror(bp, EFAULT);
		return (0);
	}

	/* initialize the loop controls for scsi_dmaget_attr() */
	pktw->pcw_curwin = 0;
	pktw->pcw_total_xfer = 0;
	pktp->pkt_dma_flags = dma_flags;
	return (1);
}
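/*
 * Sketch of the partial-DMA flow these routines implement (inferred
 * from the window bookkeeping above, not a verbatim driver excerpt):
 * when ddi_dma_buf_bind_handle() returns DDI_DMA_PARTIAL_MAP, the
 * buffer is carved into pcw_totalwin windows.  Each window is mapped
 * one at a time, and a target driver that passed PKT_DMA_PARTIAL
 * re-calls scsi_init_pkt() with the same pkt/bp to advance to the
 * next window once the current transfer completes:
 *
 *	while (pkt->pkt_resid > 0) {
 *		... transport pkt, wait for completion ...
 *		pkt = scsi_init_pkt(ap, pkt, bp, 0, 0, 0,
 *		    PKT_DMA_PARTIAL, NULL_FUNC, NULL);
 *	}
 */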
#if defined(_DMA_USES_PHYSADDR)
int
scsi_dmaget_attr(struct scsi_pkt_cache_wrapper *pktw)
{
	struct scsi_pkt *pktp = &(pktw->pcw_pkt);

	int		status;
	int		num_segs = 0;
	ddi_dma_impl_t	*hp = (ddi_dma_impl_t *)pktp->pkt_handle;
	ddi_dma_cookie_t *cp;

	if (pktw->pcw_curwin != 0) {
		ddi_dma_cookie_t	cookie;

		/*
		 * start the next window, and get its first cookie
		 */
		status = ddi_dma_getwin(pktp->pkt_handle,
		    pktw->pcw_curwin, &pktp->pkt_dma_offset,
		    &pktp->pkt_dma_len, &cookie,
		    &pktp->pkt_numcookies);
		if (status != DDI_SUCCESS)
			return (0);
	}

	/*
	 * start the Scatter/Gather loop
	 */
	cp = hp->dmai_cookie - 1;
	pktp->pkt_dma_len = 0;
	for (;;) {

		/* take care of the loop-bookkeeping */
		pktp->pkt_dma_len += cp->dmac_size;
		num_segs++;

		/*
		 * if this was the last cookie in the current window,
		 * set the loop controls to start the next window and
		 * exit so the HBA can do this partial transfer
		 */
		if (num_segs >= pktp->pkt_numcookies) {
			pktw->pcw_curwin++;
			break;
		}

		cp++;
	}
	pktw->pcw_total_xfer += pktp->pkt_dma_len;
	pktp->pkt_cookies = hp->dmai_cookie - 1;
	hp->dmai_cookie = cp;

	return (1);
}
#endif
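/*
 * Illustrative sketch (assumption, not from this file): an HBA's
 * tran_start(9E) entry point consumes the window prepared above by
 * walking the pkt_cookies array; dmac_laddress and dmac_size give the
 * DMA address and length of each scatter/gather segment.
 *
 *	uint_t i;
 *
 *	for (i = 0; i < pkt->pkt_numcookies; i++) {
 *		ddi_dma_cookie_t *dmac = &pkt->pkt_cookies[i];
 *		... program S/G entry i with dmac->dmac_laddress
 *		    and dmac->dmac_size ...
 *	}
 */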
void scsi_free_cache_pkt(struct scsi_address *, struct scsi_pkt *);

struct scsi_pkt *
scsi_init_cache_pkt(struct scsi_address *ap, struct scsi_pkt *in_pktp,
    struct buf *bp, int cmdlen, int statuslen, int pplen,
    int flags, int (*callback)(caddr_t), caddr_t callback_arg)
{
	struct scsi_pkt_cache_wrapper *pktw;
	scsi_hba_tran_t	*tranp = ap->a_hba_tran;
	int		(*func)(caddr_t);

	func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;

	if (in_pktp == NULL) {
		int kf;

		if (callback == SLEEP_FUNC)
			kf = KM_SLEEP;
		else
			kf = KM_NOSLEEP;
		pktw = kmem_cache_alloc(tranp->tran_pkt_cache_ptr,
		    kf);
		if (pktw == NULL)
			goto fail1;

		pktw->pcw_flags = 0;
		in_pktp = &(pktw->pcw_pkt);
		in_pktp->pkt_address = *ap;
		/*
		 * target drivers should initialize pkt_comp and
		 * pkt_time, but sometimes they don't so initialize
		 * them here to be safe.
		 */
		in_pktp->pkt_flags = 0;
		in_pktp->pkt_time = 0;
		in_pktp->pkt_resid = 0;
		in_pktp->pkt_state = 0;
		in_pktp->pkt_statistics = 0;
		in_pktp->pkt_reason = 0;
		in_pktp->pkt_dma_offset = 0;
		in_pktp->pkt_dma_len = 0;
		in_pktp->pkt_dma_flags = 0;
		in_pktp->pkt_path_instance = 0;
		ASSERT(in_pktp->pkt_numcookies == 0);
		pktw->pcw_curwin = 0;
		pktw->pcw_totalwin = 0;
		pktw->pcw_total_xfer = 0;

		in_pktp->pkt_cdblen = cmdlen;
		if ((tranp->tran_hba_flags & SCSI_HBA_TRAN_CDB) &&
		    (cmdlen > DEFAULT_CDBLEN)) {
			pktw->pcw_flags |= PCW_NEED_EXT_CDB;
			in_pktp->pkt_cdbp = kmem_alloc(cmdlen, kf);
			if (in_pktp->pkt_cdbp == NULL)
				goto fail2;
		}
		in_pktp->pkt_tgtlen = pplen;
		if (pplen > DEFAULT_PRIVLEN) {
			pktw->pcw_flags |= PCW_NEED_EXT_TGT;
			in_pktp->pkt_private = kmem_alloc(pplen, kf);
			if (in_pktp->pkt_private == NULL)
				goto fail3;
		}
		in_pktp->pkt_scblen = statuslen;
		if ((tranp->tran_hba_flags & SCSI_HBA_TRAN_SCB) &&
		    (statuslen > DEFAULT_SCBLEN)) {
			pktw->pcw_flags |= PCW_NEED_EXT_SCB;
			in_pktp->pkt_scbp = kmem_alloc(statuslen, kf);
			if (in_pktp->pkt_scbp == NULL)
				goto fail4;
		}
		if ((*tranp->tran_setup_pkt) (in_pktp,
		    func, NULL) == -1) {
			goto fail5;
		}
		if (cmdlen)
			bzero((void *)in_pktp->pkt_cdbp, cmdlen);
		if (pplen)
			bzero((void *)in_pktp->pkt_private, pplen);
		if (statuslen)
			bzero((void *)in_pktp->pkt_scbp, statuslen);
	} else
		pktw = (struct scsi_pkt_cache_wrapper *)in_pktp;

	if (bp && bp->b_bcount) {

		int dma_flags = 0;

		/*
		 * we need to transfer data, so we alloc dma resources
		 * for this packet
		 */
		/*CONSTCOND*/
		ASSERT(SLEEP_FUNC == DDI_DMA_SLEEP);
		/*CONSTCOND*/
		ASSERT(NULL_FUNC == DDI_DMA_DONTWAIT);

#if defined(_DMA_USES_PHYSADDR)
		/*
		 * with an IOMMU we map everything, so we don't
		 * need to bother with this
		 */
		if (tranp->tran_dma_attr.dma_attr_granular !=
		    pktw->pcw_granular) {

			ddi_dma_free_handle(&in_pktp->pkt_handle);
			if (ddi_dma_alloc_handle(tranp->tran_hba_dip,
			    &tranp->tran_dma_attr,
			    func, NULL,
			    &in_pktp->pkt_handle) != DDI_SUCCESS) {

				in_pktp->pkt_handle = NULL;
				return (NULL);
			}
			pktw->pcw_granular =
			    tranp->tran_dma_attr.dma_attr_granular;
		}
#endif

		if (in_pktp->pkt_numcookies == 0) {
			pktw->pcw_bp = bp;
			/*
			 * set dma flags; the "read" case must be first
			 * since B_WRITE isn't always set for writes.
			 */
			if (bp->b_flags & B_READ) {
				dma_flags |= DDI_DMA_READ;
			} else {
				dma_flags |= DDI_DMA_WRITE;
			}
			if (flags & PKT_CONSISTENT)
				dma_flags |= DDI_DMA_CONSISTENT;
			if (flags & PKT_DMA_PARTIAL)
				dma_flags |= DDI_DMA_PARTIAL;

#if defined(__sparc)
			/*
			 * workaround for byte hole issue on psycho and
			 * schizo pre 2.1
			 */
			if ((bp->b_flags & B_READ) && ((bp->b_flags &
			    (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
			    (((uintptr_t)bp->b_un.b_addr & 0x7) ||
			    ((uintptr_t)bp->b_bcount & 0x7))) {
				dma_flags |= DDI_DMA_CONSISTENT;
			}
#endif
			if (!scsi_dma_buf_bind_attr(pktw, bp,
			    dma_flags, callback, callback_arg)) {
				return (NULL);
			} else {
				pktw->pcw_flags |= PCW_BOUND;
			}
		}

#if defined(_DMA_USES_PHYSADDR)
		if (!scsi_dmaget_attr(pktw)) {
			scsi_dmafree_attr(in_pktp);
			goto fail5;
		}
#else
		in_pktp->pkt_cookies = &pktw->pcw_cookie;
		in_pktp->pkt_dma_len = pktw->pcw_cookie.dmac_size;
		pktw->pcw_total_xfer += in_pktp->pkt_dma_len;
#endif
		ASSERT(in_pktp->pkt_numcookies <=
		    tranp->tran_dma_attr.dma_attr_sgllen);
		ASSERT(pktw->pcw_total_xfer <= bp->b_bcount);
		in_pktp->pkt_resid = bp->b_bcount -
		    pktw->pcw_total_xfer;

		ASSERT((in_pktp->pkt_resid % pktw->pcw_granular) ==
		    0);
	} else {
		/* !bp or no b_bcount */
		in_pktp->pkt_resid = 0;
	}
	return (in_pktp);

fail5:
	if (pktw->pcw_flags & PCW_NEED_EXT_SCB) {
		kmem_free(in_pktp->pkt_scbp, statuslen);
		in_pktp->pkt_scbp = (opaque_t)((char *)in_pktp +
		    tranp->tran_hba_len + DEFAULT_PRIVLEN +
		    sizeof (struct scsi_pkt_cache_wrapper));
		if ((A_TO_TRAN(ap))->tran_hba_flags & SCSI_HBA_TRAN_CDB)
			in_pktp->pkt_scbp = (opaque_t)((in_pktp->pkt_scbp) +
			    DEFAULT_CDBLEN);
		in_pktp->pkt_scblen = 0;
	}
fail4:
	if (pktw->pcw_flags & PCW_NEED_EXT_TGT) {
		kmem_free(in_pktp->pkt_private, pplen);
		in_pktp->pkt_tgtlen = 0;
		in_pktp->pkt_private = NULL;
	}
fail3:
	if (pktw->pcw_flags & PCW_NEED_EXT_CDB) {
		kmem_free(in_pktp->pkt_cdbp, cmdlen);
		in_pktp->pkt_cdbp = (opaque_t)((char *)in_pktp +
		    tranp->tran_hba_len +
		    sizeof (struct scsi_pkt_cache_wrapper));
		in_pktp->pkt_cdblen = 0;
	}
	pktw->pcw_flags &=
	    ~(PCW_NEED_EXT_CDB|PCW_NEED_EXT_TGT|PCW_NEED_EXT_SCB);
fail2:
	kmem_cache_free(tranp->tran_pkt_cache_ptr, pktw);
fail1:
	if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
		ddi_set_callback(callback, callback_arg,
		    &scsi_callback_id);
	}

	return (NULL);
}
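/*
 * Note (assumed context, consistent with the code above rather than
 * stated in this file): an HBA driver opts into this cached-packet
 * path by supplying tran_setup_pkt/tran_teardown_pkt instead of
 * tran_init_pkt/tran_destroy_pkt; scsi_hba_attach_setup(9F) then
 * installs scsi_init_cache_pkt() and scsi_free_cache_pkt() on its
 * behalf.  The myhba_* names below are hypothetical:
 *
 *	tran->tran_setup_pkt = myhba_setup_pkt;
 *	tran->tran_teardown_pkt = myhba_teardown_pkt;
 *	tran->tran_hba_len = sizeof (struct myhba_cmd);
 *	(void) scsi_hba_attach_setup(dip, &myhba_dma_attr, tran, 0);
 */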
void
scsi_free_cache_pkt(struct scsi_address *ap, struct scsi_pkt *pktp)
{
	struct scsi_pkt_cache_wrapper *pktw;

	(*A_TO_TRAN(ap)->tran_teardown_pkt)(pktp);
	pktw = (struct scsi_pkt_cache_wrapper *)pktp;
	if (pktw->pcw_flags & PCW_BOUND)
		scsi_dmafree_attr(pktp);

	/*
	 * if we allocated memory for anything that wouldn't fit, free
	 * the memory and restore the pointers
	 */
	if (pktw->pcw_flags & PCW_NEED_EXT_SCB) {
		kmem_free(pktp->pkt_scbp, pktp->pkt_scblen);
		pktp->pkt_scbp = (opaque_t)((char *)pktp +
		    (A_TO_TRAN(ap))->tran_hba_len +
		    DEFAULT_PRIVLEN + sizeof (struct scsi_pkt_cache_wrapper));
		if ((A_TO_TRAN(ap))->tran_hba_flags & SCSI_HBA_TRAN_CDB)
			pktp->pkt_scbp = (opaque_t)((pktp->pkt_scbp) +
			    DEFAULT_CDBLEN);
		pktp->pkt_scblen = 0;
	}
	if (pktw->pcw_flags & PCW_NEED_EXT_TGT) {
		kmem_free(pktp->pkt_private, pktp->pkt_tgtlen);
		pktp->pkt_tgtlen = 0;
		pktp->pkt_private = NULL;
	}
	if (pktw->pcw_flags & PCW_NEED_EXT_CDB) {
		kmem_free(pktp->pkt_cdbp, pktp->pkt_cdblen);
		pktp->pkt_cdbp = (opaque_t)((char *)pktp +
		    (A_TO_TRAN(ap))->tran_hba_len +
		    sizeof (struct scsi_pkt_cache_wrapper));
		pktp->pkt_cdblen = 0;
	}
	pktw->pcw_flags &=
	    ~(PCW_NEED_EXT_CDB|PCW_NEED_EXT_TGT|PCW_NEED_EXT_SCB);
	kmem_cache_free(A_TO_TRAN(ap)->tran_pkt_cache_ptr, pktw);

	if (scsi_callback_id != 0) {
		ddi_run_callback(&scsi_callback_id);
	}
}
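/*
 * Sketch of the resource-callback protocol used throughout this file
 * (assumed usage; sd_runout is a placeholder name): a caller that
 * passes a private callback instead of SLEEP_FUNC/NULL_FUNC is queued
 * via ddi_set_callback() when allocation fails, and is re-driven by
 * ddi_run_callback() whenever packets or DMA resources are freed.
 *
 *	static int
 *	sd_runout(caddr_t arg)
 *	{
 *		... retry the failed scsi_init_pkt() ...
 *		return (1);	(return 0 to stay queued for a retry)
 *	}
 */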
struct scsi_pkt *
scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *in_pktp,
    struct buf *bp, int cmdlen, int statuslen, int pplen,
    int flags, int (*callback)(caddr_t), caddr_t callback_arg)
{
	struct scsi_pkt *pktp;
	scsi_hba_tran_t	*tranp = ap->a_hba_tran;
	int		(*func)(caddr_t);

	TRACE_5(TR_FAC_SCSI_RES, TR_SCSI_INIT_PKT_START,
	    "scsi_init_pkt_start: addr %p in_pktp %p cmdlen %d statuslen %d pplen %d",
	    ap, in_pktp, cmdlen, statuslen, pplen);

#if defined(__i386) || defined(__amd64)
	if (flags & PKT_CONSISTENT_OLD) {
		flags &= ~PKT_CONSISTENT_OLD;
		flags |= PKT_CONSISTENT;
	}
#endif

	func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;

	pktp = (*tranp->tran_init_pkt) (ap, in_pktp, bp, cmdlen,
	    statuslen, pplen, flags, func, NULL);
	if (pktp == NULL) {
		if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
			ddi_set_callback(callback, callback_arg,
			    &scsi_callback_id);
		}
	}

	TRACE_1(TR_FAC_SCSI_RES, TR_SCSI_INIT_PKT_END,
	    "scsi_init_pkt_end: pktp %p", pktp);
	return (pktp);
}

void
scsi_destroy_pkt(struct scsi_pkt *pkt)
{
	struct scsi_address *ap = P_TO_ADDR(pkt);

	TRACE_1(TR_FAC_SCSI_RES, TR_SCSI_DESTROY_PKT_START,
	    "scsi_destroy_pkt_start: pkt %p", pkt);

	(*A_TO_TRAN(ap)->tran_destroy_pkt)(ap, pkt);

	if (scsi_callback_id != 0) {
		ddi_run_callback(&scsi_callback_id);
	}

	TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_DESTROY_PKT_END,
	    "scsi_destroy_pkt_end");
}
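/*
 * Illustrative sketch (not from this file) of the normal target-driver
 * lifecycle built on scsi_init_pkt()/scsi_destroy_pkt(); devp, blkno,
 * nblks and my_done are placeholders:
 *
 *	pkt = scsi_init_pkt(&devp->sd_address, NULL, bp, CDB_GROUP1,
 *	    sizeof (struct scsi_arq_status), 0, 0, SLEEP_FUNC, NULL);
 *	(void) scsi_setup_cdb((union scsi_cdb *)pkt->pkt_cdbp,
 *	    SCMD_READ_G1, blkno, nblks, 0);
 *	pkt->pkt_comp = my_done;
 *	pkt->pkt_time = 30;
 *	if (scsi_transport(pkt) != TRAN_ACCEPT)
 *		... handle transport failure ...
 *	... on completion ...
 *	scsi_destroy_pkt(pkt);
 */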
/*
 * Generic Resource Allocation Routines
 */

struct scsi_pkt *
scsi_resalloc(struct scsi_address *ap, int cmdlen, int statuslen,
    opaque_t dmatoken, int (*callback)())
{
	register struct scsi_pkt	*pkt;
	register scsi_hba_tran_t	*tranp = ap->a_hba_tran;
	register int			(*func)(caddr_t);

	func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;

	pkt = (*tranp->tran_init_pkt) (ap, NULL, (struct buf *)dmatoken,
	    cmdlen, statuslen, 0, 0, func, NULL);
	if (pkt == NULL) {
		if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
			ddi_set_callback(callback, NULL, &scsi_callback_id);
		}
	}

	return (pkt);
}

struct scsi_pkt *
scsi_pktalloc(struct scsi_address *ap, int cmdlen, int statuslen,
    int (*callback)())
{
	struct scsi_pkt		*pkt;
	struct scsi_hba_tran	*tran = ap->a_hba_tran;
	register int		(*func)(caddr_t);

	func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;

	pkt = (*tran->tran_init_pkt) (ap, NULL, NULL, cmdlen,
	    statuslen, 0, 0, func, NULL);
	if (pkt == NULL) {
		if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
			ddi_set_callback(callback, NULL, &scsi_callback_id);
		}
	}

	return (pkt);
}

struct scsi_pkt *
scsi_dmaget(struct scsi_pkt *pkt, opaque_t dmatoken, int (*callback)())
{
	struct scsi_pkt	*new_pkt;
	register int	(*func)(caddr_t);

	func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;

	new_pkt = (*P_TO_TRAN(pkt)->tran_init_pkt) (&pkt->pkt_address,
	    pkt, (struct buf *)dmatoken,
	    0, 0, 0, 0, func, NULL);
	ASSERT(new_pkt == pkt || new_pkt == NULL);
	if (new_pkt == NULL) {
		if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
			ddi_set_callback(callback, NULL, &scsi_callback_id);
		}
	}

	return (new_pkt);
}


/*
 * Generic Resource Deallocation Routines
 */

void
scsi_dmafree(struct scsi_pkt *pkt)
{
	register struct scsi_address	*ap = P_TO_ADDR(pkt);

	(*A_TO_TRAN(ap)->tran_dmafree)(ap, pkt);

	if (scsi_callback_id != 0) {
		ddi_run_callback(&scsi_callback_id);
	}
}

/*ARGSUSED*/
void
scsi_cache_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	ASSERT(pkt->pkt_numcookies == 0 ||
	    ((struct scsi_pkt_cache_wrapper *)pkt)->pcw_flags & PCW_BOUND);
	ASSERT(pkt->pkt_handle != NULL);
	scsi_dmafree_attr(pkt);

	if (scsi_callback_id != 0) {
		ddi_run_callback(&scsi_callback_id);
	}
}

void
scsi_sync_pkt(struct scsi_pkt *pkt)
{
	register struct scsi_address	*ap = P_TO_ADDR(pkt);

	if (pkt->pkt_state & STATE_XFERRED_DATA)
		(*A_TO_TRAN(ap)->tran_sync_pkt)(ap, pkt);
}

/*ARGSUSED*/
void
scsi_sync_cache_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	if (pkt->pkt_handle &&
	    (pkt->pkt_dma_flags & (DDI_DMA_WRITE | DDI_DMA_READ))) {
		(void) ddi_dma_sync(pkt->pkt_handle,
		    pkt->pkt_dma_offset, pkt->pkt_dma_len,
		    (pkt->pkt_dma_flags & DDI_DMA_WRITE) ?
		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
	}
}

void
scsi_resfree(struct scsi_pkt *pkt)
{
	register struct scsi_address	*ap = P_TO_ADDR(pkt);

	(*A_TO_TRAN(ap)->tran_destroy_pkt)(ap, pkt);

	if (scsi_callback_id != 0) {
		ddi_run_callback(&scsi_callback_id);
	}
}
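/*
 * Compatibility note (an assumption based on the DDI/DKI man pages,
 * not stated in this file): scsi_resalloc(), scsi_pktalloc(),
 * scsi_dmaget(), scsi_dmafree() and scsi_resfree() are the older
 * resource interfaces.  They funnel into the same tran_init_pkt and
 * tran_destroy_pkt entry points, roughly:
 *
 *	scsi_pktalloc(ap, cmdlen, statuslen, cb)
 *	    ~ scsi_init_pkt(ap, NULL, NULL, cmdlen, statuslen,
 *		0, 0, cb, NULL)
 *	scsi_resfree(pkt) ~ scsi_destroy_pkt(pkt)
 *
 * New target drivers should prefer scsi_init_pkt()/scsi_destroy_pkt().
 */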