/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/scsi/scsi.h>
#include <sys/vtrace.h>


#define A_TO_TRAN(ap)   ((ap)->a_hba_tran)
#define P_TO_TRAN(pkt)  ((pkt)->pkt_address.a_hba_tran)
#define P_TO_ADDR(pkt)  (&((pkt)->pkt_address))

/*
 * Callback id
 */
uintptr_t scsi_callback_id = 0;

extern ddi_dma_attr_t scsi_alloc_attr;
struct buf *
scsi_alloc_consistent_buf(struct scsi_address *ap,
    struct buf *in_bp, size_t datalen, uint_t bflags,
    int (*callback)(caddr_t), caddr_t callback_arg)
{
        dev_info_t *pdip;
        struct buf *bp;
        int kmflag;
        size_t rlen;

        TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_ALLOC_CONSISTENT_BUF_START,
            "scsi_alloc_consistent_buf_start");

        if (!in_bp) {
                kmflag = (callback == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP;
                if ((bp = getrbuf(kmflag)) == NULL) {
                        goto no_resource;
                }
        } else {
                bp = in_bp;

                /* we are establishing a new buffer memory association */
                bp->b_flags &= ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW);
                bp->b_proc = NULL;
                bp->b_pages = NULL;
                bp->b_shadow = NULL;
        }

        /* limit bits that can be set by bflags argument */
        ASSERT(!(bflags & ~(B_READ | B_WRITE)));
        bflags &= (B_READ | B_WRITE);
        bp->b_un.b_addr = 0;

        if (datalen) {
                pdip = (A_TO_TRAN(ap))->tran_hba_dip;

                /*
                 * use i_ddi_mem_alloc() for now until we have an interface to
                 * allocate memory for DMA which doesn't require a DMA handle.
                 */
                while (i_ddi_mem_alloc(pdip, &scsi_alloc_attr, datalen,
                    ((callback == SLEEP_FUNC) ? 1 : 0), 0, NULL,
                    &bp->b_un.b_addr, &rlen, NULL) != DDI_SUCCESS) {
                        if (callback == SLEEP_FUNC) {
                                delay(drv_usectohz(10000));
                        } else {
                                if (!in_bp)
                                        freerbuf(bp);
                                goto no_resource;
                        }
                }
                bp->b_flags |= bflags;
        }
        bp->b_bcount = datalen;
        bp->b_resid = 0;

        TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_ALLOC_CONSISTENT_BUF_END,
            "scsi_alloc_consistent_buf_end");
        return (bp);

no_resource:

        if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
                ddi_set_callback(callback, callback_arg,
                    &scsi_callback_id);
        }
        TRACE_0(TR_FAC_SCSI_RES,
            TR_SCSI_ALLOC_CONSISTENT_BUF_RETURN1_END,
            "scsi_alloc_consistent_buf_end (return1)");
        return (NULL);
}

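/*
 * Free a buffer allocated by scsi_alloc_consistent_buf(): release the
 * data memory and the buf header, then run any pending resource
 * callbacks.
 */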
void
scsi_free_consistent_buf(struct buf *bp)
{
        TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_FREE_CONSISTENT_BUF_START,
            "scsi_free_consistent_buf_start");
        if (!bp)
                return;
        if (bp->b_un.b_addr)
                i_ddi_mem_free((caddr_t)bp->b_un.b_addr, NULL);
        freerbuf(bp);
        if (scsi_callback_id != 0) {
                ddi_run_callback(&scsi_callback_id);
        }
        TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_FREE_CONSISTENT_BUF_END,
            "scsi_free_consistent_buf_end");
}

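/*
 * Free DMA resources attached to a cache-allocated packet: unbind the
 * DMA handle if it is bound and clear the cookie/window bookkeeping.
 */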
void
scsi_dmafree_attr(struct scsi_pkt *pktp)
{
        struct scsi_pkt_cache_wrapper *pktw =
            (struct scsi_pkt_cache_wrapper *)pktp;

        if (pktw->pcw_flags & PCW_BOUND) {
                if (ddi_dma_unbind_handle(pktp->pkt_handle) !=
                    DDI_SUCCESS)
                        cmn_err(CE_WARN, "scsi_dmafree_attr: "
                            "unbind handle failed");
                pktw->pcw_flags &= ~PCW_BOUND;
        }
        pktp->pkt_numcookies = 0;
        pktw->pcw_totalwin = 0;
}

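/*
 * Map a scsi_pkt allocated from the packet cache back to the buf
 * bound to it.
 */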
struct buf *
scsi_pkt2bp(struct scsi_pkt *pkt)
{
        return (((struct scsi_pkt_cache_wrapper *)pkt)->pcw_bp);
}

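/*
 * Bind bp to the packet's DMA handle and initialize the window
 * bookkeeping consumed by scsi_dmaget_attr().  Returns 1 on success;
 * on failure returns 0 with bp's error set (EINVAL for
 * DDI_DMA_TOOBIG, EFAULT for mapping errors, 0 for a resource
 * shortage).
 */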
int
scsi_dma_buf_bind_attr(struct scsi_pkt_cache_wrapper *pktw,
    struct buf *bp,
    int dma_flags,
    int (*callback)(),
    caddr_t arg)
{
        struct scsi_pkt *pktp = &(pktw->pcw_pkt);
        int status;

        /*
         * First time, need to establish the handle.
         */

        ASSERT(pktp->pkt_numcookies == 0);
        ASSERT(pktw->pcw_totalwin == 0);

        status = ddi_dma_buf_bind_handle(pktp->pkt_handle, bp, dma_flags,
            callback, arg, &pktw->pcw_cookie,
            &pktp->pkt_numcookies);

        switch (status) {
        case DDI_DMA_MAPPED:
                pktw->pcw_totalwin = 1;
                break;

        case DDI_DMA_PARTIAL_MAP:
                /* enable first call to ddi_dma_getwin */
                if (ddi_dma_numwin(pktp->pkt_handle,
                    &pktw->pcw_totalwin) != DDI_SUCCESS) {
                        bp->b_error = 0;
                        return (0);
                }
                break;

        case DDI_DMA_NORESOURCES:
                bp->b_error = 0;
                return (0);

        case DDI_DMA_TOOBIG:
                bioerror(bp, EINVAL);
                return (0);

        case DDI_DMA_NOMAPPING:
        case DDI_DMA_INUSE:
        default:
                bioerror(bp, EFAULT);
                return (0);
        }

        /* initialize the loop controls for scsi_dmaget_attr() */
        pktw->pcw_curwin = 0;
        pktw->pcw_total_xfer = 0;
        pktp->pkt_dma_flags = dma_flags;
        return (1);
}

#if defined(_DMA_USES_PHYSADDR)
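/*
 * Walk the cookie list of the current DMA window, accumulating
 * pkt_dma_len and positioning pkt_cookies at the window's first
 * cookie; advance pcw_curwin so the next call starts the following
 * window.  Returns 1 on success, 0 if ddi_dma_getwin() fails.
 */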
int
scsi_dmaget_attr(struct scsi_pkt_cache_wrapper *pktw)
{
        struct scsi_pkt *pktp = &(pktw->pcw_pkt);

        int status;
        int num_segs = 0;
        ddi_dma_impl_t *hp = (ddi_dma_impl_t *)pktp->pkt_handle;
        ddi_dma_cookie_t *cp;

        if (pktw->pcw_curwin != 0) {
                ddi_dma_cookie_t cookie;

                /*
                 * start the next window, and get its first cookie
                 */
                status = ddi_dma_getwin(pktp->pkt_handle,
                    pktw->pcw_curwin, &pktp->pkt_dma_offset,
                    &pktp->pkt_dma_len, &cookie,
                    &pktp->pkt_numcookies);
                if (status != DDI_SUCCESS)
                        return (0);
        }

        /*
         * start the Scatter/Gather loop
         */
        cp = hp->dmai_cookie - 1;
        pktp->pkt_dma_len = 0;
        for (;;) {

                /* take care of the loop-bookkeeping */
                pktp->pkt_dma_len += cp->dmac_size;
                num_segs++;
                /*
                 * if this was the last cookie in the current window,
                 * set the loop controls to start the next window and
                 * exit so the HBA can do this partial transfer
                 */
                if (num_segs >= pktp->pkt_numcookies) {
                        pktw->pcw_curwin++;
                        break;
                }

                cp++;
        }
        pktw->pcw_total_xfer += pktp->pkt_dma_len;
        pktp->pkt_cookies = hp->dmai_cookie - 1;
        hp->dmai_cookie = cp;
        hp->dmai_curcookie = num_segs;

        return (1);
}
#endif

void scsi_free_cache_pkt(struct scsi_address *, struct scsi_pkt *);

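/*
 * tran_init_pkt(9E) implementation used by HBA drivers that allocate
 * packets from the common scsi_pkt kmem cache (the tran_setup_pkt(9E)
 * framework): allocate or reuse a packet wrapper, attach extra
 * cdb/target-private/status space when the defaults are too small,
 * and bind DMA resources for bp when a transfer is requested.
 */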
struct scsi_pkt *
scsi_init_cache_pkt(struct scsi_address *ap, struct scsi_pkt *in_pktp,
    struct buf *bp, int cmdlen, int statuslen, int pplen,
    int flags, int (*callback)(caddr_t), caddr_t callback_arg)
{
        struct scsi_pkt_cache_wrapper *pktw;
        scsi_hba_tran_t *tranp = ap->a_hba_tran;
        int (*func)(caddr_t);

        func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;

        if (in_pktp == NULL) {
                int kf;

                if (callback == SLEEP_FUNC)
                        kf = KM_SLEEP;
                else
                        kf = KM_NOSLEEP;
                /*
                 * By using kmem_cache_alloc(), the layout of the
                 * scsi_pkt, scsi_pkt_cache_wrapper, hba private data,
                 * cdb, tgt driver private data, and status block is
                 * as below.
                 *
                 * This is a piece of contiguous memory starting from
                 * the first structure field scsi_pkt in the struct
                 * scsi_pkt_cache_wrapper, followed by the hba private
                 * data, pkt_cdbp, the tgt driver private data and
                 * pkt_scbp.
                 *
                 * |----------------------------|--------------------->
                 * | struct scsi_pkt            | struct
                 * | ......                     | scsi_pkt_cache_wrapper
                 * | pcw_flags                  |
                 * |----------------------------|<---------------------
                 * | hba private data           | tranp->tran_hba_len
                 * |----------------------------|
                 * | pkt_cdbp                   | DEFAULT_CDBLEN
                 * |----------------------------|
                 * | tgt private data           | DEFAULT_PRIVLEN
                 * |----------------------------|
                 * | pkt_scbp                   | DEFAULT_SCBLEN
                 * |----------------------------|
                 *
                 * If the actual data length of the cdb, or the tgt
                 * driver private data, or the status block is bigger
                 * than the default data length, kmem_alloc() will be
                 * called to get extra space.
                 */
                pktw = kmem_cache_alloc(tranp->tran_pkt_cache_ptr,
                    kf);
                if (pktw == NULL)
                        goto fail1;

                pktw->pcw_flags = 0;
                in_pktp = &(pktw->pcw_pkt);
                in_pktp->pkt_address = *ap;

                /*
                 * target drivers should initialize pkt_comp and
                 * pkt_time, but sometimes they don't, so initialize
                 * them here to be safe.
                 */
                in_pktp->pkt_flags = 0;
                in_pktp->pkt_time = 0;
                in_pktp->pkt_resid = 0;
                in_pktp->pkt_state = 0;
                in_pktp->pkt_statistics = 0;
                in_pktp->pkt_reason = 0;
                in_pktp->pkt_dma_offset = 0;
                in_pktp->pkt_dma_len = 0;
                in_pktp->pkt_dma_flags = 0;
                in_pktp->pkt_path_instance = 0;
                ASSERT(in_pktp->pkt_numcookies == 0);
                pktw->pcw_curwin = 0;
                pktw->pcw_totalwin = 0;
                pktw->pcw_total_xfer = 0;

                in_pktp->pkt_cdblen = cmdlen;
                if ((tranp->tran_hba_flags & SCSI_HBA_TRAN_CDB) &&
                    (cmdlen > DEFAULT_CDBLEN)) {
                        pktw->pcw_flags |= PCW_NEED_EXT_CDB;
                        in_pktp->pkt_cdbp = kmem_alloc(cmdlen, kf);
                        if (in_pktp->pkt_cdbp == NULL)
                                goto fail2;
                }
                in_pktp->pkt_tgtlen = pplen;
                if (pplen > DEFAULT_PRIVLEN) {
                        pktw->pcw_flags |= PCW_NEED_EXT_TGT;
                        in_pktp->pkt_private = kmem_alloc(pplen, kf);
                        if (in_pktp->pkt_private == NULL)
                                goto fail3;
                }
                in_pktp->pkt_scblen = statuslen;
                if ((tranp->tran_hba_flags & SCSI_HBA_TRAN_SCB) &&
                    (statuslen > DEFAULT_SCBLEN)) {
                        pktw->pcw_flags |= PCW_NEED_EXT_SCB;
                        in_pktp->pkt_scbp = kmem_alloc(statuslen, kf);
                        if (in_pktp->pkt_scbp == NULL)
                                goto fail4;
                }
                if ((*tranp->tran_setup_pkt) (in_pktp,
                    func, NULL) == -1) {
                        goto fail5;
                }
                if (cmdlen)
                        bzero((void *)in_pktp->pkt_cdbp, cmdlen);
                if (pplen)
                        bzero((void *)in_pktp->pkt_private, pplen);
                if (statuslen)
                        bzero((void *)in_pktp->pkt_scbp, statuslen);
        } else
                pktw = (struct scsi_pkt_cache_wrapper *)in_pktp;

        if (bp && bp->b_bcount) {

                int dma_flags = 0;

                /*
                 * we need to transfer data, so we alloc dma resources
                 * for this packet
                 */
                /*CONSTCOND*/
                ASSERT(SLEEP_FUNC == DDI_DMA_SLEEP);
                /*CONSTCOND*/
                ASSERT(NULL_FUNC == DDI_DMA_DONTWAIT);

#if defined(_DMA_USES_PHYSADDR)
                /*
                 * with an IOMMU we map everything, so we don't
                 * need to bother with this
                 */
                if (tranp->tran_dma_attr.dma_attr_granular !=
                    pktw->pcw_granular) {

                        ddi_dma_free_handle(&in_pktp->pkt_handle);
                        if (ddi_dma_alloc_handle(tranp->tran_hba_dip,
                            &tranp->tran_dma_attr,
                            func, NULL,
                            &in_pktp->pkt_handle) != DDI_SUCCESS) {

                                in_pktp->pkt_handle = NULL;
                                return (NULL);
                        }
                        pktw->pcw_granular =
                            tranp->tran_dma_attr.dma_attr_granular;
                }
#endif

                if (in_pktp->pkt_numcookies == 0) {
                        pktw->pcw_bp = bp;
                        /*
                         * set dma flags; the "read" case must be first
                         * since B_WRITE isn't always set for writes.
                         */
                        if (bp->b_flags & B_READ) {
                                dma_flags |= DDI_DMA_READ;
                        } else {
                                dma_flags |= DDI_DMA_WRITE;
                        }
                        if (flags & PKT_CONSISTENT)
                                dma_flags |= DDI_DMA_CONSISTENT;
                        if (flags & PKT_DMA_PARTIAL)
                                dma_flags |= DDI_DMA_PARTIAL;

#if defined(__sparc)
                        /*
                         * workaround for byte hole issue on psycho and
                         * schizo pre 2.1
                         */
                        if ((bp->b_flags & B_READ) && ((bp->b_flags &
                            (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
                            (((uintptr_t)bp->b_un.b_addr & 0x7) ||
                            ((uintptr_t)bp->b_bcount & 0x7))) {
                                dma_flags |= DDI_DMA_CONSISTENT;
                        }
#endif
                        if (!scsi_dma_buf_bind_attr(pktw, bp,
                            dma_flags, callback, callback_arg)) {
                                return (NULL);
                        } else {
                                pktw->pcw_flags |= PCW_BOUND;
                        }
                }

#if defined(_DMA_USES_PHYSADDR)
                if (!scsi_dmaget_attr(pktw)) {
                        scsi_dmafree_attr(in_pktp);
                        goto fail5;
                }
#else
                in_pktp->pkt_cookies = &pktw->pcw_cookie;
                in_pktp->pkt_dma_len = pktw->pcw_cookie.dmac_size;
                pktw->pcw_total_xfer += in_pktp->pkt_dma_len;
#endif
                ASSERT(in_pktp->pkt_numcookies <=
                    tranp->tran_dma_attr.dma_attr_sgllen);
                ASSERT(pktw->pcw_total_xfer <= bp->b_bcount);
                in_pktp->pkt_resid = bp->b_bcount -
                    pktw->pcw_total_xfer;

                ASSERT((in_pktp->pkt_resid % pktw->pcw_granular) ==
                    0);
        } else {
                /* !bp or no b_bcount */
                in_pktp->pkt_resid = 0;
        }
        return (in_pktp);

fail5:
        if (pktw->pcw_flags & PCW_NEED_EXT_SCB) {
                kmem_free(in_pktp->pkt_scbp, statuslen);
                in_pktp->pkt_scbp = (opaque_t)((char *)in_pktp +
                    tranp->tran_hba_len + DEFAULT_PRIVLEN +
                    sizeof (struct scsi_pkt_cache_wrapper));
                if ((A_TO_TRAN(ap))->tran_hba_flags & SCSI_HBA_TRAN_CDB)
                        in_pktp->pkt_scbp = (opaque_t)((in_pktp->pkt_scbp) +
                            DEFAULT_CDBLEN);
                in_pktp->pkt_scblen = 0;
        }
fail4:
        if (pktw->pcw_flags & PCW_NEED_EXT_TGT) {
                kmem_free(in_pktp->pkt_private, pplen);
                in_pktp->pkt_tgtlen = 0;
                in_pktp->pkt_private = NULL;
        }
fail3:
        if (pktw->pcw_flags & PCW_NEED_EXT_CDB) {
                kmem_free(in_pktp->pkt_cdbp, cmdlen);
                in_pktp->pkt_cdbp = (opaque_t)((char *)in_pktp +
                    tranp->tran_hba_len +
                    sizeof (struct scsi_pkt_cache_wrapper));
                in_pktp->pkt_cdblen = 0;
        }
        pktw->pcw_flags &=
            ~(PCW_NEED_EXT_CDB|PCW_NEED_EXT_TGT|PCW_NEED_EXT_SCB);
fail2:
        kmem_cache_free(tranp->tran_pkt_cache_ptr, pktw);
fail1:
        if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
                ddi_set_callback(callback, callback_arg,
                    &scsi_callback_id);
        }

        return (NULL);
}

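/*
 * tran_destroy_pkt(9E) counterpart of scsi_init_cache_pkt(): tear the
 * packet down, unbind its DMA resources, free any externally
 * allocated cdb/target-private/status space, and return the wrapper
 * to the kmem cache.
 */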
void
scsi_free_cache_pkt(struct scsi_address *ap, struct scsi_pkt *pktp)
{
        struct scsi_pkt_cache_wrapper *pktw;

        (*A_TO_TRAN(ap)->tran_teardown_pkt)(pktp);
        pktw = (struct scsi_pkt_cache_wrapper *)pktp;
        if (pktw->pcw_flags & PCW_BOUND)
                scsi_dmafree_attr(pktp);

        /*
         * if we allocated memory for anything that wouldn't fit, free
         * the memory and restore the pointers
         */
        if (pktw->pcw_flags & PCW_NEED_EXT_SCB) {
                kmem_free(pktp->pkt_scbp, pktp->pkt_scblen);
                pktp->pkt_scbp = (opaque_t)((char *)pktp +
                    (A_TO_TRAN(ap))->tran_hba_len +
                    DEFAULT_PRIVLEN + sizeof (struct scsi_pkt_cache_wrapper));
                if ((A_TO_TRAN(ap))->tran_hba_flags & SCSI_HBA_TRAN_CDB)
                        pktp->pkt_scbp = (opaque_t)((pktp->pkt_scbp) +
                            DEFAULT_CDBLEN);
                pktp->pkt_scblen = 0;
        }
        if (pktw->pcw_flags & PCW_NEED_EXT_TGT) {
                kmem_free(pktp->pkt_private, pktp->pkt_tgtlen);
                pktp->pkt_tgtlen = 0;
                pktp->pkt_private = NULL;
        }
        if (pktw->pcw_flags & PCW_NEED_EXT_CDB) {
                kmem_free(pktp->pkt_cdbp, pktp->pkt_cdblen);
                pktp->pkt_cdbp = (opaque_t)((char *)pktp +
                    (A_TO_TRAN(ap))->tran_hba_len +
                    sizeof (struct scsi_pkt_cache_wrapper));
                pktp->pkt_cdblen = 0;
        }
        pktw->pcw_flags &=
            ~(PCW_NEED_EXT_CDB|PCW_NEED_EXT_TGT|PCW_NEED_EXT_SCB);
        kmem_cache_free(A_TO_TRAN(ap)->tran_pkt_cache_ptr, pktw);

        if (scsi_callback_id != 0) {
                ddi_run_callback(&scsi_callback_id);
        }
}

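/*
 * Allocate and initialize a SCSI packet through the HBA's
 * tran_init_pkt(9E) entry point.  If allocation fails and the caller
 * supplied a real callback, register it so the caller is notified
 * when resources may be available again.
 */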
struct scsi_pkt *
scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *in_pktp,
    struct buf *bp, int cmdlen, int statuslen, int pplen,
    int flags, int (*callback)(caddr_t), caddr_t callback_arg)
{
        struct scsi_pkt *pktp;
        scsi_hba_tran_t *tranp = ap->a_hba_tran;
        int (*func)(caddr_t);

        TRACE_5(TR_FAC_SCSI_RES, TR_SCSI_INIT_PKT_START,
            "scsi_init_pkt_start: addr %p in_pktp %p cmdlen %d statuslen %d pplen %d",
            ap, in_pktp, cmdlen, statuslen, pplen);

#if defined(__x86)
        if (flags & PKT_CONSISTENT_OLD) {
                flags &= ~PKT_CONSISTENT_OLD;
                flags |= PKT_CONSISTENT;
        }
#endif

        func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;

        pktp = (*tranp->tran_init_pkt) (ap, in_pktp, bp, cmdlen,
            statuslen, pplen, flags, func, NULL);
        if (pktp == NULL) {
                if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
                        ddi_set_callback(callback, callback_arg,
                            &scsi_callback_id);
                }
        }

        TRACE_1(TR_FAC_SCSI_RES, TR_SCSI_INIT_PKT_END,
            "scsi_init_pkt_end: pktp %p", pktp);
        return (pktp);
}

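/*
 * Free a packet and its resources through the HBA's
 * tran_destroy_pkt(9E) entry point, then run any pending resource
 * callbacks.
 */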
void
scsi_destroy_pkt(struct scsi_pkt *pkt)
{
        struct scsi_address *ap = P_TO_ADDR(pkt);

        TRACE_1(TR_FAC_SCSI_RES, TR_SCSI_DESTROY_PKT_START,
            "scsi_destroy_pkt_start: pkt %p", pkt);

        (*A_TO_TRAN(ap)->tran_destroy_pkt)(ap, pkt);

        if (scsi_callback_id != 0) {
                ddi_run_callback(&scsi_callback_id);
        }

        TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_DESTROY_PKT_END,
            "scsi_destroy_pkt_end");
}


/*
 * Generic Resource Allocation Routines
 */

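/*
 * Legacy allocation interface; equivalent to scsi_init_pkt() with no
 * target-private space and no flags, where dmatoken is a buf pointer.
 */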
struct scsi_pkt *
scsi_resalloc(struct scsi_address *ap, int cmdlen, int statuslen,
    opaque_t dmatoken, int (*callback)())
{
        register struct scsi_pkt *pkt;
        register scsi_hba_tran_t *tranp = ap->a_hba_tran;
        register int (*func)(caddr_t);

        func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;

        pkt = (*tranp->tran_init_pkt) (ap, NULL, (struct buf *)dmatoken,
            cmdlen, statuslen, 0, 0, func, NULL);
        if (pkt == NULL) {
                if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
                        ddi_set_callback(callback, NULL, &scsi_callback_id);
                }
        }

        return (pkt);
}

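/*
 * Legacy packet allocation without DMA resources.
 */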
struct scsi_pkt *
scsi_pktalloc(struct scsi_address *ap, int cmdlen, int statuslen,
    int (*callback)())
{
        struct scsi_pkt *pkt;
        struct scsi_hba_tran *tran = ap->a_hba_tran;
        register int (*func)(caddr_t);

        func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;

        pkt = (*tran->tran_init_pkt) (ap, NULL, NULL, cmdlen,
            statuslen, 0, 0, func, NULL);
        if (pkt == NULL) {
                if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
                        ddi_set_callback(callback, NULL, &scsi_callback_id);
                }
        }

        return (pkt);
}

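/*
 * Legacy interface to add DMA resources to a previously allocated
 * packet.
 */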
struct scsi_pkt *
scsi_dmaget(struct scsi_pkt *pkt, opaque_t dmatoken, int (*callback)())
{
        struct scsi_pkt *new_pkt;
        register int (*func)(caddr_t);

        func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;

        new_pkt = (*P_TO_TRAN(pkt)->tran_init_pkt) (&pkt->pkt_address,
            pkt, (struct buf *)dmatoken,
            0, 0, 0, 0, func, NULL);
        ASSERT(new_pkt == pkt || new_pkt == NULL);
        if (new_pkt == NULL) {
                if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
                        ddi_set_callback(callback, NULL, &scsi_callback_id);
                }
        }

        return (new_pkt);
}


/*
 * Generic Resource Deallocation Routines
 */

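/*
 * Free the DMA resources of a packet via the HBA's tran_dmafree(9E)
 * entry point, then run any pending resource callbacks.
 */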
void
scsi_dmafree(struct scsi_pkt *pkt)
{
        register struct scsi_address *ap = P_TO_ADDR(pkt);

        (*A_TO_TRAN(ap)->tran_dmafree)(ap, pkt);

        if (scsi_callback_id != 0) {
                ddi_run_callback(&scsi_callback_id);
        }
}

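/*
 * tran_dmafree(9E) implementation for HBA drivers using the common
 * scsi_pkt cache.
 */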
/*ARGSUSED*/
void
scsi_cache_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
        ASSERT(pkt->pkt_numcookies == 0 ||
            ((struct scsi_pkt_cache_wrapper *)pkt)->pcw_flags & PCW_BOUND);
        ASSERT(pkt->pkt_handle != NULL);
        scsi_dmafree_attr(pkt);

        if (scsi_callback_id != 0) {
                ddi_run_callback(&scsi_callback_id);
        }
}

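/*
 * Sync the DMA view of a packet via the HBA's tran_sync_pkt(9E)
 * entry point, but only if data has actually been transferred.
 */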
void
scsi_sync_pkt(struct scsi_pkt *pkt)
{
        register struct scsi_address *ap = P_TO_ADDR(pkt);

        if (pkt->pkt_state & STATE_XFERRED_DATA)
                (*A_TO_TRAN(ap)->tran_sync_pkt)(ap, pkt);
}

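/*
 * tran_sync_pkt(9E) implementation for HBA drivers using the common
 * scsi_pkt cache: sync the current DMA window for the device before a
 * write or for the CPU after a read.
 */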
/*ARGSUSED*/
void
scsi_sync_cache_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
        if (pkt->pkt_handle &&
            (pkt->pkt_dma_flags & (DDI_DMA_WRITE | DDI_DMA_READ))) {
                (void) ddi_dma_sync(pkt->pkt_handle,
                    pkt->pkt_dma_offset, pkt->pkt_dma_len,
                    (pkt->pkt_dma_flags & DDI_DMA_WRITE) ?
                    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
        }
}

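/*
 * Legacy interface to free a packet and all of its resources.
 */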
void
scsi_resfree(struct scsi_pkt *pkt)
{
        register struct scsi_address *ap = P_TO_ADDR(pkt);
        (*A_TO_TRAN(ap)->tran_destroy_pkt)(ap, pkt);

        if (scsi_callback_id != 0) {
                ddi_run_callback(&scsi_callback_id);
        }
}