/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 QLogic Corporation.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>

#include <sys/stmf_defines.h>
#include <sys/fct_defines.h>
#include <sys/stmf.h>
#include <sys/portif.h>
#include <sys/fct.h>

#include "qlt.h"
#include "qlt_dma.h"

/*
 *  Local Function Prototypes.
 */
static void
qlt_dma_free_handles(qlt_state_t *qlt, qlt_dma_handle_t *first_handle);

#define	BUF_COUNT_2K		2048
#define	BUF_COUNT_8K		512
#define	BUF_COUNT_64K		256
#define	BUF_COUNT_128K		1024
#define	BUF_COUNT_256K		8

#define	QLT_DMEM_MAX_BUF_SIZE	(4 * 65536)
#define	QLT_DMEM_NBUCKETS	5
static qlt_dmem_bucket_t bucket2K	= { 2048, BUF_COUNT_2K },
			bucket8K	= { 8192, BUF_COUNT_8K },
			bucket64K	= { 65536, BUF_COUNT_64K },
			bucket128k	= { (2 * 65536), BUF_COUNT_128K },
			bucket256k	= { (4 * 65536), BUF_COUNT_256K };
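
/*
 * With the default counts above, the per-bucket slabs work out to
 * roughly 2048 x 2K = 4MB, 512 x 8K = 4MB, 256 x 64K = 16MB,
 * 1024 x 128K = 128MB and 8 x 256K = 2MB, i.e. about 154MB of DMA-able
 * buffer memory per instance, unless qlt_bucketcnt[] overrides the
 * counts in qlt_dmem_init() below.
 */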

static qlt_dmem_bucket_t *dmem_buckets[] = { &bucket2K, &bucket8K,
			&bucket64K, &bucket128k, &bucket256k, NULL };
static ddi_device_acc_attr_t acc;
static ddi_dma_attr_t qlt_scsi_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xffffffffffffffff,	/* high DMA address range */
	0xffffffff,		/* DMA counter register */
	8192,			/* DMA address alignment */
	0xff,			/* DMA burstsizes */
	1,			/* min effective DMA size */
	0xffffffff,		/* max DMA xfer size */
	0xffffffff,		/* segment boundary */
	1,			/* s/g list length */
	1,			/* granularity of device */
	0			/* DMA transfer flags */
};
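
/*
 * Note that dma_attr_sgllen is 1, so each bucket's backing slab must
 * bind to a single physically contiguous cookie; qlt_dmem_init() below
 * checks for exactly one cookie and fails the initialization otherwise.
 */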

fct_status_t
qlt_dmem_init(qlt_state_t *qlt)
{
	qlt_dmem_bucket_t	*p;
	qlt_dmem_bctl_t		*bctl, *bc;
	qlt_dmem_bctl_t		*prev;
	int			ndx, i;
	uint32_t		total_mem;
	uint8_t			*addr;
	uint8_t			*host_addr;
	uint64_t		dev_addr;
	ddi_dma_cookie_t	cookie;
	uint32_t		ncookie;
	uint32_t		bsize;
	size_t			len;

	if (qlt->qlt_bucketcnt[0] != 0) {
		bucket2K.dmem_nbufs = qlt->qlt_bucketcnt[0];
	}
	if (qlt->qlt_bucketcnt[1] != 0) {
		bucket8K.dmem_nbufs = qlt->qlt_bucketcnt[1];
	}
	if (qlt->qlt_bucketcnt[2] != 0) {
		bucket64K.dmem_nbufs = qlt->qlt_bucketcnt[2];
	}
	if (qlt->qlt_bucketcnt[3] != 0) {
		bucket128k.dmem_nbufs = qlt->qlt_bucketcnt[3];
	}
	if (qlt->qlt_bucketcnt[4] != 0) {
		bucket256k.dmem_nbufs = qlt->qlt_bucketcnt[4];
	}

	bsize = sizeof (dmem_buckets);
	ndx = (int)(bsize / sizeof (void *));
	/*
	 * The reason it is ndx - 1 everywhere is because the last bucket
	 * pointer is NULL.
	 */
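	/*
	 * Layout of the block allocated below (one contiguous allocation):
	 * ndx pointer slots (the last one stays NULL as the terminator)
	 * followed by the ndx - 1 qlt_dmem_bucket_t copies those slots
	 * point at. This is why a single kmem_free() of the whole block
	 * suffices in the failure path and in qlt_dmem_fini().
	 */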
	qlt->dmem_buckets = (qlt_dmem_bucket_t **)kmem_zalloc(bsize +
	    ((ndx - 1) * (int)sizeof (qlt_dmem_bucket_t)), KM_SLEEP);
	for (i = 0; i < (ndx - 1); i++) {
		qlt->dmem_buckets[i] = (qlt_dmem_bucket_t *)
		    ((uint8_t *)qlt->dmem_buckets + bsize +
		    (i * (int)sizeof (qlt_dmem_bucket_t)));
		bcopy(dmem_buckets[i], qlt->dmem_buckets[i],
		    sizeof (qlt_dmem_bucket_t));
	}
	bzero(&acc, sizeof (acc));
	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	acc.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	for (ndx = 0; (p = qlt->dmem_buckets[ndx]) != NULL; ndx++) {
		bctl = (qlt_dmem_bctl_t *)kmem_zalloc(p->dmem_nbufs *
		    sizeof (qlt_dmem_bctl_t), KM_NOSLEEP);
		if (bctl == NULL) {
			EL(qlt, "bctl==NULL\n");
			goto alloc_bctl_failed;
		}
		p->dmem_bctls_mem = bctl;
		mutex_init(&p->dmem_lock, NULL, MUTEX_DRIVER, NULL);
		if ((i = ddi_dma_alloc_handle(qlt->dip, &qlt_scsi_dma_attr,
		    DDI_DMA_SLEEP, 0, &p->dmem_dma_handle)) != DDI_SUCCESS) {
			EL(qlt, "ddi_dma_alloc_handle status=%xh\n", i);
			goto alloc_handle_failed;
		}

		total_mem = p->dmem_buf_size * p->dmem_nbufs;

		if ((i = ddi_dma_mem_alloc(p->dmem_dma_handle, total_mem, &acc,
		    DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0, (caddr_t *)&addr,
		    &len, &p->dmem_acc_handle)) != DDI_SUCCESS) {
			EL(qlt, "ddi_dma_mem_alloc status=%xh\n", i);
			goto mem_alloc_failed;
		}

		if ((i = ddi_dma_addr_bind_handle(p->dmem_dma_handle, NULL,
		    (caddr_t)addr, total_mem, DDI_DMA_RDWR | DDI_DMA_STREAMING,
		    DDI_DMA_DONTWAIT, 0, &cookie, &ncookie)) != DDI_SUCCESS) {
			EL(qlt, "ddi_dma_addr_bind_handle status=%xh\n", i);
			goto addr_bind_handle_failed;
		}
		if (ncookie != 1) {
			EL(qlt, "ncookie=%d\n", ncookie);
			goto dmem_init_failed;
		}

		p->dmem_host_addr = host_addr = addr;
		p->dmem_dev_addr = dev_addr = (uint64_t)cookie.dmac_laddress;
		bsize = p->dmem_buf_size;
		p->dmem_bctl_free_list = bctl;
		p->dmem_nbufs_free = p->dmem_nbufs;
		for (i = 0; i < p->dmem_nbufs; i++) {
			stmf_data_buf_t	*db;
			prev = bctl;
			bctl->bctl_bucket = p;
			bctl->bctl_buf = db = stmf_alloc(STMF_STRUCT_DATA_BUF,
			    0, 0);
			db->db_port_private = bctl;
			db->db_sglist[0].seg_addr = host_addr;
			bctl->bctl_dev_addr = dev_addr;
			db->db_sglist[0].seg_length = db->db_buf_size = bsize;
			db->db_sglist_length = 1;
			host_addr += bsize;
			dev_addr += bsize;
			bctl++;
			prev->bctl_next = bctl;
		}
		prev->bctl_next = NULL;
	}

	return (QLT_SUCCESS);

dmem_failure_loop:;
	bc = bctl;
	while (bc) {
		stmf_free(bc->bctl_buf);
		bc = bc->bctl_next;
	}
dmem_init_failed:;
	(void) ddi_dma_unbind_handle(p->dmem_dma_handle);
addr_bind_handle_failed:;
	ddi_dma_mem_free(&p->dmem_acc_handle);
mem_alloc_failed:;
	ddi_dma_free_handle(&p->dmem_dma_handle);
alloc_handle_failed:;
	kmem_free(p->dmem_bctls_mem, p->dmem_nbufs * sizeof (qlt_dmem_bctl_t));
	mutex_destroy(&p->dmem_lock);
alloc_bctl_failed:;
	if (--ndx >= 0) {
		p = qlt->dmem_buckets[ndx];
		bctl = p->dmem_bctl_free_list;
		goto dmem_failure_loop;
	}
	kmem_free(qlt->dmem_buckets, sizeof (dmem_buckets) +
	    (((sizeof (dmem_buckets) / sizeof (void *)) - 1) *
	    sizeof (qlt_dmem_bucket_t)));
	qlt->dmem_buckets = NULL;

	return (QLT_FAILURE);
}

void
qlt_dma_handle_pool_init(qlt_state_t *qlt)
{
	qlt_dma_handle_pool_t *pool;

	pool = kmem_zalloc(sizeof (*pool), KM_SLEEP);
	mutex_init(&pool->pool_lock, NULL, MUTEX_DRIVER, NULL);
	qlt->qlt_dma_handle_pool = pool;
}

void
qlt_dma_handle_pool_fini(qlt_state_t *qlt)
{
	qlt_dma_handle_pool_t	*pool;
	qlt_dma_handle_t	*handle, *next_handle;

	pool = qlt->qlt_dma_handle_pool;
	mutex_enter(&pool->pool_lock);
	/*
	 * XXX Need to wait for free == total elements
	 * XXX Not sure how other driver shutdown stuff is done.
	 */
	ASSERT(pool->num_free == pool->num_total);
	if (pool->num_free != pool->num_total)
		cmn_err(CE_WARN,
		    "num_free %d != num_total %d\n",
		    pool->num_free, pool->num_total);
	handle = pool->free_list;
	while (handle) {
		next_handle = handle->next;
		ddi_dma_free_handle(&handle->dma_handle);
		kmem_free(handle, sizeof (*handle));
		handle = next_handle;
	}
	qlt->qlt_dma_handle_pool = NULL;
	mutex_exit(&pool->pool_lock);
	mutex_destroy(&pool->pool_lock);
	kmem_free(pool, sizeof (*pool));
}

void
qlt_dmem_fini(qlt_state_t *qlt)
{
	qlt_dmem_bucket_t *p;
	qlt_dmem_bctl_t *bctl;
	int ndx;

	for (ndx = 0; (p = qlt->dmem_buckets[ndx]) != NULL; ndx++) {
		bctl = p->dmem_bctl_free_list;
		while (bctl) {
			stmf_free(bctl->bctl_buf);
			bctl = bctl->bctl_next;
		}
		bctl = p->dmem_bctl_free_list;
		(void) ddi_dma_unbind_handle(p->dmem_dma_handle);
		ddi_dma_mem_free(&p->dmem_acc_handle);
		ddi_dma_free_handle(&p->dmem_dma_handle);
		kmem_free(p->dmem_bctls_mem,
		    p->dmem_nbufs * sizeof (qlt_dmem_bctl_t));
		mutex_destroy(&p->dmem_lock);
	}
	kmem_free(qlt->dmem_buckets, sizeof (dmem_buckets) +
	    (((sizeof (dmem_buckets) / sizeof (void *)) - 1) *
	    sizeof (qlt_dmem_bucket_t)));
	qlt->dmem_buckets = NULL;
}

stmf_data_buf_t *
qlt_dmem_alloc(fct_local_port_t *port, uint32_t size, uint32_t *pminsize,
    uint32_t flags)
{
	return (qlt_i_dmem_alloc((qlt_state_t *)
	    port->port_fca_private, size, pminsize,
	    flags));
}

/* ARGSUSED */
stmf_data_buf_t *
qlt_i_dmem_alloc(qlt_state_t *qlt, uint32_t size, uint32_t *pminsize,
    uint32_t flags)
{
	qlt_dmem_bucket_t	*p;
	qlt_dmem_bctl_t		*bctl;
	int			i;
	uint32_t		size_possible = 0;

	if (size > QLT_DMEM_MAX_BUF_SIZE) {
		goto qlt_try_partial_alloc;
	}

	/* 1st try to do a full allocation */
	for (i = 0; (p = qlt->dmem_buckets[i]) != NULL; i++) {
		if (p->dmem_buf_size >= size) {
			if (p->dmem_nbufs_free) {
				mutex_enter(&p->dmem_lock);
				bctl = p->dmem_bctl_free_list;
				if (bctl == NULL) {
					mutex_exit(&p->dmem_lock);
					continue;
				}
				p->dmem_bctl_free_list =
				    bctl->bctl_next;
				p->dmem_nbufs_free--;
				qlt->qlt_bufref[i]++;
				mutex_exit(&p->dmem_lock);
				bctl->bctl_buf->db_data_size = size;
				return (bctl->bctl_buf);
			} else {
				qlt->qlt_bumpbucket++;
			}
		}
	}

qlt_try_partial_alloc:

	qlt->qlt_pmintry++;

	/* Now go from high to low */
	for (i = QLT_DMEM_NBUCKETS - 1; i >= 0; i--) {
		p = qlt->dmem_buckets[i];
		if (p->dmem_nbufs_free == 0)
			continue;
		if (!size_possible) {
			size_possible = p->dmem_buf_size;
		}
		if (*pminsize > p->dmem_buf_size) {
			/* At this point we know the request is failing. */
			if (size_possible) {
				/*
				 * This caller is asking too much. We already
				 * know what we can give, so get out.
				 */
				break;
			} else {
				/*
				 * Let's continue to find out and tell what
				 * we can give.
				 */
				continue;
			}
		}
		mutex_enter(&p->dmem_lock);
		if (*pminsize <= p->dmem_buf_size) {
			bctl = p->dmem_bctl_free_list;
			if (bctl == NULL) {
				/* Someone took it. */
				size_possible = 0;
				mutex_exit(&p->dmem_lock);
				continue;
			}
			p->dmem_bctl_free_list = bctl->bctl_next;
			p->dmem_nbufs_free--;
			mutex_exit(&p->dmem_lock);
			bctl->bctl_buf->db_data_size = p->dmem_buf_size;
			qlt->qlt_pmin_ok++;
			return (bctl->bctl_buf);
		}
	}

	*pminsize = size_possible;

	return (NULL);
}
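
/*
 * Example of the *pminsize contract: a request of size = 300K with
 * *pminsize = 64K exceeds QLT_DMEM_MAX_BUF_SIZE, so the partial path
 * above may hand back a 256K buffer with db_data_size = 256K. If no
 * bucket of at least *pminsize has a free buffer, NULL is returned and
 * *pminsize is rewritten to the largest buffer size currently
 * available (0 if nothing is free at all).
 */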

/* ARGSUSED */
void
qlt_i_dmem_free(qlt_state_t *qlt, stmf_data_buf_t *dbuf)
{
	qlt_dmem_free(NULL, dbuf);
}

/* ARGSUSED */
void
qlt_dmem_free(fct_dbuf_store_t *fds, stmf_data_buf_t *dbuf)
{
	qlt_dmem_bctl_t		*bctl;
	qlt_dmem_bucket_t	*p;

	ASSERT((dbuf->db_flags & DB_LU_DATA_BUF) == 0);

	bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
	p = bctl->bctl_bucket;
	mutex_enter(&p->dmem_lock);
	bctl->bctl_next = p->dmem_bctl_free_list;
	p->dmem_bctl_free_list = bctl;
	p->dmem_nbufs_free++;
	mutex_exit(&p->dmem_lock);
}

void
qlt_dmem_dma_sync(stmf_data_buf_t *dbuf, uint_t sync_type)
{
	qlt_dmem_bctl_t		*bctl;
	qlt_dma_sgl_t		*qsgl;
	qlt_dmem_bucket_t	*p;
	qlt_dma_handle_t	*th;
	int			rv;

	if (dbuf->db_flags & DB_LU_DATA_BUF) {
		/*
		 * go through ddi handle list
		 */
		qsgl = (qlt_dma_sgl_t *)dbuf->db_port_private;
		th = qsgl->handle_list;
		while (th) {
			rv = ddi_dma_sync(th->dma_handle,
			    0, 0, sync_type);
			if (rv != DDI_SUCCESS) {
				cmn_err(CE_WARN, "ddi_dma_sync FAILED\n");
			}
			th = th->next;
		}
	} else {
		bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
		p = bctl->bctl_bucket;
		(void) ddi_dma_sync(p->dmem_dma_handle, (off_t)
		    (bctl->bctl_dev_addr - p->dmem_dev_addr),
		    dbuf->db_data_size, sync_type);
	}
}
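
/*
 * Callers follow the usual DDI rules here: sync with
 * DDI_DMA_SYNC_FORDEV after the CPU fills a buffer the HBA will read,
 * and with DDI_DMA_SYNC_FORCPU before the CPU examines data the HBA
 * has DMA'd in.
 */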

/*
 * A very lightweight version of ddi_dma_addr_bind_handle()
 */
uint64_t
qlt_ddi_vtop(caddr_t vaddr)
{
	uint64_t offset, paddr;
	pfn_t pfn;

	pfn = hat_getpfnum(kas.a_hat, vaddr);
	ASSERT(pfn != PFN_INVALID && pfn != PFN_SUSPENDED);
	offset = ((uintptr_t)vaddr) & MMU_PAGEOFFSET;
	paddr = mmu_ptob(pfn);
	return (paddr + offset);
}
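
/*
 * Note that only one pfn is looked up, so the translation is valid
 * within a single page; a caller whose buffer may cross a page
 * boundary has to translate page by page.
 */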

static ddi_dma_attr_t qlt_sgl_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xffffffffffffffff,	/* high DMA address range */
	0xffffffff,		/* DMA counter register */
	64,			/* DMA address alignment */
	0xff,			/* DMA burstsizes */
	1,			/* min effective DMA size */
	0xffffffff,		/* max DMA xfer size */
	0xffffffff,		/* segment boundary */
	QLT_DMA_SG_LIST_LENGTH,	/* s/g list length */
	1,			/* granularity of device */
	0			/* DMA transfer flags */
};
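
/*
 * Unlike qlt_scsi_dma_attr above (sgllen 1, 8K alignment), this
 * attribute set permits up to QLT_DMA_SG_LIST_LENGTH cookies per bind,
 * so LU-provided buffers can be mapped in place instead of being
 * copied through the bucket pools.
 */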

/*
 * Allocate a qlt_dma_handle container and fill it with a ddi_dma_handle
 */
static qlt_dma_handle_t *
qlt_dma_alloc_handle(qlt_state_t *qlt)
{
	ddi_dma_handle_t ddi_handle;
	qlt_dma_handle_t *qlt_handle;
	int rv;

	rv = ddi_dma_alloc_handle(qlt->dip, &qlt_sgl_dma_attr,
	    DDI_DMA_SLEEP, 0, &ddi_handle);
	if (rv != DDI_SUCCESS) {
		EL(qlt, "ddi_dma_alloc_handle status=%xh\n", rv);
		return (NULL);
	}
	qlt_handle = kmem_zalloc(sizeof (qlt_dma_handle_t), KM_SLEEP);
	qlt_handle->dma_handle = ddi_handle;
	return (qlt_handle);
}

/*
 * Allocate a list of qlt_dma_handle containers from the free list
 */
static qlt_dma_handle_t *
qlt_dma_alloc_handle_list(qlt_state_t *qlt, int handle_count)
{
	qlt_dma_handle_pool_t	*pool;
	qlt_dma_handle_t	*tmp_handle, *first_handle, *last_handle;
	int i;

	/*
	 * Make sure the free list can satisfy the request.
	 * Once the free list is primed, it should satisfy most requests.
	 * XXX Should there be a limit on pool size?
	 */
	pool = qlt->qlt_dma_handle_pool;
	mutex_enter(&pool->pool_lock);
	while (handle_count > pool->num_free) {
		mutex_exit(&pool->pool_lock);
		if ((tmp_handle = qlt_dma_alloc_handle(qlt)) == NULL)
			return (NULL);
		mutex_enter(&pool->pool_lock);
		tmp_handle->next = pool->free_list;
		pool->free_list = tmp_handle;
		pool->num_free++;
		pool->num_total++;
	}

	/*
	 * The free list lock is held and the list is large enough to
	 * satisfy this request. Run down the freelist and snip off
	 * the number of elements needed for this request.
	 */
	first_handle = pool->free_list;
	tmp_handle = first_handle;
	for (i = 0; i < handle_count; i++) {
		last_handle = tmp_handle;
		tmp_handle = tmp_handle->next;
	}
	pool->free_list = tmp_handle;
	pool->num_free -= handle_count;
	mutex_exit(&pool->pool_lock);
	last_handle->next = NULL;	/* sanity */
	return (first_handle);
}

/*
 * Return a list of qlt_dma_handle containers to the free list.
 */
static void
qlt_dma_free_handles(qlt_state_t *qlt, qlt_dma_handle_t *first_handle)
{
	qlt_dma_handle_pool_t *pool;
	qlt_dma_handle_t *tmp_handle, *last_handle;
	int rv, handle_count;

	/*
	 * Traverse the list and unbind the handles
	 */
	ASSERT(first_handle);
	tmp_handle = first_handle;
	handle_count = 0;
	while (tmp_handle != NULL) {
		last_handle = tmp_handle;
		/*
		 * If the handle is bound, unbind the handle so it can be
		 * reused. It may not be bound if there was a bind failure.
		 */
		if (tmp_handle->num_cookies != 0) {
			rv = ddi_dma_unbind_handle(tmp_handle->dma_handle);
			ASSERT(rv == DDI_SUCCESS);
			tmp_handle->num_cookies = 0;
			tmp_handle->num_cookies_fetched = 0;
		}
		tmp_handle = tmp_handle->next;
		handle_count++;
	}
	/*
	 * Insert this list into the free list
	 */
	pool = qlt->qlt_dma_handle_pool;
	mutex_enter(&pool->pool_lock);
	last_handle->next = pool->free_list;
	pool->free_list = first_handle;
	pool->num_free += handle_count;
	mutex_exit(&pool->pool_lock);
}

/*
 * Return the number of cookies produced by mapping this dbuf.
 */
uint16_t
qlt_get_cookie_count(stmf_data_buf_t *dbuf)
{
	qlt_dma_sgl_t *qsgl = dbuf->db_port_private;

	ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);
	return (qsgl->cookie_count);
}

ddi_dma_cookie_t *
qlt_get_cookie_array(stmf_data_buf_t *dbuf)
{
	qlt_dma_sgl_t *qsgl = dbuf->db_port_private;

	ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);

	if (qsgl->cookie_prefetched)
		return (&qsgl->cookie[0]);
	else
		return (NULL);
}

/*
 * Wrapper around ddi_dma_nextcookie that hides the ddi_dma_handle usage.
 */
void
qlt_ddi_dma_nextcookie(stmf_data_buf_t *dbuf, ddi_dma_cookie_t *cookiep)
{
	qlt_dma_sgl_t *qsgl = dbuf->db_port_private;

	ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);

	if (qsgl->cookie_prefetched) {
		ASSERT(qsgl->cookie_next_fetch < qsgl->cookie_count);
		*cookiep = qsgl->cookie[qsgl->cookie_next_fetch++];
	} else {
		qlt_dma_handle_t *fetch;
		qlt_dma_handle_t *FETCH_DONE = (qlt_dma_handle_t *)0xbad;

		ASSERT(qsgl->handle_list != NULL);
		ASSERT(qsgl->handle_next_fetch != FETCH_DONE);

		fetch = qsgl->handle_next_fetch;
		if (fetch->num_cookies_fetched == 0) {
			*cookiep = fetch->first_cookie;
		} else {
			ddi_dma_nextcookie(fetch->dma_handle, cookiep);
		}
		if (++fetch->num_cookies_fetched == fetch->num_cookies) {
			if (fetch->next == NULL)
				qsgl->handle_next_fetch = FETCH_DONE;
			else
				qsgl->handle_next_fetch = fetch->next;
		}
	}
}
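
/*
 * A consumer of the two routines above would typically fetch every
 * cookie exactly once, e.g.:
 *
 *	count = qlt_get_cookie_count(dbuf);
 *	for (i = 0; i < count; i++) {
 *		qlt_ddi_dma_nextcookie(dbuf, &cookie);
 *		use cookie.dmac_laddress and cookie.dmac_size
 *	}
 *
 * Fetching past cookie_count trips the ASSERTs above.
 */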

/*
 * Set this flag to fetch the DDI dma cookies from the handles here and
 * store them in the port private area of the dbuf. This will allow
 * faster access to the cookies in qlt_xfer_scsi_data() at the expense of
 * an extra copy. If the qlt->req_lock is hot, this may help.
 */
int qlt_sgl_prefetch = 0;
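
/*
 * As a plain module global, this can also be tuned at boot time via
 * /etc/system, e.g.:
 *
 *	set qlt:qlt_sgl_prefetch = 1
 */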

/*ARGSUSED*/
stmf_status_t
qlt_dma_setup_dbuf(fct_local_port_t *port, stmf_data_buf_t *dbuf,
    uint32_t flags)
{
	qlt_state_t		*qlt = port->port_fca_private;
	qlt_dma_sgl_t		*qsgl;
	struct stmf_sglist_ent	*sglp;
	qlt_dma_handle_t	*handle_list, *th;
	int			i, rv;
	ddi_dma_cookie_t	*cookie_p;
	int			cookie_count, numbufs;
	int			prefetch;
	size_t			qsize;

	/*
	 * Pseudocode:
	 * get dma handle list from cache - one per sglist entry
	 * foreach sglist entry
	 *	bind dma handle to sglist vaddr
	 * allocate space for DMA state to store in db_port_private
	 * fill in port private object
	 * if prefetching
	 *	move all dma cookies into db_port_private
	 */
	dbuf->db_port_private = NULL;
	numbufs = dbuf->db_sglist_length;
	handle_list = qlt_dma_alloc_handle_list(qlt, numbufs);
	if (handle_list == NULL) {
		EL(qlt, "handle_list==NULL\n");
		return (STMF_FAILURE);
	}
	/*
	 * Loop through sglist and bind each entry to a handle
	 */
	th = handle_list;
	sglp = &dbuf->db_sglist[0];
	cookie_count = 0;
	for (i = 0; i < numbufs; i++, sglp++) {

		/*
		 * Bind this sgl entry to a DDI dma handle
		 */
		if ((rv = ddi_dma_addr_bind_handle(
		    th->dma_handle,
		    NULL,
		    (caddr_t)(sglp->seg_addr),
		    (size_t)sglp->seg_length,
		    DDI_DMA_RDWR | DDI_DMA_STREAMING,
		    DDI_DMA_DONTWAIT,
		    NULL,
		    &th->first_cookie,
		    &th->num_cookies)) != DDI_DMA_MAPPED) {
			cmn_err(CE_NOTE, "ddi_dma_addr_bind_handle %d", rv);
			qlt_dma_free_handles(qlt, handle_list);
			return (STMF_FAILURE);
		}

		/*
		 * Add to total cookie count
		 */
		cookie_count += th->num_cookies;
		if (cookie_count > QLT_DMA_SG_LIST_LENGTH) {
			/*
			 * Request exceeds HBA limit
			 */
			qlt_dma_free_handles(qlt, handle_list);
			return (STMF_FAILURE);
		}
		/* move to next ddi_dma_handle */
		th = th->next;
	}

	/*
	 * Allocate our port private object for DMA mapping state.
	 */
	prefetch = qlt_sgl_prefetch;
	qsize = sizeof (qlt_dma_sgl_t);
	if (prefetch) {
		/* one extra ddi_dma_cookie allocated for alignment padding */
		qsize += cookie_count * sizeof (ddi_dma_cookie_t);
	}
	qsgl = kmem_alloc(qsize, KM_SLEEP);
	/*
	 * Fill in the sgl
	 */
	dbuf->db_port_private = qsgl;
	qsgl->qsize = qsize;
	qsgl->handle_count = dbuf->db_sglist_length;
	qsgl->cookie_prefetched = prefetch;
	qsgl->cookie_count = cookie_count;
	qsgl->cookie_next_fetch = 0;
	qsgl->handle_list = handle_list;
	qsgl->handle_next_fetch = handle_list;
	if (prefetch) {
		/*
		 * traverse handle list and move cookies to db_port_private
		 */
		th = handle_list;
		cookie_p = &qsgl->cookie[0];
		for (i = 0; i < numbufs; i++) {
			uint_t cc = th->num_cookies;

			*cookie_p++ = th->first_cookie;
			while (--cc > 0) {
				ddi_dma_nextcookie(th->dma_handle, cookie_p++);
			}
			th->num_cookies_fetched = th->num_cookies;
			th = th->next;
		}
	}

	return (STMF_SUCCESS);
}
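
/*
 * Note that the qsize arithmetic above relies on qlt_dma_sgl_t (see
 * qlt_dma.h) ending in a one-element cookie[] array, so adding
 * cookie_count more cookie slots leaves the one spare element referred
 * to by the "alignment padding" comment.
 */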

void
qlt_dma_teardown_dbuf(fct_dbuf_store_t *fds, stmf_data_buf_t *dbuf)
{
	qlt_state_t		*qlt = fds->fds_fca_private;
	qlt_dma_sgl_t		*qsgl = dbuf->db_port_private;

	ASSERT(qlt);
	ASSERT(qsgl);
	ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);

	/*
	 * unbind and free the dma handles
	 */
	if (qsgl->handle_list) {
		/* go through ddi handle list */
		qlt_dma_free_handles(qlt, qsgl->handle_list);
	}
	kmem_free(qsgl, qsgl->qsize);
}

uint8_t
qlt_get_iocb_count(uint32_t cookie_count)
{
	uint32_t	cnt, cont_segs;
	uint8_t		iocb_count;

	iocb_count = 1;
	cnt = CMD7_2400_DATA_SEGMENTS;
	cont_segs = CONT_A64_DATA_SEGMENTS;

	if (cookie_count > cnt) {
		cnt = cookie_count - cnt;
		iocb_count = (uint8_t)(iocb_count + cnt / cont_segs);
		if (cnt % cont_segs) {
			iocb_count++;
		}
	}
	return (iocb_count);
}
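
/*
 * Worked example, assuming CMD7_2400_DATA_SEGMENTS is 1 and
 * CONT_A64_DATA_SEGMENTS is 5 (see qlt.h for the real values): a
 * 12-cookie transfer needs the CTIO7 (1 segment) plus 11 continuation
 * segments, i.e. 11 / 5 = 2 full continuation IOCBs plus one more for
 * the remainder, so qlt_get_iocb_count(12) returns 4.
 */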
805