/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009-2015 QLogic Corporation. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>

#include <sys/stmf_defines.h>
#include <sys/fct_defines.h>
#include <sys/stmf.h>
#include <sys/portif.h>
#include <sys/fct.h>

#include "qlt.h"
#include "qlt_dma.h"

/*
 * Local Function Prototypes.
 */
static void
qlt_dma_free_handles(qlt_state_t *qlt, qlt_dma_handle_t *first_handle);

#define	BUF_COUNT_2K		2048
#define	BUF_COUNT_8K		512
#define	BUF_COUNT_64K		256
#define	BUF_COUNT_128K		1024
#define	BUF_COUNT_256K		8

#define	QLT_DMEM_MAX_BUF_SIZE	(4 * 65536)
#define	QLT_DMEM_NBUCKETS	5
static qlt_dmem_bucket_t bucket2K	= { 2048, BUF_COUNT_2K },
			bucket8K	= { 8192, BUF_COUNT_8K },
			bucket64K	= { 65536, BUF_COUNT_64K },
			bucket128k	= { (2 * 65536), BUF_COUNT_128K },
			bucket256k	= { (4 * 65536), BUF_COUNT_256K };

static qlt_dmem_bucket_t *dmem_buckets[] = { &bucket2K, &bucket8K,
			&bucket64K, &bucket128k, &bucket256k, NULL };
static ddi_device_acc_attr_t acc;
static ddi_dma_attr_t qlt_scsi_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xffffffffffffffff,	/* high DMA address range */
	0xffffffff,		/* DMA counter register */
	8192,			/* DMA address alignment */
	0xff,			/* DMA burstsizes */
	1,			/* min effective DMA size */
	0xffffffff,		/* max DMA xfer size */
	0xffffffff,		/* segment boundary */
	1,			/* s/g list length */
	1,			/* granularity of device */
	0			/* DMA transfer flags */
};

fct_status_t
qlt_dmem_init(qlt_state_t *qlt)
{
	qlt_dmem_bucket_t	*p;
	qlt_dmem_bctl_t		*bctl, *bc;
	qlt_dmem_bctl_t		*prev;
	int			ndx, i;
	uint32_t		total_mem;
	uint8_t			*addr;
	uint8_t			*host_addr;
	uint64_t		dev_addr;
	ddi_dma_cookie_t	cookie;
	uint32_t		ncookie;
	uint32_t		bsize;
	size_t			len;

	if (qlt->qlt_bucketcnt[0] != 0) {
		bucket2K.dmem_nbufs = qlt->qlt_bucketcnt[0];
	}
	if (qlt->qlt_bucketcnt[1] != 0) {
		bucket8K.dmem_nbufs = qlt->qlt_bucketcnt[1];
	}
	if (qlt->qlt_bucketcnt[2] != 0) {
		bucket64K.dmem_nbufs = qlt->qlt_bucketcnt[2];
	}
	if (qlt->qlt_bucketcnt[3] != 0) {
		bucket128k.dmem_nbufs = qlt->qlt_bucketcnt[3];
	}
	if (qlt->qlt_bucketcnt[4] != 0) {
		bucket256k.dmem_nbufs = qlt->qlt_bucketcnt[4];
	}

	bsize = sizeof (dmem_buckets);
	ndx = (int)(bsize / sizeof (void *));
	/*
	 * The reason it is ndx - 1 everywhere is because the last bucket
	 * pointer is NULL.
	 */
	qlt->dmem_buckets = (qlt_dmem_bucket_t **)kmem_zalloc(bsize +
	    ((ndx - 1) * (int)sizeof (qlt_dmem_bucket_t)), KM_SLEEP);
	for (i = 0; i < (ndx - 1); i++) {
		qlt->dmem_buckets[i] = (qlt_dmem_bucket_t *)
		    ((uint8_t *)qlt->dmem_buckets + bsize +
		    (i * (int)sizeof (qlt_dmem_bucket_t)));
		bcopy(dmem_buckets[i], qlt->dmem_buckets[i],
		    sizeof (qlt_dmem_bucket_t));
	}
	bzero(&acc, sizeof (acc));
	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	acc.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	for (ndx = 0; (p = qlt->dmem_buckets[ndx]) != NULL; ndx++) {
		bctl = (qlt_dmem_bctl_t *)kmem_zalloc(p->dmem_nbufs *
		    sizeof (qlt_dmem_bctl_t), KM_NOSLEEP);
		if (bctl == NULL) {
			EL(qlt, "bctl==NULL\n");
			goto alloc_bctl_failed;
		}
		p->dmem_bctls_mem = bctl;
		mutex_init(&p->dmem_lock, NULL, MUTEX_DRIVER, NULL);
		if ((i = ddi_dma_alloc_handle(qlt->dip, &qlt_scsi_dma_attr,
		    DDI_DMA_SLEEP, 0, &p->dmem_dma_handle)) != DDI_SUCCESS) {
			EL(qlt, "ddi_dma_alloc_handle status=%xh\n", i);
			goto alloc_handle_failed;
		}

		total_mem = p->dmem_buf_size * p->dmem_nbufs;

		if ((i = ddi_dma_mem_alloc(p->dmem_dma_handle, total_mem, &acc,
		    DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0, (caddr_t *)&addr,
		    &len, &p->dmem_acc_handle)) != DDI_SUCCESS) {
			EL(qlt, "ddi_dma_mem_alloc status=%xh\n", i);
			goto mem_alloc_failed;
		}

		if ((i = ddi_dma_addr_bind_handle(p->dmem_dma_handle, NULL,
		    (caddr_t)addr, total_mem, DDI_DMA_RDWR | DDI_DMA_STREAMING,
		    DDI_DMA_DONTWAIT, 0, &cookie, &ncookie)) != DDI_SUCCESS) {
			EL(qlt, "ddi_dma_addr_bind_handle status=%xh\n", i);
			goto addr_bind_handle_failed;
		}
		if (ncookie != 1) {
			EL(qlt, "ncookie=%d\n", ncookie);
			goto dmem_init_failed;
		}

		p->dmem_host_addr = host_addr = addr;
		p->dmem_dev_addr = dev_addr = (uint64_t)cookie.dmac_laddress;
		bsize = p->dmem_buf_size;
		p->dmem_bctl_free_list = bctl;
		p->dmem_nbufs_free = p->dmem_nbufs;
		for (i = 0; i < p->dmem_nbufs; i++) {
			stmf_data_buf_t	*db;
			prev = bctl;
			bctl->bctl_bucket = p;
			bctl->bctl_buf = db = stmf_alloc(STMF_STRUCT_DATA_BUF,
			    0, 0);
			db->db_port_private = bctl;
			db->db_sglist[0].seg_addr = host_addr;
			bctl->bctl_dev_addr = dev_addr;
			db->db_sglist[0].seg_length = db->db_buf_size = bsize;
			db->db_sglist_length = 1;
			host_addr += bsize;
			dev_addr += bsize;
			bctl++;
			prev->bctl_next = bctl;
		}
		prev->bctl_next = NULL;
	}

	return (QLT_SUCCESS);

dmem_failure_loop:;
	bc = bctl;
	while (bc) {
		stmf_free(bc->bctl_buf);
		bc = bc->bctl_next;
	}
dmem_init_failed:;
	(void) ddi_dma_unbind_handle(p->dmem_dma_handle);
addr_bind_handle_failed:;
	ddi_dma_mem_free(&p->dmem_acc_handle);
mem_alloc_failed:;
	ddi_dma_free_handle(&p->dmem_dma_handle);
alloc_handle_failed:;
	kmem_free(p->dmem_bctls_mem, p->dmem_nbufs * sizeof (qlt_dmem_bctl_t));
	mutex_destroy(&p->dmem_lock);
alloc_bctl_failed:;
	if (--ndx >= 0) {
		p = qlt->dmem_buckets[ndx];
		bctl = p->dmem_bctl_free_list;
		goto dmem_failure_loop;
	}
	kmem_free(qlt->dmem_buckets, sizeof (dmem_buckets) +
	    (((sizeof (dmem_buckets)/sizeof (void *))-1)*
	    sizeof (qlt_dmem_bucket_t)));
	qlt->dmem_buckets = NULL;

	return (QLT_FAILURE);
}

/*
 * Create the cache of DMA handles used to bind LU-provided data buffers.
 */
void
qlt_dma_handle_pool_init(qlt_state_t *qlt)
{
	qlt_dma_handle_pool_t	*pool;

	pool = kmem_zalloc(sizeof (*pool), KM_SLEEP);
	mutex_init(&pool->pool_lock, NULL, MUTEX_DRIVER, NULL);
	qlt->qlt_dma_handle_pool = pool;
}

void
qlt_dma_handle_pool_fini(qlt_state_t *qlt)
{
	qlt_dma_handle_pool_t	*pool;
	qlt_dma_handle_t	*handle, *next_handle;

	pool = qlt->qlt_dma_handle_pool;
	mutex_enter(&pool->pool_lock);
	/*
	 * XXX Need to wait for free == total elements
	 * XXX Not sure how other driver shutdown stuff is done.
	 */
	ASSERT(pool->num_free == pool->num_total);
	if (pool->num_free != pool->num_total)
		cmn_err(CE_WARN,
		    "num_free %d != num_total %d\n",
		    pool->num_free, pool->num_total);
	handle = pool->free_list;
	while (handle) {
		next_handle = handle->next;
		kmem_free(handle, sizeof (*handle));
		handle = next_handle;
	}
	qlt->qlt_dma_handle_pool = NULL;
	mutex_destroy(&pool->pool_lock);
	kmem_free(pool, sizeof (*pool));
}

void
qlt_dmem_fini(qlt_state_t *qlt)
{
	qlt_dmem_bucket_t *p;
	qlt_dmem_bctl_t *bctl;
	int ndx;

	for (ndx = 0; (p = qlt->dmem_buckets[ndx]) != NULL; ndx++) {
		bctl = p->dmem_bctl_free_list;
		while (bctl) {
			stmf_free(bctl->bctl_buf);
			bctl = bctl->bctl_next;
		}
		bctl = p->dmem_bctl_free_list;
		(void) ddi_dma_unbind_handle(p->dmem_dma_handle);
		ddi_dma_mem_free(&p->dmem_acc_handle);
		ddi_dma_free_handle(&p->dmem_dma_handle);
		kmem_free(p->dmem_bctls_mem,
		    p->dmem_nbufs * sizeof (qlt_dmem_bctl_t));
		mutex_destroy(&p->dmem_lock);
	}
	kmem_free(qlt->dmem_buckets, sizeof (dmem_buckets) +
	    (((sizeof (dmem_buckets)/sizeof (void *))-1)*
	    sizeof (qlt_dmem_bucket_t)));
	qlt->dmem_buckets = NULL;
}

stmf_data_buf_t *
qlt_dmem_alloc(fct_local_port_t *port, uint32_t size, uint32_t *pminsize,
    uint32_t flags)
{
	return (qlt_i_dmem_alloc((qlt_state_t *)
	    port->port_fca_private, size, pminsize,
	    flags));
}

/* ARGSUSED */
stmf_data_buf_t *
qlt_i_dmem_alloc(qlt_state_t *qlt, uint32_t size, uint32_t *pminsize,
    uint32_t flags)
{
	qlt_dmem_bucket_t	*p;
	qlt_dmem_bctl_t		*bctl;
	int			i;
	uint32_t		size_possible = 0;

	if (size > QLT_DMEM_MAX_BUF_SIZE) {
		goto qlt_try_partial_alloc;
	}

	/* First, try to do a full allocation. */
	for (i = 0; (p = qlt->dmem_buckets[i]) != NULL; i++) {
		if (p->dmem_buf_size >= size) {
			if (p->dmem_nbufs_free) {
				mutex_enter(&p->dmem_lock);
				bctl = p->dmem_bctl_free_list;
				if (bctl == NULL) {
					mutex_exit(&p->dmem_lock);
					continue;
				}
				p->dmem_bctl_free_list =
				    bctl->bctl_next;
				p->dmem_nbufs_free--;
				qlt->qlt_bufref[i]++;
				mutex_exit(&p->dmem_lock);
				bctl->bctl_buf->db_data_size = size;
				return (bctl->bctl_buf);
			} else {
				qlt->qlt_bumpbucket++;
			}
		}
	}

qlt_try_partial_alloc:

	qlt->qlt_pmintry++;

	/* Now go from high to low */
	for (i = QLT_DMEM_NBUCKETS - 1; i >= 0; i--) {
		p = qlt->dmem_buckets[i];
		if (p->dmem_nbufs_free == 0)
			continue;
		if (!size_possible) {
			size_possible = p->dmem_buf_size;
		}
		if (*pminsize > p->dmem_buf_size) {
			/* At this point we know the request is failing. */
			if (size_possible) {
				/*
				 * This caller is asking too much. We already
				 * know what we can give, so get out.
				 */
				break;
			} else {
				/*
				 * Let's continue, to find out and report what
				 * we can give.
				 */
				continue;
			}
		}
		mutex_enter(&p->dmem_lock);
		if (*pminsize <= p->dmem_buf_size) {
			bctl = p->dmem_bctl_free_list;
			if (bctl == NULL) {
				/* Someone took it. */
				size_possible = 0;
				mutex_exit(&p->dmem_lock);
				continue;
			}
			p->dmem_bctl_free_list = bctl->bctl_next;
			p->dmem_nbufs_free--;
			mutex_exit(&p->dmem_lock);
			bctl->bctl_buf->db_data_size = p->dmem_buf_size;
			qlt->qlt_pmin_ok++;
			return (bctl->bctl_buf);
		}
	}

	*pminsize = size_possible;

	return (NULL);
}

/* ARGSUSED */
void
qlt_i_dmem_free(qlt_state_t *qlt, stmf_data_buf_t *dbuf)
{
	qlt_dmem_free(0, dbuf);
}

/* ARGSUSED */
void
qlt_dmem_free(fct_dbuf_store_t *fds, stmf_data_buf_t *dbuf)
{
	qlt_dmem_bctl_t *bctl;
	qlt_dmem_bucket_t *p;

	ASSERT((dbuf->db_flags & DB_LU_DATA_BUF) == 0);

	bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
	p = bctl->bctl_bucket;
	mutex_enter(&p->dmem_lock);
	bctl->bctl_next = p->dmem_bctl_free_list;
	p->dmem_bctl_free_list = bctl;
	p->dmem_nbufs_free++;
	mutex_exit(&p->dmem_lock);
}

void
qlt_dmem_dma_sync(stmf_data_buf_t *dbuf, uint_t sync_type)
{
	qlt_dmem_bctl_t *bctl;
	qlt_dma_sgl_t *qsgl;
	qlt_dmem_bucket_t *p;
	qlt_dma_handle_t *th;
	int rv;

	if (dbuf->db_flags & DB_LU_DATA_BUF) {
		/*
		 * go through ddi handle list
		 */
		qsgl = (qlt_dma_sgl_t *)dbuf->db_port_private;
		th = qsgl->handle_list;
		while (th) {
			rv = ddi_dma_sync(th->dma_handle,
			    0, 0, sync_type);
			if (rv != DDI_SUCCESS) {
				cmn_err(CE_WARN, "ddi_dma_sync FAILED\n");
			}
			th = th->next;
		}
	} else {
		bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
		p = bctl->bctl_bucket;
		(void) ddi_dma_sync(p->dmem_dma_handle, (off_t)
		    (bctl->bctl_dev_addr - p->dmem_dev_addr),
		    dbuf->db_data_size, sync_type);
	}
}

/*
 * A very lite version of ddi_dma_addr_bind_handle()
 */
uint64_t
qlt_ddi_vtop(caddr_t vaddr)
{
	uint64_t offset, paddr;
	pfn_t pfn;

	pfn = hat_getpfnum(kas.a_hat, vaddr);
	ASSERT(pfn != PFN_INVALID && pfn != PFN_SUSPENDED);
	offset = ((uintptr_t)vaddr) & MMU_PAGEOFFSET;
	paddr = mmu_ptob(pfn);
	return (paddr+offset);
}

static ddi_dma_attr_t qlt_sgl_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xffffffffffffffff,	/* high DMA address range */
	0xffffffff,		/* DMA counter register */
	64,			/* DMA address alignment */
	0xff,			/* DMA burstsizes */
	1,			/* min effective DMA size */
	0xffffffff,		/* max DMA xfer size */
	0xffffffff,		/* segment boundary */
	QLT_DMA_SG_LIST_LENGTH,	/* s/g list length */
	1,			/* granularity of device */
	0			/* DMA transfer flags */
};

/*
 * Allocate a qlt_dma_handle container and fill it with a ddi_dma_handle
 */
static qlt_dma_handle_t *
qlt_dma_alloc_handle(qlt_state_t *qlt)
{
	ddi_dma_handle_t ddi_handle;
	qlt_dma_handle_t *qlt_handle;
	int rv;

	rv = ddi_dma_alloc_handle(qlt->dip, &qlt_sgl_dma_attr,
	    DDI_DMA_SLEEP, 0, &ddi_handle);
	if (rv != DDI_SUCCESS) {
		EL(qlt, "ddi_dma_alloc_handle status=%xh\n", rv);
		return (NULL);
	}
	qlt_handle = kmem_zalloc(sizeof (qlt_dma_handle_t), KM_SLEEP);
	qlt_handle->dma_handle = ddi_handle;
	return (qlt_handle);
}

/*
 * Allocate a list of qlt_dma_handle containers from the free list
 */
static qlt_dma_handle_t *
qlt_dma_alloc_handle_list(qlt_state_t *qlt, int handle_count)
{
	qlt_dma_handle_pool_t	*pool;
	qlt_dma_handle_t	*tmp_handle, *first_handle, *last_handle;
	int i;

	/*
	 * Make sure the free list can satisfy the request.
	 * Once the free list is primed, it should satisfy most requests.
	 * XXX Should there be a limit on pool size?
	 */
	pool = qlt->qlt_dma_handle_pool;
	mutex_enter(&pool->pool_lock);
	while (handle_count > pool->num_free) {
		mutex_exit(&pool->pool_lock);
		if ((tmp_handle = qlt_dma_alloc_handle(qlt)) == NULL)
			return (NULL);
		mutex_enter(&pool->pool_lock);
		tmp_handle->next = pool->free_list;
		pool->free_list = tmp_handle;
		pool->num_free++;
		pool->num_total++;
	}

	/*
	 * The free list lock is held and the list is large enough to
	 * satisfy this request. Run down the freelist and snip off
	 * the number of elements needed for this request.
	 */
	first_handle = pool->free_list;
	tmp_handle = first_handle;
	for (i = 0; i < handle_count; i++) {
		last_handle = tmp_handle;
		tmp_handle = tmp_handle->next;
	}
	pool->free_list = tmp_handle;
	pool->num_free -= handle_count;
	mutex_exit(&pool->pool_lock);
	last_handle->next = NULL;	/* sanity */
	return (first_handle);
}

/*
 * Return a list of qlt_dma_handle containers to the free list.
 */
static void
qlt_dma_free_handles(qlt_state_t *qlt, qlt_dma_handle_t *first_handle)
{
	qlt_dma_handle_pool_t *pool;
	qlt_dma_handle_t *tmp_handle, *last_handle;
	int rv, handle_count;

	/*
	 * Traverse the list and unbind the handles
	 */
	ASSERT(first_handle);
	tmp_handle = first_handle;
	handle_count = 0;
	while (tmp_handle != NULL) {
		last_handle = tmp_handle;
		/*
		 * If the handle is bound, unbind the handle so it can be
		 * reused. It may not be bound if there was a bind failure.
		 */
		if (tmp_handle->num_cookies != 0) {
			rv = ddi_dma_unbind_handle(tmp_handle->dma_handle);
			ASSERT(rv == DDI_SUCCESS);
			if (rv == DDI_SUCCESS) {
				tmp_handle->num_cookies = 0;
				tmp_handle->num_cookies_fetched = 0;
			}
		}
		tmp_handle = tmp_handle->next;
		handle_count++;
	}
	/*
	 * Insert this list into the free list
	 */
	pool = qlt->qlt_dma_handle_pool;
	mutex_enter(&pool->pool_lock);
	last_handle->next = pool->free_list;
	pool->free_list = first_handle;
	pool->num_free += handle_count;
	mutex_exit(&pool->pool_lock);
}

/*
 * Return the number of cookies produced by mapping this dbuf.
 */
uint16_t
qlt_get_cookie_count(stmf_data_buf_t *dbuf)
{
	qlt_dma_sgl_t *qsgl = dbuf->db_port_private;

	ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);
	return (qsgl->cookie_count);
}

ddi_dma_cookie_t
*qlt_get_cookie_array(stmf_data_buf_t *dbuf)
{
	qlt_dma_sgl_t *qsgl = dbuf->db_port_private;

	ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);

	if (qsgl->cookie_prefetched)
		return (&qsgl->cookie[0]);
	else
		return (NULL);
}

/*
 * Wrapper around ddi_dma_nextcookie that hides the ddi_dma_handle usage.
 */
void
qlt_ddi_dma_nextcookie(stmf_data_buf_t *dbuf, ddi_dma_cookie_t *cookiep)
{
	qlt_dma_sgl_t *qsgl = dbuf->db_port_private;

	ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);

	if (qsgl->cookie_prefetched) {
		ASSERT(qsgl->cookie_next_fetch < qsgl->cookie_count);
		*cookiep = qsgl->cookie[qsgl->cookie_next_fetch++];
	} else {
		qlt_dma_handle_t *fetch;
		qlt_dma_handle_t *FETCH_DONE = (qlt_dma_handle_t *)0xbad;

		ASSERT(qsgl->handle_list != NULL);
		ASSERT(qsgl->handle_next_fetch != FETCH_DONE);

		fetch = qsgl->handle_next_fetch;
		if (fetch->num_cookies_fetched == 0) {
			*cookiep = fetch->first_cookie;
		} else {
			ddi_dma_nextcookie(fetch->dma_handle, cookiep);
		}
		if (++fetch->num_cookies_fetched == fetch->num_cookies) {
			if (fetch->next == NULL)
				qsgl->handle_next_fetch = FETCH_DONE;
			else
				qsgl->handle_next_fetch = fetch->next;
		}
	}
}

/*
 * Set this flag to fetch the DDI dma cookies from the handles here and
 * store them in the port private area of the dbuf. This will allow
 * faster access to the cookies in qlt_xfer_scsi_data() at the expense of
 * an extra copy. If the qlt->req_lock is hot, this may help.
 */
uint16_t qlt_sgl_prefetch = 0;

/*ARGSUSED*/
stmf_status_t
qlt_dma_setup_dbuf(fct_local_port_t *port, stmf_data_buf_t *dbuf,
    uint32_t flags)
{
	qlt_state_t *qlt = port->port_fca_private;
	qlt_dma_sgl_t *qsgl;
	struct stmf_sglist_ent *sglp;
	qlt_dma_handle_t *handle_list, *th;
	int i, rv;
	ddi_dma_cookie_t *cookie_p;
	int numbufs;
	uint16_t cookie_count;
	uint16_t prefetch;
	size_t qsize;

	/*
	 * Pseudo code:
	 * get dma handle list from cache - one per sglist entry
	 * foreach sglist entry
	 *	bind dma handle to sglist vaddr
	 * allocate space for DMA state to store in db_port_private
	 * fill in port private object
	 * if prefetching
	 *	move all dma cookies into db_port_private
	 */
	dbuf->db_port_private = NULL;
	numbufs = dbuf->db_sglist_length;
	handle_list = qlt_dma_alloc_handle_list(qlt, numbufs);
	if (handle_list == NULL) {
		EL(qlt, "handle_list==NULL\n");
		return (STMF_FAILURE);
	}
	/*
	 * Loop through sglist and bind each entry to a handle
	 */
	th = handle_list;
	sglp = &dbuf->db_sglist[0];
	cookie_count = 0;
	for (i = 0; i < numbufs; i++, sglp++) {

		/*
		 * Bind this sgl entry to a DDI dma handle
		 */
		if ((rv = ddi_dma_addr_bind_handle(
		    th->dma_handle,
		    NULL,
		    (caddr_t)(sglp->seg_addr),
		    (size_t)sglp->seg_length,
		    DDI_DMA_RDWR | DDI_DMA_STREAMING,
		    DDI_DMA_DONTWAIT,
		    NULL,
		    &th->first_cookie,
		    &th->num_cookies)) != DDI_DMA_MAPPED) {
			cmn_err(CE_NOTE, "ddi_dma_addr_bind_handle %d", rv);
			qlt_dma_free_handles(qlt, handle_list);
			return (STMF_FAILURE);
		}

		/*
		 * Add to total cookie count
		 */
		cookie_count += th->num_cookies;
		if (cookie_count > QLT_DMA_SG_LIST_LENGTH) {
			/*
			 * Request exceeds HBA limit
			 */
			qlt_dma_free_handles(qlt, handle_list);
			return (STMF_FAILURE);
		}
		/* move to next ddi_dma_handle */
		th = th->next;
	}

	/*
	 * Allocate our port private object for DMA mapping state.
	 */
	prefetch = qlt_sgl_prefetch;
	qsize = sizeof (qlt_dma_sgl_t);
	if (prefetch) {
		/* one extra ddi_dma_cookie allocated for alignment padding */
		qsize += cookie_count * sizeof (ddi_dma_cookie_t);
	}
	qsgl = kmem_alloc(qsize, KM_SLEEP);
	/*
	 * Fill in the sgl
	 */
	dbuf->db_port_private = qsgl;
	qsgl->qsize = qsize;
	qsgl->handle_count = dbuf->db_sglist_length;
	qsgl->cookie_prefetched = prefetch;
	qsgl->cookie_count = cookie_count;
	qsgl->cookie_next_fetch = 0;
	qsgl->handle_list = handle_list;
	qsgl->handle_next_fetch = handle_list;
	if (prefetch) {
		/*
		 * traverse handle list and move cookies to db_port_private
		 */
		th = handle_list;
		cookie_p = &qsgl->cookie[0];
		for (i = 0; i < numbufs; i++) {
			uint_t cc = th->num_cookies;

			*cookie_p++ = th->first_cookie;
			while (--cc > 0) {
				ddi_dma_nextcookie(th->dma_handle, cookie_p++);
			}
			th->num_cookies_fetched = th->num_cookies;
			th = th->next;
		}
	}

	return (STMF_SUCCESS);
}

void
qlt_dma_teardown_dbuf(fct_dbuf_store_t *fds, stmf_data_buf_t *dbuf)
{
	qlt_state_t *qlt = fds->fds_fca_private;
	qlt_dma_sgl_t *qsgl = dbuf->db_port_private;

	ASSERT(qlt);
	ASSERT(qsgl);
	ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);

	/*
	 * unbind and free the dma handles
	 */
	if (qsgl->handle_list) {
		/* go through ddi handle list */
		qlt_dma_free_handles(qlt, qsgl->handle_list);
	}
	kmem_free(qsgl, qsgl->qsize);
}

/*
 * Compute how many IOCBs are needed to carry cookie_count data segments:
 * the command IOCB holds CMD7_2400_DATA_SEGMENTS segments and each
 * continuation IOCB holds CONT_A64_DATA_SEGMENTS more.
 */
uint8_t
qlt_get_iocb_count(uint32_t cookie_count)
{
	uint32_t	cnt, cont_segs;
	uint8_t		iocb_count;

	iocb_count = 1;
	cnt = CMD7_2400_DATA_SEGMENTS;
	cont_segs = CONT_A64_DATA_SEGMENTS;

	if (cookie_count > cnt) {
		cnt = cookie_count - cnt;
		iocb_count = (uint8_t)(iocb_count + cnt / cont_segs);
		if (cnt % cont_segs) {
			iocb_count++;
		}
	}
	return (iocb_count);
}
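
/*
 * Illustrative sketch only, never compiled: one way an FCT-layer caller
 * could drive the dmem bucket allocator above.  The function name, the
 * transfer size, and the minimum size are hypothetical and exist purely
 * to show the alloc/sync/free sequence; real callers go through the
 * fct_dbuf_store_t entry points.
 */
#if 0
static void
qlt_dmem_usage_sketch(fct_local_port_t *port)
{
	stmf_data_buf_t	*dbuf;
	uint32_t	minsize = 2048;	/* smallest size we would accept */

	/*
	 * Ask for a 64k buffer.  On failure, *pminsize is rewritten to
	 * the size the allocator could have provided instead.
	 */
	dbuf = qlt_dmem_alloc(port, 65536, &minsize, 0);
	if (dbuf == NULL)
		return;

	/* ... stage outbound data at dbuf->db_sglist[0].seg_addr ... */

	/* Flush CPU stores before the HBA reads the buffer. */
	qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORDEV);

	/* Return the buffer to its bucket's free list (fds is unused). */
	qlt_dmem_free(NULL, dbuf);
}
#endif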