1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at 9 * http://www.opensource.org/licenses/cddl1.txt. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2004-2011 Emulex. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #include <emlxs.h> 28 29 /* #define EMLXS_POOL_DEBUG */ 30 31 EMLXS_MSG_DEF(EMLXS_MEM_C); 32 33 34 static uint32_t emlxs_mem_pool_alloc(emlxs_hba_t *hba, MEMSEG *seg, 35 uint32_t count); 36 static void emlxs_mem_pool_free(emlxs_hba_t *hba, MEMSEG *seg, uint32_t count); 37 38 39 extern int32_t 40 emlxs_mem_alloc_buffer(emlxs_hba_t *hba) 41 { 42 emlxs_port_t *port = &PPORT; 43 emlxs_config_t *cfg; 44 MBUF_INFO *buf_info; 45 MEMSEG *seg; 46 MBUF_INFO bufinfo; 47 int32_t i; 48 MATCHMAP *mp; 49 MATCHMAP **bpl_table; 50 51 buf_info = &bufinfo; 52 cfg = &CFG; 53 54 bzero(hba->memseg, sizeof (hba->memseg)); 55 56 /* Allocate the fc_table */ 57 bzero(buf_info, sizeof (MBUF_INFO)); 58 buf_info->size = (hba->max_iotag * sizeof (emlxs_buf_t *)); 59 60 (void) emlxs_mem_alloc(hba, buf_info); 61 if (buf_info->virt == NULL) { 62 63 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 64 "fc_table buffer."); 65 66 goto failed; 67 } 68 hba->fc_table = buf_info->virt; 69 bzero(hba->fc_table, buf_info->size); 70 71 /* Prepare the memory pools */ 72 for (i = 0; i < FC_MAX_SEG; i++) { 73 seg = &hba->memseg[i]; 74 75 switch (i) { 76 case MEM_NLP: 77 (void) strlcpy(seg->fc_label, "Node Pool", 78 sizeof (seg->fc_label)); 79 seg->fc_memtag = MEM_NLP; 80 seg->fc_memsize = sizeof (NODELIST); 81 seg->fc_hi_water = hba->max_nodes + 2; 82 seg->fc_lo_water = 2; 83 seg->fc_step = 1; 84 break; 85 86 case MEM_IOCB: 87 (void) strlcpy(seg->fc_label, "IOCB Pool", 88 sizeof (seg->fc_label)); 89 seg->fc_memtag = MEM_IOCB; 90 seg->fc_memsize = sizeof (IOCBQ); 91 seg->fc_hi_water = cfg[CFG_NUM_IOCBS].current; 92 seg->fc_lo_water = cfg[CFG_NUM_IOCBS].low; 93 seg->fc_step = cfg[CFG_NUM_IOCBS].low; 94 break; 95 96 case MEM_MBOX: 97 (void) strlcpy(seg->fc_label, "MBOX Pool", 98 sizeof (seg->fc_label)); 99 seg->fc_memtag = MEM_MBOX; 100 seg->fc_memsize = sizeof (MAILBOXQ); 101 seg->fc_hi_water = hba->max_nodes + 32; 102 seg->fc_lo_water = 32; 103 seg->fc_step = 1; 104 break; 105 106 case MEM_BPL: 107 if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) { 108 continue; 109 } 110 (void) strlcpy(seg->fc_label, "BPL Pool", 111 sizeof (seg->fc_label)); 112 seg->fc_memtag = MEM_BPL; 113 seg->fc_memsize = hba->sli.sli3.mem_bpl_size; 114 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG; 115 seg->fc_memalign = 32; 116 seg->fc_hi_water = hba->max_iotag; 117 seg->fc_lo_water = cfg[CFG_NUM_IOCBS].low; 118 seg->fc_step = cfg[CFG_NUM_IOCBS].low; 119 break; 120 121 case MEM_BUF: 122 /* These are the unsolicited ELS buffers. */ 123 (void) strlcpy(seg->fc_label, "BUF Pool", 124 sizeof (seg->fc_label)); 125 seg->fc_memtag = MEM_BUF; 126 seg->fc_memsize = MEM_BUF_SIZE; 127 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG; 128 seg->fc_memalign = 32; 129 seg->fc_hi_water = MEM_ELSBUF_COUNT + MEM_BUF_COUNT; 130 seg->fc_lo_water = MEM_ELSBUF_COUNT; 131 seg->fc_step = 1; 132 break; 133 134 case MEM_IPBUF: 135 /* These are the unsolicited IP buffers. */ 136 if (cfg[CFG_NETWORK_ON].current == 0) { 137 continue; 138 } 139 140 (void) strlcpy(seg->fc_label, "IPBUF Pool", 141 sizeof (seg->fc_label)); 142 seg->fc_memtag = MEM_IPBUF; 143 seg->fc_memsize = MEM_IPBUF_SIZE; 144 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG; 145 seg->fc_memalign = 32; 146 seg->fc_hi_water = MEM_IPBUF_COUNT; 147 seg->fc_lo_water = 0; 148 seg->fc_step = 4; 149 break; 150 151 case MEM_CTBUF: 152 /* These are the unsolicited CT buffers. */ 153 (void) strlcpy(seg->fc_label, "CTBUF Pool", 154 sizeof (seg->fc_label)); 155 seg->fc_memtag = MEM_CTBUF; 156 seg->fc_memsize = MEM_CTBUF_SIZE; 157 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG; 158 seg->fc_memalign = 32; 159 seg->fc_hi_water = MEM_CTBUF_COUNT; 160 seg->fc_lo_water = MEM_CTBUF_COUNT; 161 seg->fc_step = 1; 162 break; 163 164 #ifdef SFCT_SUPPORT 165 case MEM_FCTBUF: 166 /* These are the unsolicited FCT buffers. */ 167 if (!(port->flag & EMLXS_TGT_ENABLED)) { 168 continue; 169 } 170 171 (void) strlcpy(seg->fc_label, "FCTBUF Pool", 172 sizeof (seg->fc_label)); 173 seg->fc_memtag = MEM_FCTBUF; 174 seg->fc_memsize = MEM_FCTBUF_SIZE; 175 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG; 176 seg->fc_memalign = 32; 177 seg->fc_hi_water = MEM_FCTBUF_COUNT; 178 seg->fc_lo_water = 0; 179 seg->fc_step = 8; 180 break; 181 #endif /* SFCT_SUPPORT */ 182 183 default: 184 continue; 185 } 186 187 if (seg->fc_memsize == 0) { 188 continue; 189 } 190 191 (void) emlxs_mem_pool_create(hba, seg); 192 193 if (seg->fc_numblks < seg->fc_lo_water) { 194 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 195 "%s: count=%d size=%d flags=%x lo=%d hi=%d", 196 seg->fc_label, seg->fc_numblks, 197 seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water, 198 seg->fc_hi_water); 199 200 goto failed; 201 } 202 } 203 204 hba->sli.sli3.bpl_table = NULL; 205 seg = &hba->memseg[MEM_BPL]; 206 207 /* If SLI3 and MEM_BPL pool is static */ 208 if (!(hba->model_info.sli_mask & EMLXS_SLI4_MASK) && 209 !(seg->fc_memflag & FC_MEMSEG_DYNAMIC)) { 210 /* 211 * Allocate and Initialize bpl_table 212 * This is for increased performance. 213 */ 214 bzero(buf_info, sizeof (MBUF_INFO)); 215 buf_info->size = hba->max_iotag * sizeof (MATCHMAP *); 216 217 (void) emlxs_mem_alloc(hba, buf_info); 218 if (buf_info->virt == NULL) { 219 220 EMLXS_MSGF(EMLXS_CONTEXT, 221 &emlxs_mem_alloc_failed_msg, 222 "BPL table buffer."); 223 224 goto failed; 225 } 226 hba->sli.sli3.bpl_table = buf_info->virt; 227 228 bpl_table = (MATCHMAP**)hba->sli.sli3.bpl_table; 229 for (i = 0; i < hba->max_iotag; i++) { 230 mp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BPL); 231 mp->flag |= MAP_TABLE_ALLOCATED; 232 bpl_table[i] = mp; 233 } 234 } 235 236 return (1); 237 238 failed: 239 240 (void) emlxs_mem_free_buffer(hba); 241 return (0); 242 243 } /* emlxs_mem_alloc_buffer() */ 244 245 246 /* 247 * emlxs_mem_free_buffer 248 * 249 * This routine will free iocb/data buffer space 250 * and TGTM resource. 251 */ 252 extern int 253 emlxs_mem_free_buffer(emlxs_hba_t *hba) 254 { 255 emlxs_port_t *port = &PPORT; 256 emlxs_port_t *vport; 257 int32_t j; 258 MATCHMAP *mp; 259 CHANNEL *cp; 260 RING *rp; 261 MBUF_INFO *buf_info; 262 MBUF_INFO bufinfo; 263 MATCHMAP **bpl_table; 264 265 buf_info = &bufinfo; 266 267 for (j = 0; j < hba->chan_count; j++) { 268 cp = &hba->chan[j]; 269 270 /* Flush the ring */ 271 (void) emlxs_tx_channel_flush(hba, cp, 0); 272 } 273 274 if (!(hba->model_info.sli_mask & EMLXS_SLI4_MASK)) { 275 /* free the mapped address match area for each ring */ 276 for (j = 0; j < MAX_RINGS; j++) { 277 rp = &hba->sli.sli3.ring[j]; 278 279 while (rp->fc_mpoff) { 280 uint64_t addr; 281 282 addr = 0; 283 mp = (MATCHMAP *)(rp->fc_mpoff); 284 285 if ((j == hba->channel_els) || 286 (j == hba->channel_ct) || 287 #ifdef SFCT_SUPPORT 288 (j == hba->CHANNEL_FCT) || 289 #endif /* SFCT_SUPPORT */ 290 (j == hba->channel_ip)) { 291 addr = mp->phys; 292 } 293 294 if ((mp = emlxs_mem_get_vaddr(hba, rp, addr))) { 295 if (j == hba->channel_els) { 296 emlxs_mem_put(hba, 297 MEM_ELSBUF, (void *)mp); 298 } else if (j == hba->channel_ct) { 299 emlxs_mem_put(hba, 300 MEM_CTBUF, (void *)mp); 301 } else if (j == hba->channel_ip) { 302 emlxs_mem_put(hba, 303 MEM_IPBUF, (void *)mp); 304 } 305 #ifdef SFCT_SUPPORT 306 else if (j == hba->CHANNEL_FCT) { 307 emlxs_mem_put(hba, 308 MEM_FCTBUF, (void *)mp); 309 } 310 #endif /* SFCT_SUPPORT */ 311 312 } 313 } 314 } 315 } 316 317 if (hba->flag & FC_HBQ_ENABLED) { 318 emlxs_hbq_free_all(hba, EMLXS_ELS_HBQ_ID); 319 emlxs_hbq_free_all(hba, EMLXS_IP_HBQ_ID); 320 emlxs_hbq_free_all(hba, EMLXS_CT_HBQ_ID); 321 322 if (port->flag & EMLXS_TGT_ENABLED) { 323 emlxs_hbq_free_all(hba, EMLXS_FCT_HBQ_ID); 324 } 325 } 326 327 /* Free the nodes */ 328 for (j = 0; j < MAX_VPORTS; j++) { 329 vport = &VPORT(j); 330 if (vport->node_count) { 331 emlxs_node_destroy_all(vport); 332 } 333 } 334 335 /* Make sure the mailbox queue is empty */ 336 emlxs_mb_flush(hba); 337 338 if (hba->fc_table) { 339 bzero(buf_info, sizeof (MBUF_INFO)); 340 buf_info->size = hba->max_iotag * sizeof (emlxs_buf_t *); 341 buf_info->virt = hba->fc_table; 342 emlxs_mem_free(hba, buf_info); 343 hba->fc_table = NULL; 344 } 345 346 if (hba->sli.sli3.bpl_table) { 347 /* Return MEM_BPLs to their pool */ 348 bpl_table = (MATCHMAP**)hba->sli.sli3.bpl_table; 349 for (j = 0; j < hba->max_iotag; j++) { 350 mp = bpl_table[j]; 351 mp->flag &= ~MAP_TABLE_ALLOCATED; 352 emlxs_mem_put(hba, MEM_BPL, (void*)mp); 353 } 354 355 bzero(buf_info, sizeof (MBUF_INFO)); 356 buf_info->size = hba->max_iotag * sizeof (MATCHMAP *); 357 buf_info->virt = hba->sli.sli3.bpl_table; 358 emlxs_mem_free(hba, buf_info); 359 hba->sli.sli3.bpl_table = NULL; 360 } 361 362 /* Free the memory segments */ 363 for (j = 0; j < FC_MAX_SEG; j++) { 364 emlxs_mem_pool_destroy(hba, &hba->memseg[j]); 365 } 366 367 return (0); 368 369 } /* emlxs_mem_free_buffer() */ 370 371 372 /* Must hold EMLXS_MEMGET_LOCK when calling */ 373 static uint32_t 374 emlxs_mem_pool_alloc(emlxs_hba_t *hba, MEMSEG *seg, uint32_t count) 375 { 376 emlxs_port_t *port = &PPORT; 377 uint8_t *bp = NULL; 378 MATCHMAP *mp = NULL; 379 MBUF_INFO *buf_info; 380 MBUF_INFO local_buf_info; 381 uint32_t i; 382 uint32_t fc_numblks; 383 384 if (seg->fc_memsize == 0) { 385 return (0); 386 } 387 388 if (seg->fc_numblks >= seg->fc_hi_water) { 389 return (0); 390 } 391 392 if (count == 0) { 393 return (0); 394 } 395 396 if (count > (seg->fc_hi_water - seg->fc_numblks)) { 397 count = (seg->fc_hi_water - seg->fc_numblks); 398 } 399 400 buf_info = &local_buf_info; 401 fc_numblks = seg->fc_numblks; 402 403 /* Check for initial allocation */ 404 if (!(seg->fc_memflag & FC_MEMSEG_PUT_ENABLED)) { 405 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg, 406 "%s alloc:%d n=%d s=%d f=%x l=%d,%d,%d " 407 "f=%d:%d", 408 seg->fc_label, count, seg->fc_numblks, 409 seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water, 410 seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt, 411 seg->fc_low); 412 } 413 414 if (!(seg->fc_memflag & FC_MBUF_DMA)) { 415 goto vmem_pool; 416 } 417 418 /* dma_pool */ 419 420 for (i = 0; i < count; i++) { 421 bzero(buf_info, sizeof (MBUF_INFO)); 422 buf_info->size = sizeof (MATCHMAP); 423 buf_info->align = sizeof (void *); 424 425 (void) emlxs_mem_alloc(hba, buf_info); 426 if (buf_info->virt == NULL) { 427 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 428 "%s: count=%d size=%d", 429 seg->fc_label, seg->fc_numblks, seg->fc_memsize); 430 431 goto done; 432 } 433 434 mp = (MATCHMAP *)buf_info->virt; 435 bzero(mp, sizeof (MATCHMAP)); 436 437 bzero(buf_info, sizeof (MBUF_INFO)); 438 buf_info->size = seg->fc_memsize; 439 buf_info->flags = seg->fc_memflag; 440 buf_info->align = seg->fc_memalign; 441 442 (void) emlxs_mem_alloc(hba, buf_info); 443 if (buf_info->virt == NULL) { 444 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 445 "%s: count=%d size=%d", 446 seg->fc_label, seg->fc_numblks, seg->fc_memsize); 447 448 /* Free the mp object */ 449 bzero(buf_info, sizeof (MBUF_INFO)); 450 buf_info->size = sizeof (MATCHMAP); 451 buf_info->virt = (void *)mp; 452 emlxs_mem_free(hba, buf_info); 453 454 goto done; 455 } 456 bp = (uint8_t *)buf_info->virt; 457 bzero(bp, seg->fc_memsize); 458 459 mp->virt = buf_info->virt; 460 mp->phys = buf_info->phys; 461 mp->size = buf_info->size; 462 mp->dma_handle = buf_info->dma_handle; 463 mp->data_handle = buf_info->data_handle; 464 mp->tag = seg->fc_memtag; 465 mp->segment = seg; 466 mp->flag |= MAP_POOL_ALLOCATED; 467 468 #ifdef SFCT_SUPPORT 469 if (mp->tag >= MEM_FCTSEG) { 470 if (emlxs_fct_stmf_alloc(hba, mp)) { 471 /* Free the DMA memory itself */ 472 emlxs_mem_free(hba, buf_info); 473 474 /* Free the mp object */ 475 bzero(buf_info, sizeof (MBUF_INFO)); 476 buf_info->size = sizeof (MATCHMAP); 477 buf_info->virt = (void *)mp; 478 emlxs_mem_free(hba, buf_info); 479 480 goto done; 481 } 482 } 483 #endif /* SFCT_SUPPORT */ 484 485 /* Add the buffer desc to the tail of the pool freelist */ 486 if (seg->fc_memget_end == NULL) { 487 seg->fc_memget_ptr = (uint8_t *)mp; 488 seg->fc_memget_cnt = 1; 489 } else { 490 *((uint8_t **)(seg->fc_memget_end)) = (uint8_t *)mp; 491 seg->fc_memget_cnt++; 492 } 493 seg->fc_memget_end = (uint8_t *)mp; 494 495 seg->fc_numblks++; 496 seg->fc_total_memsize += (seg->fc_memsize + sizeof (MATCHMAP)); 497 } 498 499 goto done; 500 501 vmem_pool: 502 503 for (i = 0; i < count; i++) { 504 bzero(buf_info, sizeof (MBUF_INFO)); 505 buf_info->size = seg->fc_memsize; 506 507 (void) emlxs_mem_alloc(hba, buf_info); 508 if (buf_info->virt == NULL) { 509 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 510 "%s: count=%d size=%d", 511 seg->fc_label, seg->fc_numblks, seg->fc_memsize); 512 513 goto done; 514 } 515 bp = (uint8_t *)buf_info->virt; 516 517 /* Add the buffer to the tail of the pool freelist */ 518 if (seg->fc_memget_end == NULL) { 519 seg->fc_memget_ptr = (uint8_t *)bp; 520 seg->fc_memget_cnt = 1; 521 } else { 522 *((uint8_t **)(seg->fc_memget_end)) = (uint8_t *)bp; 523 seg->fc_memget_cnt++; 524 } 525 seg->fc_memget_end = (uint8_t *)bp; 526 527 seg->fc_numblks++; 528 seg->fc_total_memsize += seg->fc_memsize; 529 } 530 531 done: 532 533 return ((seg->fc_numblks - fc_numblks)); 534 535 } /* emlxs_mem_pool_alloc() */ 536 537 538 /* Must hold EMLXS_MEMGET_LOCK & EMLXS_MEMPUT_LOCK when calling */ 539 static void 540 emlxs_mem_pool_free(emlxs_hba_t *hba, MEMSEG *seg, uint32_t count) 541 { 542 emlxs_port_t *port = &PPORT; 543 uint8_t *bp = NULL; 544 MATCHMAP *mp = NULL; 545 MBUF_INFO *buf_info; 546 MBUF_INFO local_buf_info; 547 548 if ((seg->fc_memsize == 0) || 549 (seg->fc_numblks == 0) || 550 (count == 0)) { 551 return; 552 } 553 554 /* Check max count */ 555 if (count > seg->fc_numblks) { 556 count = seg->fc_numblks; 557 } 558 559 /* Move memput list to memget list */ 560 if (seg->fc_memput_ptr) { 561 if (seg->fc_memget_end == NULL) { 562 seg->fc_memget_ptr = seg->fc_memput_ptr; 563 } else { 564 *((uint8_t **)(seg->fc_memget_end)) =\ 565 seg->fc_memput_ptr; 566 } 567 seg->fc_memget_end = seg->fc_memput_end; 568 seg->fc_memget_cnt += seg->fc_memput_cnt; 569 570 seg->fc_memput_ptr = NULL; 571 seg->fc_memput_end = NULL; 572 seg->fc_memput_cnt = 0; 573 } 574 575 buf_info = &local_buf_info; 576 577 /* Check for final deallocation */ 578 if (!(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) { 579 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg, 580 "%s free:%d n=%d s=%d f=%x l=%d,%d,%d " 581 "f=%d:%d", 582 seg->fc_label, count, seg->fc_numblks, 583 seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water, 584 seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt, 585 seg->fc_low); 586 } 587 588 if (!(seg->fc_memflag & FC_MBUF_DMA)) { 589 goto vmem_pool; 590 } 591 592 dma_pool: 593 594 /* Free memory associated with all buffers on get buffer pool */ 595 while (count && ((bp = seg->fc_memget_ptr) != NULL)) { 596 /* Remove buffer from list */ 597 if (seg->fc_memget_end == bp) { 598 seg->fc_memget_ptr = NULL; 599 seg->fc_memget_end = NULL; 600 seg->fc_memget_cnt = 0; 601 602 } else { 603 seg->fc_memget_ptr = *((uint8_t **)bp); 604 seg->fc_memget_cnt--; 605 } 606 mp = (MATCHMAP *)bp; 607 608 #ifdef SFCT_SUPPORT 609 if (mp->tag >= MEM_FCTSEG) { 610 emlxs_fct_stmf_free(hba, mp); 611 } 612 #endif /* SFCT_SUPPORT */ 613 614 /* Free the DMA memory itself */ 615 bzero(buf_info, sizeof (MBUF_INFO)); 616 buf_info->size = mp->size; 617 buf_info->virt = mp->virt; 618 buf_info->phys = mp->phys; 619 buf_info->dma_handle = mp->dma_handle; 620 buf_info->data_handle = mp->data_handle; 621 buf_info->flags = seg->fc_memflag; 622 emlxs_mem_free(hba, buf_info); 623 624 /* Free the handle */ 625 bzero(buf_info, sizeof (MBUF_INFO)); 626 buf_info->size = sizeof (MATCHMAP); 627 buf_info->virt = (void *)mp; 628 emlxs_mem_free(hba, buf_info); 629 630 seg->fc_numblks--; 631 seg->fc_total_memsize -= (seg->fc_memsize + sizeof (MATCHMAP)); 632 633 count--; 634 } 635 636 return; 637 638 vmem_pool: 639 640 /* Free memory associated with all buffers on get buffer pool */ 641 while (count && ((bp = seg->fc_memget_ptr) != NULL)) { 642 /* Remove buffer from list */ 643 if (seg->fc_memget_end == bp) { 644 seg->fc_memget_ptr = NULL; 645 seg->fc_memget_end = NULL; 646 seg->fc_memget_cnt = 0; 647 648 } else { 649 seg->fc_memget_ptr = *((uint8_t **)bp); 650 seg->fc_memget_cnt--; 651 } 652 653 /* Free the Virtual memory itself */ 654 bzero(buf_info, sizeof (MBUF_INFO)); 655 buf_info->size = seg->fc_memsize; 656 buf_info->virt = bp; 657 emlxs_mem_free(hba, buf_info); 658 659 seg->fc_numblks--; 660 seg->fc_total_memsize -= seg->fc_memsize; 661 662 count--; 663 } 664 665 return; 666 667 } /* emlxs_mem_pool_free() */ 668 669 670 extern uint32_t 671 emlxs_mem_pool_create(emlxs_hba_t *hba, MEMSEG *seg) 672 { 673 emlxs_config_t *cfg = &CFG; 674 675 mutex_enter(&EMLXS_MEMGET_LOCK); 676 mutex_enter(&EMLXS_MEMPUT_LOCK); 677 678 if (seg->fc_memsize == 0) { 679 mutex_exit(&EMLXS_MEMPUT_LOCK); 680 mutex_exit(&EMLXS_MEMGET_LOCK); 681 682 return (0); 683 } 684 685 /* Sanity check hi > lo */ 686 if (seg->fc_lo_water > seg->fc_hi_water) { 687 seg->fc_hi_water = seg->fc_lo_water; 688 } 689 690 /* If dynamic pools are disabled, then force pool to max level */ 691 if (cfg[CFG_MEM_DYNAMIC].current == 0) { 692 seg->fc_lo_water = seg->fc_hi_water; 693 } 694 695 /* If pool is dynamic, then fc_step must be >0 */ 696 /* Otherwise, fc_step must be 0 */ 697 if (seg->fc_lo_water != seg->fc_hi_water) { 698 seg->fc_memflag |= FC_MEMSEG_DYNAMIC; 699 700 if (seg->fc_step == 0) { 701 seg->fc_step = 1; 702 } 703 } else { 704 seg->fc_step = 0; 705 } 706 707 seg->fc_numblks = 0; 708 seg->fc_total_memsize = 0; 709 seg->fc_low = 0; 710 711 (void) emlxs_mem_pool_alloc(hba, seg, seg->fc_lo_water); 712 713 seg->fc_memflag |= (FC_MEMSEG_PUT_ENABLED|FC_MEMSEG_GET_ENABLED); 714 715 mutex_exit(&EMLXS_MEMPUT_LOCK); 716 mutex_exit(&EMLXS_MEMGET_LOCK); 717 718 return (seg->fc_numblks); 719 720 } /* emlxs_mem_pool_create() */ 721 722 723 extern void 724 emlxs_mem_pool_destroy(emlxs_hba_t *hba, MEMSEG *seg) 725 { 726 emlxs_port_t *port = &PPORT; 727 728 mutex_enter(&EMLXS_MEMGET_LOCK); 729 mutex_enter(&EMLXS_MEMPUT_LOCK); 730 731 if (seg->fc_memsize == 0) { 732 mutex_exit(&EMLXS_MEMPUT_LOCK); 733 mutex_exit(&EMLXS_MEMGET_LOCK); 734 return; 735 } 736 737 /* Leave FC_MEMSEG_PUT_ENABLED set for now */ 738 seg->fc_memflag &= ~FC_MEMSEG_GET_ENABLED; 739 740 /* Try to free all objects */ 741 emlxs_mem_pool_free(hba, seg, seg->fc_numblks); 742 743 if (seg->fc_numblks) { 744 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg, 745 "mem_pool_destroy: %s leak detected: " 746 "%d objects still allocated.", 747 seg->fc_label, seg->fc_numblks); 748 } else { 749 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg, 750 "mem_pool_destroy: %s destroyed.", 751 seg->fc_label); 752 753 /* Clear all */ 754 bzero(seg, sizeof (MEMSEG)); 755 } 756 757 mutex_exit(&EMLXS_MEMPUT_LOCK); 758 mutex_exit(&EMLXS_MEMGET_LOCK); 759 760 return; 761 762 } /* emlxs_mem_pool_destroy() */ 763 764 765 extern void 766 emlxs_mem_pool_clean(emlxs_hba_t *hba, MEMSEG *seg) 767 { 768 emlxs_port_t *port = &PPORT; 769 uint32_t clean_count; 770 uint32_t free_count; 771 uint32_t free_pad; 772 773 mutex_enter(&EMLXS_MEMGET_LOCK); 774 mutex_enter(&EMLXS_MEMPUT_LOCK); 775 776 if (!(seg->fc_memflag & FC_MEMSEG_DYNAMIC)) { 777 mutex_exit(&EMLXS_MEMPUT_LOCK); 778 mutex_exit(&EMLXS_MEMGET_LOCK); 779 return; 780 } 781 782 if (!(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) { 783 goto done; 784 } 785 786 #ifdef EMLXS_POOL_DEBUG 787 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg, 788 "%s clean: n=%d s=%d f=%x l=%d,%d,%d " 789 "f=%d:%d", 790 seg->fc_label, seg->fc_numblks, 791 seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water, 792 seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt, 793 seg->fc_low); 794 #endif /* EMLXS_POOL_DEBUG */ 795 796 /* Calculatge current free count */ 797 free_count = (seg->fc_memget_cnt + seg->fc_memput_cnt); 798 799 /* Reset fc_low value to current free count */ 800 clean_count = seg->fc_low; 801 seg->fc_low = free_count; 802 803 /* Return if pool is already at lo water mark */ 804 if (seg->fc_numblks <= seg->fc_lo_water) { 805 goto done; 806 } 807 808 /* Return if there is nothing to clean */ 809 if ((free_count == 0) || 810 (clean_count <= 1)) { 811 goto done; 812 } 813 814 /* Calculate a 3 percent free pad count (1 being minimum) */ 815 if (seg->fc_numblks > 66) { 816 free_pad = ((seg->fc_numblks * 3)/100); 817 } else { 818 free_pad = 1; 819 } 820 821 /* Return if fc_low is below pool free pad */ 822 if (clean_count <= free_pad) { 823 goto done; 824 } 825 826 clean_count -= free_pad; 827 828 /* clean_count can't exceed minimum pool levels */ 829 if (clean_count > (seg->fc_numblks - seg->fc_lo_water)) { 830 clean_count = (seg->fc_numblks - seg->fc_lo_water); 831 } 832 833 emlxs_mem_pool_free(hba, seg, clean_count); 834 835 done: 836 if (seg->fc_last != seg->fc_numblks) { 837 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg, 838 "%s update: n=%d->%d s=%d f=%x l=%d,%d,%d " 839 "f=%d:%d", 840 seg->fc_label, seg->fc_last, seg->fc_numblks, 841 seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water, 842 seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt, 843 seg->fc_low); 844 845 seg->fc_last = seg->fc_numblks; 846 } 847 848 mutex_exit(&EMLXS_MEMPUT_LOCK); 849 mutex_exit(&EMLXS_MEMGET_LOCK); 850 return; 851 852 } /* emlxs_mem_pool_clean() */ 853 854 855 extern void * 856 emlxs_mem_pool_get(emlxs_hba_t *hba, MEMSEG *seg) 857 { 858 emlxs_port_t *port = &PPORT; 859 void *bp = NULL; 860 MATCHMAP *mp; 861 uint32_t free_count; 862 863 mutex_enter(&EMLXS_MEMGET_LOCK); 864 865 /* Check if memory pool is GET enabled */ 866 if (!(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) { 867 mutex_exit(&EMLXS_MEMGET_LOCK); 868 return (NULL); 869 } 870 871 /* If no entries on memget list, then check memput list */ 872 if (!seg->fc_memget_ptr) { 873 mutex_enter(&EMLXS_MEMPUT_LOCK); 874 if (seg->fc_memput_ptr) { 875 /* 876 * Move list from memput to memget 877 */ 878 seg->fc_memget_ptr = seg->fc_memput_ptr; 879 seg->fc_memget_end = seg->fc_memput_end; 880 seg->fc_memget_cnt = seg->fc_memput_cnt; 881 seg->fc_memput_ptr = NULL; 882 seg->fc_memput_end = NULL; 883 seg->fc_memput_cnt = 0; 884 } 885 mutex_exit(&EMLXS_MEMPUT_LOCK); 886 } 887 888 /* If no entries on memget list, then pool is empty */ 889 /* Try to allocate more if pool is dynamic */ 890 if (!seg->fc_memget_ptr && 891 (seg->fc_memflag & FC_MEMSEG_DYNAMIC)) { 892 (void) emlxs_mem_pool_alloc(hba, seg, seg->fc_step); 893 seg->fc_low = 0; 894 } 895 896 /* If no entries on memget list, then pool is empty */ 897 if (!seg->fc_memget_ptr) { 898 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_alloc_failed_msg, 899 "%s empty.", seg->fc_label); 900 901 mutex_exit(&EMLXS_MEMGET_LOCK); 902 return (NULL); 903 } 904 905 /* Remove an entry from the get list */ 906 bp = seg->fc_memget_ptr; 907 908 if (seg->fc_memget_end == bp) { 909 seg->fc_memget_ptr = NULL; 910 seg->fc_memget_end = NULL; 911 seg->fc_memget_cnt = 0; 912 913 } else { 914 seg->fc_memget_ptr = *((uint8_t **)bp); 915 seg->fc_memget_cnt--; 916 } 917 918 /* Initialize buffer */ 919 if (!(seg->fc_memflag & FC_MBUF_DMA)) { 920 bzero(bp, seg->fc_memsize); 921 } else { 922 mp = (MATCHMAP *)bp; 923 mp->fc_mptr = NULL; 924 mp->flag |= MAP_POOL_ALLOCATED; 925 } 926 927 /* Set fc_low if pool is dynamic */ 928 if (seg->fc_memflag & FC_MEMSEG_DYNAMIC) { 929 free_count = (seg->fc_memget_cnt + seg->fc_memput_cnt); 930 if (free_count < seg->fc_low) { 931 seg->fc_low = free_count; 932 } 933 } 934 935 mutex_exit(&EMLXS_MEMGET_LOCK); 936 937 return (bp); 938 939 } /* emlxs_mem_pool_get() */ 940 941 942 extern void 943 emlxs_mem_pool_put(emlxs_hba_t *hba, MEMSEG *seg, void *bp) 944 { 945 emlxs_port_t *port = &PPORT; 946 MATCHMAP *mp; 947 948 /* Free the pool object */ 949 mutex_enter(&EMLXS_MEMPUT_LOCK); 950 951 /* Check if memory pool is PUT enabled */ 952 if (!(seg->fc_memflag & FC_MEMSEG_PUT_ENABLED)) { 953 mutex_exit(&EMLXS_MEMPUT_LOCK); 954 return; 955 } 956 957 /* Check if buffer was just freed */ 958 if ((seg->fc_memput_end == bp) || (seg->fc_memget_end == bp)) { 959 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg, 960 "%s: Freeing free object: bp=%p", seg->fc_label, bp); 961 962 mutex_exit(&EMLXS_MEMPUT_LOCK); 963 return; 964 } 965 966 /* Validate DMA buffer */ 967 if (seg->fc_memflag & FC_MBUF_DMA) { 968 mp = (MATCHMAP *)bp; 969 970 if (!(mp->flag & MAP_POOL_ALLOCATED) || 971 (mp->segment != seg)) { 972 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg, 973 "mem_pool_put: %s invalid: mp=%p " \ 974 "tag=0x%x flag=%x", seg->fc_label, 975 mp, mp->tag, mp->flag); 976 977 EMLXS_STATE_CHANGE(hba, FC_ERROR); 978 979 mutex_exit(&EMLXS_MEMPUT_LOCK); 980 981 emlxs_thread_spawn(hba, emlxs_shutdown_thread, 982 NULL, NULL); 983 984 return; 985 } 986 } 987 988 /* Release buffer to the end of the memput list */ 989 if (seg->fc_memput_end == NULL) { 990 seg->fc_memput_ptr = bp; 991 seg->fc_memput_cnt = 1; 992 } else { 993 *((void **)(seg->fc_memput_end)) = bp; 994 seg->fc_memput_cnt++; 995 } 996 seg->fc_memput_end = bp; 997 *((void **)(bp)) = NULL; 998 999 mutex_exit(&EMLXS_MEMPUT_LOCK); 1000 1001 /* This is for late PUT's after an initial */ 1002 /* emlxs_mem_pool_destroy call */ 1003 if ((seg->fc_memflag & FC_MEMSEG_PUT_ENABLED) && 1004 !(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) { 1005 emlxs_mem_pool_destroy(hba, seg); 1006 } 1007 1008 return; 1009 1010 } /* emlxs_mem_pool_put() */ 1011 1012 1013 extern MATCHMAP * 1014 emlxs_mem_buf_alloc(emlxs_hba_t *hba, uint32_t size) 1015 { 1016 emlxs_port_t *port = &PPORT; 1017 uint8_t *bp = NULL; 1018 MATCHMAP *mp = NULL; 1019 MBUF_INFO *buf_info; 1020 MBUF_INFO bufinfo; 1021 1022 buf_info = &bufinfo; 1023 1024 bzero(buf_info, sizeof (MBUF_INFO)); 1025 buf_info->size = sizeof (MATCHMAP); 1026 buf_info->align = sizeof (void *); 1027 1028 (void) emlxs_mem_alloc(hba, buf_info); 1029 if (buf_info->virt == NULL) { 1030 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 1031 "MEM_BUF_ALLOC buffer."); 1032 1033 return (NULL); 1034 } 1035 1036 mp = (MATCHMAP *)buf_info->virt; 1037 bzero(mp, sizeof (MATCHMAP)); 1038 1039 bzero(buf_info, sizeof (MBUF_INFO)); 1040 buf_info->size = size; 1041 buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32; 1042 buf_info->align = 32; 1043 1044 (void) emlxs_mem_alloc(hba, buf_info); 1045 if (buf_info->virt == NULL) { 1046 1047 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 1048 "MEM_BUF_ALLOC DMA buffer."); 1049 1050 /* Free the mp object */ 1051 bzero(buf_info, sizeof (MBUF_INFO)); 1052 buf_info->size = sizeof (MATCHMAP); 1053 buf_info->virt = (void *)mp; 1054 emlxs_mem_free(hba, buf_info); 1055 1056 return (0); 1057 } 1058 bp = (uint8_t *)buf_info->virt; 1059 bzero(bp, buf_info->size); 1060 1061 mp->virt = buf_info->virt; 1062 mp->phys = buf_info->phys; 1063 mp->size = buf_info->size; 1064 mp->dma_handle = buf_info->dma_handle; 1065 mp->data_handle = buf_info->data_handle; 1066 mp->tag = MEM_BUF; 1067 mp->flag |= MAP_BUF_ALLOCATED; 1068 1069 return (mp); 1070 1071 } /* emlxs_mem_buf_alloc() */ 1072 1073 1074 extern void 1075 emlxs_mem_buf_free(emlxs_hba_t *hba, MATCHMAP *mp) 1076 { 1077 MBUF_INFO bufinfo; 1078 MBUF_INFO *buf_info; 1079 1080 buf_info = &bufinfo; 1081 1082 if (!(mp->flag & MAP_BUF_ALLOCATED)) { 1083 return; 1084 } 1085 1086 bzero(buf_info, sizeof (MBUF_INFO)); 1087 buf_info->size = mp->size; 1088 buf_info->virt = mp->virt; 1089 buf_info->phys = mp->phys; 1090 buf_info->dma_handle = mp->dma_handle; 1091 buf_info->data_handle = mp->data_handle; 1092 buf_info->flags = FC_MBUF_DMA; 1093 emlxs_mem_free(hba, buf_info); 1094 1095 bzero(buf_info, sizeof (MBUF_INFO)); 1096 buf_info->size = sizeof (MATCHMAP); 1097 buf_info->virt = (void *)mp; 1098 emlxs_mem_free(hba, buf_info); 1099 1100 return; 1101 1102 } /* emlxs_mem_buf_free() */ 1103 1104 1105 extern void * 1106 emlxs_mem_get(emlxs_hba_t *hba, uint32_t seg_id) 1107 { 1108 emlxs_port_t *port = &PPORT; 1109 void *bp; 1110 MAILBOXQ *mbq; 1111 IOCBQ *iocbq; 1112 NODELIST *node; 1113 MEMSEG *seg; 1114 1115 if (seg_id >= FC_MAX_SEG) { 1116 1117 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg, 1118 "mem_get: Invalid segment id = %d", 1119 seg_id); 1120 1121 return (NULL); 1122 } 1123 seg = &hba->memseg[seg_id]; 1124 1125 /* Alloc a buffer from the pool */ 1126 bp = emlxs_mem_pool_get(hba, seg); 1127 1128 if (bp) { 1129 switch (seg_id) { 1130 case MEM_MBOX: 1131 mbq = (MAILBOXQ *)bp; 1132 mbq->flag |= MBQ_POOL_ALLOCATED; 1133 break; 1134 1135 case MEM_IOCB: 1136 iocbq = (IOCBQ *)bp; 1137 iocbq->flag |= IOCB_POOL_ALLOCATED; 1138 break; 1139 1140 case MEM_NLP: 1141 node = (NODELIST *)bp; 1142 node->flag |= NODE_POOL_ALLOCATED; 1143 break; 1144 } 1145 } 1146 1147 return (bp); 1148 1149 } /* emlxs_mem_get() */ 1150 1151 1152 extern void 1153 emlxs_mem_put(emlxs_hba_t *hba, uint32_t seg_id, void *bp) 1154 { 1155 emlxs_port_t *port = &PPORT; 1156 MAILBOXQ *mbq; 1157 IOCBQ *iocbq; 1158 NODELIST *node; 1159 MEMSEG *seg; 1160 MATCHMAP *mp; 1161 1162 if (seg_id >= FC_MAX_SEG) { 1163 1164 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg, 1165 "mem_put: Invalid segment id = %d: bp=%p", 1166 seg_id, bp); 1167 1168 return; 1169 } 1170 seg = &hba->memseg[seg_id]; 1171 1172 /* Verify buffer */ 1173 switch (seg_id) { 1174 case MEM_MBOX: 1175 mbq = (MAILBOXQ *)bp; 1176 1177 if (!(mbq->flag & MBQ_POOL_ALLOCATED)) { 1178 return; 1179 } 1180 break; 1181 1182 case MEM_IOCB: 1183 iocbq = (IOCBQ *)bp; 1184 1185 if (!(iocbq->flag & IOCB_POOL_ALLOCATED)) { 1186 return; 1187 } 1188 1189 /* Any IOCBQ with a packet attached did not come */ 1190 /* from our pool */ 1191 if (iocbq->sbp) { 1192 return; 1193 } 1194 break; 1195 1196 case MEM_NLP: 1197 node = (NODELIST *)bp; 1198 1199 if (!(node->flag & NODE_POOL_ALLOCATED)) { 1200 return; 1201 } 1202 break; 1203 1204 default: 1205 mp = (MATCHMAP *)bp; 1206 1207 if (mp->flag & MAP_BUF_ALLOCATED) { 1208 emlxs_mem_buf_free(hba, mp); 1209 return; 1210 } 1211 1212 if (mp->flag & MAP_TABLE_ALLOCATED) { 1213 return; 1214 } 1215 1216 if (!(mp->flag & MAP_POOL_ALLOCATED)) { 1217 return; 1218 } 1219 break; 1220 } 1221 1222 /* Free a buffer to the pool */ 1223 emlxs_mem_pool_put(hba, seg, bp); 1224 1225 return; 1226 1227 } /* emlxs_mem_put() */ 1228 1229 1230 /* 1231 * Look up the virtual address given a mapped address 1232 */ 1233 /* SLI3 */ 1234 extern MATCHMAP * 1235 emlxs_mem_get_vaddr(emlxs_hba_t *hba, RING *rp, uint64_t mapbp) 1236 { 1237 emlxs_port_t *port = &PPORT; 1238 MATCHMAP *prev; 1239 MATCHMAP *mp; 1240 1241 if (rp->ringno == hba->channel_els) { 1242 mp = (MATCHMAP *)rp->fc_mpoff; 1243 prev = 0; 1244 1245 while (mp) { 1246 if (mp->phys == mapbp) { 1247 if (prev == 0) { 1248 rp->fc_mpoff = mp->fc_mptr; 1249 } else { 1250 prev->fc_mptr = mp->fc_mptr; 1251 } 1252 1253 if (rp->fc_mpon == mp) { 1254 rp->fc_mpon = (void *)prev; 1255 } 1256 1257 mp->fc_mptr = NULL; 1258 1259 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size, 1260 DDI_DMA_SYNC_FORKERNEL); 1261 1262 HBASTATS.ElsUbPosted--; 1263 1264 return (mp); 1265 } 1266 1267 prev = mp; 1268 mp = (MATCHMAP *)mp->fc_mptr; 1269 } 1270 1271 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg, 1272 "ELS Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p", 1273 mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon); 1274 1275 } else if (rp->ringno == hba->channel_ct) { 1276 1277 mp = (MATCHMAP *)rp->fc_mpoff; 1278 prev = 0; 1279 1280 while (mp) { 1281 if (mp->phys == mapbp) { 1282 if (prev == 0) { 1283 rp->fc_mpoff = mp->fc_mptr; 1284 } else { 1285 prev->fc_mptr = mp->fc_mptr; 1286 } 1287 1288 if (rp->fc_mpon == mp) { 1289 rp->fc_mpon = (void *)prev; 1290 } 1291 1292 mp->fc_mptr = NULL; 1293 1294 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size, 1295 DDI_DMA_SYNC_FORKERNEL); 1296 1297 HBASTATS.CtUbPosted--; 1298 1299 return (mp); 1300 } 1301 1302 prev = mp; 1303 mp = (MATCHMAP *)mp->fc_mptr; 1304 } 1305 1306 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg, 1307 "CT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p", 1308 mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon); 1309 1310 } else if (rp->ringno == hba->channel_ip) { 1311 1312 mp = (MATCHMAP *)rp->fc_mpoff; 1313 prev = 0; 1314 1315 while (mp) { 1316 if (mp->phys == mapbp) { 1317 if (prev == 0) { 1318 rp->fc_mpoff = mp->fc_mptr; 1319 } else { 1320 prev->fc_mptr = mp->fc_mptr; 1321 } 1322 1323 if (rp->fc_mpon == mp) { 1324 rp->fc_mpon = (void *)prev; 1325 } 1326 1327 mp->fc_mptr = NULL; 1328 1329 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size, 1330 DDI_DMA_SYNC_FORKERNEL); 1331 1332 HBASTATS.IpUbPosted--; 1333 1334 return (mp); 1335 } 1336 1337 prev = mp; 1338 mp = (MATCHMAP *)mp->fc_mptr; 1339 } 1340 1341 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg, 1342 "IP Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p", 1343 mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon); 1344 1345 #ifdef SFCT_SUPPORT 1346 } else if (rp->ringno == hba->CHANNEL_FCT) { 1347 mp = (MATCHMAP *)rp->fc_mpoff; 1348 prev = 0; 1349 1350 while (mp) { 1351 if (mp->phys == mapbp) { 1352 if (prev == 0) { 1353 rp->fc_mpoff = mp->fc_mptr; 1354 } else { 1355 prev->fc_mptr = mp->fc_mptr; 1356 } 1357 1358 if (rp->fc_mpon == mp) { 1359 rp->fc_mpon = (void *)prev; 1360 } 1361 1362 mp->fc_mptr = NULL; 1363 1364 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size, 1365 DDI_DMA_SYNC_FORKERNEL); 1366 1367 HBASTATS.FctUbPosted--; 1368 1369 return (mp); 1370 } 1371 1372 prev = mp; 1373 mp = (MATCHMAP *)mp->fc_mptr; 1374 } 1375 1376 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg, 1377 "FCT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p", 1378 mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon); 1379 1380 #endif /* SFCT_SUPPORT */ 1381 } 1382 1383 return (0); 1384 1385 } /* emlxs_mem_get_vaddr() */ 1386 1387 1388 /* 1389 * Given a virtual address bp, generate the physical mapped address and 1390 * place it where addr points to. Save the address pair for lookup later. 1391 */ 1392 /* SLI3 */ 1393 extern void 1394 emlxs_mem_map_vaddr(emlxs_hba_t *hba, RING *rp, MATCHMAP *mp, 1395 uint32_t *haddr, uint32_t *laddr) 1396 { 1397 if (rp->ringno == hba->channel_els) { 1398 /* 1399 * Update slot fc_mpon points to then bump it 1400 * fc_mpoff is pointer head of the list. 1401 * fc_mpon is pointer tail of the list. 1402 */ 1403 mp->fc_mptr = NULL; 1404 if (rp->fc_mpoff == 0) { 1405 rp->fc_mpoff = (void *)mp; 1406 rp->fc_mpon = (void *)mp; 1407 } else { 1408 ((MATCHMAP *)(rp->fc_mpon))->fc_mptr = 1409 (void *)mp; 1410 rp->fc_mpon = (void *)mp; 1411 } 1412 1413 if (hba->flag & FC_SLIM2_MODE) { 1414 1415 /* return mapped address */ 1416 *haddr = PADDR_HI(mp->phys); 1417 /* return mapped address */ 1418 *laddr = PADDR_LO(mp->phys); 1419 } else { 1420 /* return mapped address */ 1421 *laddr = PADDR_LO(mp->phys); 1422 } 1423 1424 HBASTATS.ElsUbPosted++; 1425 1426 } else if (rp->ringno == hba->channel_ct) { 1427 /* 1428 * Update slot fc_mpon points to then bump it 1429 * fc_mpoff is pointer head of the list. 1430 * fc_mpon is pointer tail of the list. 1431 */ 1432 mp->fc_mptr = NULL; 1433 if (rp->fc_mpoff == 0) { 1434 rp->fc_mpoff = (void *)mp; 1435 rp->fc_mpon = (void *)mp; 1436 } else { 1437 ((MATCHMAP *)(rp->fc_mpon))->fc_mptr = 1438 (void *)mp; 1439 rp->fc_mpon = (void *)mp; 1440 } 1441 1442 if (hba->flag & FC_SLIM2_MODE) { 1443 /* return mapped address */ 1444 *haddr = PADDR_HI(mp->phys); 1445 /* return mapped address */ 1446 *laddr = PADDR_LO(mp->phys); 1447 } else { 1448 /* return mapped address */ 1449 *laddr = PADDR_LO(mp->phys); 1450 } 1451 1452 HBASTATS.CtUbPosted++; 1453 1454 1455 } else if (rp->ringno == hba->channel_ip) { 1456 /* 1457 * Update slot fc_mpon points to then bump it 1458 * fc_mpoff is pointer head of the list. 1459 * fc_mpon is pointer tail of the list. 1460 */ 1461 mp->fc_mptr = NULL; 1462 if (rp->fc_mpoff == 0) { 1463 rp->fc_mpoff = (void *)mp; 1464 rp->fc_mpon = (void *)mp; 1465 } else { 1466 ((MATCHMAP *)(rp->fc_mpon))->fc_mptr = 1467 (void *)mp; 1468 rp->fc_mpon = (void *)mp; 1469 } 1470 1471 if (hba->flag & FC_SLIM2_MODE) { 1472 /* return mapped address */ 1473 *haddr = PADDR_HI(mp->phys); 1474 *laddr = PADDR_LO(mp->phys); 1475 } else { 1476 *laddr = PADDR_LO(mp->phys); 1477 } 1478 1479 HBASTATS.IpUbPosted++; 1480 1481 1482 #ifdef SFCT_SUPPORT 1483 } else if (rp->ringno == hba->CHANNEL_FCT) { 1484 /* 1485 * Update slot fc_mpon points to then bump it 1486 * fc_mpoff is pointer head of the list. 1487 * fc_mpon is pointer tail of the list. 1488 */ 1489 mp->fc_mptr = NULL; 1490 if (rp->fc_mpoff == 0) { 1491 rp->fc_mpoff = (void *)mp; 1492 rp->fc_mpon = (void *)mp; 1493 } else { 1494 ((MATCHMAP *)(rp->fc_mpon))->fc_mptr = 1495 (void *)mp; 1496 rp->fc_mpon = (void *)mp; 1497 } 1498 1499 if (hba->flag & FC_SLIM2_MODE) { 1500 /* return mapped address */ 1501 *haddr = PADDR_HI(mp->phys); 1502 /* return mapped address */ 1503 *laddr = PADDR_LO(mp->phys); 1504 } else { 1505 /* return mapped address */ 1506 *laddr = PADDR_LO(mp->phys); 1507 } 1508 1509 HBASTATS.FctUbPosted++; 1510 1511 #endif /* SFCT_SUPPORT */ 1512 } 1513 } /* emlxs_mem_map_vaddr() */ 1514 1515 1516 /* SLI3 */ 1517 uint32_t 1518 emlxs_hbq_alloc(emlxs_hba_t *hba, uint32_t hbq_id) 1519 { 1520 emlxs_port_t *port = &PPORT; 1521 HBQ_INIT_t *hbq; 1522 MBUF_INFO *buf_info; 1523 MBUF_INFO bufinfo; 1524 1525 hbq = &hba->sli.sli3.hbq_table[hbq_id]; 1526 1527 if (hbq->HBQ_host_buf.virt == 0) { 1528 buf_info = &bufinfo; 1529 1530 /* Get the system's page size in a DDI-compliant way. */ 1531 bzero(buf_info, sizeof (MBUF_INFO)); 1532 buf_info->size = hbq->HBQ_numEntries * sizeof (HBQE_t); 1533 buf_info->flags = FC_MBUF_DMA; 1534 buf_info->align = 4096; 1535 1536 (void) emlxs_mem_alloc(hba, buf_info); 1537 1538 if (buf_info->virt == NULL) { 1539 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg, 1540 "Unable to alloc HBQ."); 1541 return (ENOMEM); 1542 } 1543 1544 hbq->HBQ_host_buf.virt = buf_info->virt; 1545 hbq->HBQ_host_buf.phys = buf_info->phys; 1546 hbq->HBQ_host_buf.data_handle = buf_info->data_handle; 1547 hbq->HBQ_host_buf.dma_handle = buf_info->dma_handle; 1548 hbq->HBQ_host_buf.size = buf_info->size; 1549 hbq->HBQ_host_buf.tag = hbq_id; 1550 1551 bzero((char *)hbq->HBQ_host_buf.virt, buf_info->size); 1552 } 1553 1554 return (0); 1555 1556 } /* emlxs_hbq_alloc() */ 1557