1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Emulex. All rights reserved. 24 * Use is subject to License terms. 25 */ 26 27 28 #include "emlxs.h" 29 30 EMLXS_MSG_DEF(EMLXS_MEM_C); 31 32 33 #ifdef SLI3_SUPPORT 34 static uint32_t emlxs_hbq_alloc(emlxs_hba_t *hba, uint32_t hbq_id); 35 static void emlxs_hbq_free_all(emlxs_hba_t *hba, uint32_t hbq_id); 36 #endif /* SLI3_SUPPORT */ 37 38 /* 39 * emlxs_mem_alloc_buffer 40 * 41 * This routine will allocate iocb/data buffer 42 * space and setup the buffers for all rings on 43 * the specified board to use. The data buffers 44 * can be posted to the ring with the 45 * fc_post_buffer routine. The iocb buffers 46 * are used to make a temp copy of the response 47 * ring iocbs. Returns 0 if not enough memory, 48 * Returns 1 if successful. 
49 */ 50 51 52 extern int32_t 53 emlxs_mem_alloc_buffer(emlxs_hba_t *hba) 54 { 55 emlxs_port_t *port = &PPORT; 56 emlxs_config_t *cfg; 57 MBUF_INFO *buf_info; 58 uint8_t *bp; 59 uint8_t *oldbp; 60 MEMSEG *mp; 61 MATCHMAP *matp; 62 NODELIST *ndlp; 63 IOCBQ *iocbq; 64 MAILBOXQ *mbox; 65 MBUF_INFO bufinfo; 66 int32_t i; 67 RING *fcp_rp; 68 RING *ip_rp; 69 RING *els_rp; 70 RING *ct_rp; 71 #ifdef EMLXS_SPARC 72 int32_t j; 73 ULP_BDE64 *v_bpl; 74 ULP_BDE64 *p_bpl; 75 #endif /* EMLXS_SPARC */ 76 uint32_t total_iotags; 77 78 buf_info = &bufinfo; 79 cfg = &CFG; 80 81 mutex_enter(&EMLXS_MEMGET_LOCK); 82 83 /* 84 * Allocate and Initialize MEM_NLP (0) 85 */ 86 mp = &hba->memseg[MEM_NLP]; 87 mp->fc_memsize = sizeof (NODELIST); 88 mp->fc_numblks = (int16_t)hba->max_nodes + 2; 89 mp->fc_total_memsize = mp->fc_memsize * mp->fc_numblks; 90 mp->fc_memstart_virt = kmem_zalloc(mp->fc_total_memsize, KM_NOSLEEP); 91 mp->fc_memget_cnt = mp->fc_numblks; 92 mp->fc_memput_cnt = 0; 93 mp->fc_memstart_phys = 0; 94 mp->fc_memflag = 0; 95 mp->fc_lowmem = 0; 96 mp->fc_mem_dma_handle = 0; 97 mp->fc_mem_dat_handle = 0; 98 mp->fc_memget_ptr = 0; 99 mp->fc_memget_end = 0; 100 mp->fc_memput_ptr = 0; 101 mp->fc_memput_end = 0; 102 103 if (mp->fc_memstart_virt == NULL) { 104 mutex_exit(&EMLXS_MEMGET_LOCK); 105 106 (void) emlxs_mem_free_buffer(hba); 107 108 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 109 "NLP memory pool."); 110 111 return (0); 112 } 113 bzero(mp->fc_memstart_virt, mp->fc_memsize); 114 ndlp = (NODELIST *) mp->fc_memstart_virt; 115 116 /* 117 * Link buffer into beginning of list. The first pointer in each 118 * buffer is a forward pointer to the next buffer. 
119 */ 120 for (i = 0; i < mp->fc_numblks; i++, ndlp++) { 121 ndlp->flag |= NODE_POOL_ALLOCATED; 122 123 oldbp = mp->fc_memget_ptr; 124 bp = (uint8_t *)ndlp; 125 if (oldbp == NULL) { 126 mp->fc_memget_end = bp; 127 } 128 mp->fc_memget_ptr = bp; 129 *((uint8_t **)bp) = oldbp; 130 } 131 132 133 /* 134 * Allocate and Initialize MEM_IOCB (1) 135 */ 136 mp = &hba->memseg[MEM_IOCB]; 137 mp->fc_memsize = sizeof (IOCBQ); 138 mp->fc_numblks = (uint16_t)cfg[CFG_NUM_IOCBS].current; 139 mp->fc_total_memsize = mp->fc_memsize * mp->fc_numblks; 140 mp->fc_memstart_virt = kmem_zalloc(mp->fc_total_memsize, KM_NOSLEEP); 141 mp->fc_lowmem = (mp->fc_numblks >> 4); 142 mp->fc_memget_cnt = mp->fc_numblks; 143 mp->fc_memput_cnt = 0; 144 mp->fc_memflag = 0; 145 mp->fc_memstart_phys = 0; 146 mp->fc_mem_dma_handle = 0; 147 mp->fc_mem_dat_handle = 0; 148 mp->fc_memget_ptr = 0; 149 mp->fc_memget_end = 0; 150 mp->fc_memput_ptr = 0; 151 mp->fc_memput_end = 0; 152 153 if (mp->fc_memstart_virt == NULL) { 154 mutex_exit(&EMLXS_MEMGET_LOCK); 155 156 (void) emlxs_mem_free_buffer(hba); 157 158 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 159 "IOCB memory pool."); 160 161 return (0); 162 } 163 bzero(mp->fc_memstart_virt, mp->fc_memsize); 164 iocbq = (IOCBQ *) mp->fc_memstart_virt; 165 166 /* 167 * Link buffer into beginning of list. The first pointer in each 168 * buffer is a forward pointer to the next buffer. 
169 */ 170 for (i = 0; i < mp->fc_numblks; i++, iocbq++) { 171 iocbq->flag |= IOCB_POOL_ALLOCATED; 172 173 oldbp = mp->fc_memget_ptr; 174 bp = (uint8_t *)iocbq; 175 if (oldbp == NULL) { 176 mp->fc_memget_end = bp; 177 } 178 mp->fc_memget_ptr = bp; 179 *((uint8_t **)bp) = oldbp; 180 } 181 182 /* 183 * Allocate and Initialize MEM_MBOX (2) 184 */ 185 mp = &hba->memseg[MEM_MBOX]; 186 mp->fc_memsize = sizeof (MAILBOXQ); 187 mp->fc_numblks = (int16_t)hba->max_nodes + 32; 188 mp->fc_total_memsize = mp->fc_memsize * mp->fc_numblks; 189 mp->fc_memstart_virt = kmem_zalloc(mp->fc_total_memsize, KM_NOSLEEP); 190 mp->fc_lowmem = (mp->fc_numblks >> 3); 191 mp->fc_memget_cnt = mp->fc_numblks; 192 mp->fc_memput_cnt = 0; 193 mp->fc_memflag = 0; 194 mp->fc_memstart_phys = 0; 195 mp->fc_mem_dma_handle = 0; 196 mp->fc_mem_dat_handle = 0; 197 mp->fc_memget_ptr = 0; 198 mp->fc_memget_end = 0; 199 mp->fc_memput_ptr = 0; 200 mp->fc_memput_end = 0; 201 202 if (mp->fc_memstart_virt == NULL) { 203 mutex_exit(&EMLXS_MEMGET_LOCK); 204 205 (void) emlxs_mem_free_buffer(hba); 206 207 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 208 "MBOX memory pool."); 209 210 return (0); 211 } 212 bzero(mp->fc_memstart_virt, mp->fc_memsize); 213 mbox = (MAILBOXQ *) mp->fc_memstart_virt; 214 215 /* 216 * Link buffer into beginning of list. The first pointer in each 217 * buffer is a forward pointer to the next buffer. 
218 */ 219 for (i = 0; i < mp->fc_numblks; i++, mbox++) { 220 mbox->flag |= MBQ_POOL_ALLOCATED; 221 222 oldbp = mp->fc_memget_ptr; 223 bp = (uint8_t *)mbox; 224 if (oldbp == NULL) { 225 mp->fc_memget_end = bp; 226 } 227 mp->fc_memget_ptr = bp; 228 *((uint8_t **)bp) = oldbp; 229 } 230 231 /* 232 * Initialize fc_table 233 */ 234 fcp_rp = &hba->ring[FC_FCP_RING]; 235 ip_rp = &hba->ring[FC_IP_RING]; 236 els_rp = &hba->ring[FC_ELS_RING]; 237 ct_rp = &hba->ring[FC_CT_RING]; 238 239 fcp_rp->max_iotag = cfg[CFG_NUM_IOTAGS].current; 240 ip_rp->max_iotag = hba->max_nodes; 241 els_rp->max_iotag = hba->max_nodes; 242 ct_rp->max_iotag = hba->max_nodes; 243 244 /* Allocate the fc_table */ 245 total_iotags = fcp_rp->max_iotag + ip_rp->max_iotag + 246 els_rp->max_iotag + ct_rp->max_iotag; 247 248 bzero(buf_info, sizeof (MBUF_INFO)); 249 buf_info->size = total_iotags * sizeof (emlxs_buf_t *); 250 buf_info->align = sizeof (void *); 251 252 (void) emlxs_mem_alloc(hba, buf_info); 253 if (buf_info->virt == NULL) { 254 mutex_exit(&EMLXS_MEMGET_LOCK); 255 256 (void) emlxs_mem_free_buffer(hba); 257 258 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 259 "fc_table buffer."); 260 261 return (0); 262 } 263 hba->iotag_table = buf_info->virt; 264 fcp_rp->fc_table = &hba->iotag_table[0]; 265 ip_rp->fc_table = &hba->iotag_table[fcp_rp->max_iotag]; 266 els_rp->fc_table = &hba->iotag_table[fcp_rp->max_iotag + 267 ip_rp->max_iotag]; 268 ct_rp->fc_table = &hba->iotag_table[fcp_rp->max_iotag + 269 ip_rp->max_iotag + els_rp->max_iotag]; 270 271 #ifdef EMLXS_SPARC 272 /* 273 * Allocate and Initialize FCP MEM_BPL's. 
This is for increased 274 * performance on sparc 275 */ 276 277 bzero(buf_info, sizeof (MBUF_INFO)); 278 buf_info->size = fcp_rp->max_iotag * sizeof (MATCHMAP); 279 buf_info->align = sizeof (void *); 280 281 (void) emlxs_mem_alloc(hba, buf_info); 282 if (buf_info->virt == NULL) { 283 mutex_exit(&EMLXS_MEMGET_LOCK); 284 285 (void) emlxs_mem_free_buffer(hba); 286 287 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 288 "FCP BPL table buffer."); 289 290 return (0); 291 } 292 hba->fcp_bpl_table = buf_info->virt; 293 bzero(hba->fcp_bpl_table, buf_info->size); 294 295 bzero(buf_info, sizeof (MBUF_INFO)); 296 buf_info->size = (fcp_rp->max_iotag * (3 * sizeof (ULP_BDE64))); 297 buf_info->flags = FC_MBUF_DMA; 298 buf_info->align = 32; 299 300 (void) emlxs_mem_alloc(hba, buf_info); 301 if (buf_info->virt == NULL) { 302 mutex_exit(&EMLXS_MEMGET_LOCK); 303 304 (void) emlxs_mem_free_buffer(hba); 305 306 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 307 "FCP BPL DMA buffers."); 308 309 return (0); 310 } 311 bzero(buf_info->virt, buf_info->size); 312 313 hba->fcp_bpl_mp.size = buf_info->size; 314 hba->fcp_bpl_mp.virt = buf_info->virt; 315 hba->fcp_bpl_mp.phys = buf_info->phys; 316 hba->fcp_bpl_mp.data_handle = buf_info->data_handle; 317 hba->fcp_bpl_mp.dma_handle = buf_info->dma_handle; 318 hba->fcp_bpl_mp.tag = NULL; 319 320 v_bpl = (ULP_BDE64 *) hba->fcp_bpl_mp.virt; 321 p_bpl = (ULP_BDE64 *) hba->fcp_bpl_mp.phys; 322 for (i = 0, j = 0; i < fcp_rp->max_iotag; i++, j += 3) { 323 matp = &hba->fcp_bpl_table[i]; 324 325 matp->fc_mptr = NULL; 326 matp->size = (3 * sizeof (ULP_BDE64)); 327 matp->virt = (uint8_t *)& v_bpl[j]; 328 matp->phys = (uint64_t)& p_bpl[j]; 329 matp->dma_handle = NULL; 330 matp->data_handle = NULL; 331 matp->tag = MEM_BPL; 332 matp->flag |= MAP_TABLE_ALLOCATED; 333 } 334 335 #endif /* EMLXS_SPARC */ 336 337 /* 338 * Allocate and Initialize MEM_BPL (3) 339 */ 340 341 mp = &hba->memseg[MEM_BPL]; 342 mp->fc_memsize = hba->mem_bpl_size; /* Set during 
attach */ 343 mp->fc_numblks = (uint16_t)cfg[CFG_NUM_IOCBS].current; 344 mp->fc_memflag = FC_MEM_DMA; 345 mp->fc_lowmem = (mp->fc_numblks >> 4); 346 mp->fc_memstart_virt = 0; 347 mp->fc_memstart_phys = 0; 348 mp->fc_mem_dma_handle = 0; 349 mp->fc_mem_dat_handle = 0; 350 mp->fc_memget_ptr = 0; 351 mp->fc_memget_end = 0; 352 mp->fc_memput_ptr = 0; 353 mp->fc_memput_end = 0; 354 mp->fc_total_memsize = 0; 355 mp->fc_memget_cnt = mp->fc_numblks; 356 mp->fc_memput_cnt = 0; 357 358 /* Allocate buffer pools for above buffer structures */ 359 for (i = 0; i < mp->fc_numblks; i++) { 360 /* 361 * If this is a DMA buffer we need alignment on a page so we 362 * don't want to worry about buffers spanning page boundries 363 * when mapping memory for the adapter. 364 */ 365 bzero(buf_info, sizeof (MBUF_INFO)); 366 buf_info->size = sizeof (MATCHMAP); 367 buf_info->align = sizeof (void *); 368 369 (void) emlxs_mem_alloc(hba, buf_info); 370 if (buf_info->virt == NULL) { 371 mutex_exit(&EMLXS_MEMGET_LOCK); 372 373 (void) emlxs_mem_free_buffer(hba); 374 375 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 376 "BPL segment buffer."); 377 378 return (0); 379 } 380 matp = (MATCHMAP *) buf_info->virt; 381 bzero(matp, sizeof (MATCHMAP)); 382 383 bzero(buf_info, sizeof (MBUF_INFO)); 384 buf_info->size = mp->fc_memsize; 385 buf_info->flags = FC_MBUF_DMA; 386 buf_info->align = 32; 387 388 (void) emlxs_mem_alloc(hba, buf_info); 389 if (buf_info->virt == NULL) { 390 mutex_exit(&EMLXS_MEMGET_LOCK); 391 392 (void) emlxs_mem_free_buffer(hba); 393 394 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 395 "BPL DMA buffer."); 396 397 return (0); 398 } 399 bp = (uint8_t *)buf_info->virt; 400 bzero(bp, mp->fc_memsize); 401 402 /* 403 * Link buffer into beginning of list. The first pointer in 404 * each buffer is a forward pointer to the next buffer. 
405 */ 406 oldbp = mp->fc_memget_ptr; 407 408 if (oldbp == 0) { 409 mp->fc_memget_end = (uint8_t *)matp; 410 } 411 mp->fc_memget_ptr = (uint8_t *)matp; 412 matp->fc_mptr = oldbp; 413 matp->virt = buf_info->virt; 414 matp->phys = buf_info->phys; 415 matp->size = buf_info->size; 416 matp->dma_handle = buf_info->dma_handle; 417 matp->data_handle = buf_info->data_handle; 418 matp->tag = MEM_BPL; 419 matp->flag |= MAP_POOL_ALLOCATED; 420 } 421 422 423 /* 424 * These represent the unsolicited ELS buffers we preallocate. 425 */ 426 427 mp = &hba->memseg[MEM_BUF]; 428 mp->fc_memsize = MEM_BUF_SIZE; 429 mp->fc_numblks = MEM_ELSBUF_COUNT + MEM_BUF_COUNT; 430 mp->fc_memflag = FC_MEM_DMA; 431 mp->fc_lowmem = 3; 432 mp->fc_memstart_virt = 0; 433 mp->fc_memstart_phys = 0; 434 mp->fc_mem_dma_handle = 0; 435 mp->fc_mem_dat_handle = 0; 436 mp->fc_memget_ptr = 0; 437 mp->fc_memget_end = 0; 438 mp->fc_memput_ptr = 0; 439 mp->fc_memput_end = 0; 440 mp->fc_total_memsize = 0; 441 mp->fc_memget_cnt = mp->fc_numblks; 442 mp->fc_memput_cnt = 0; 443 444 /* Allocate buffer pools for above buffer structures */ 445 for (i = 0; i < mp->fc_numblks; i++) { 446 /* 447 * If this is a DMA buffer we need alignment on a page so we 448 * don't want to worry about buffers spanning page boundries 449 * when mapping memory for the adapter. 
450 */ 451 bzero(buf_info, sizeof (MBUF_INFO)); 452 buf_info->size = sizeof (MATCHMAP); 453 buf_info->align = sizeof (void *); 454 455 (void) emlxs_mem_alloc(hba, buf_info); 456 if (buf_info->virt == NULL) { 457 mutex_exit(&EMLXS_MEMGET_LOCK); 458 459 (void) emlxs_mem_free_buffer(hba); 460 461 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 462 "MEM_BUF Segment buffer."); 463 464 return (0); 465 } 466 matp = (MATCHMAP *) buf_info->virt; 467 bzero(matp, sizeof (MATCHMAP)); 468 469 bzero(buf_info, sizeof (MBUF_INFO)); 470 buf_info->size = mp->fc_memsize; 471 buf_info->flags = FC_MBUF_DMA; 472 buf_info->align = 32; 473 474 (void) emlxs_mem_alloc(hba, buf_info); 475 if (buf_info->virt == NULL) { 476 mutex_exit(&EMLXS_MEMGET_LOCK); 477 478 (void) emlxs_mem_free_buffer(hba); 479 480 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 481 "MEM_BUF DMA buffer."); 482 483 return (0); 484 } 485 bp = (uint8_t *)buf_info->virt; 486 bzero(bp, mp->fc_memsize); 487 488 /* 489 * Link buffer into beginning of list. The first pointer in 490 * each buffer is a forward pointer to the next buffer. 491 */ 492 oldbp = mp->fc_memget_ptr; 493 494 if (oldbp == 0) { 495 mp->fc_memget_end = (uint8_t *)matp; 496 } 497 mp->fc_memget_ptr = (uint8_t *)matp; 498 matp->fc_mptr = oldbp; 499 matp->virt = buf_info->virt; 500 matp->phys = buf_info->phys; 501 matp->size = buf_info->size; 502 matp->dma_handle = buf_info->dma_handle; 503 matp->data_handle = buf_info->data_handle; 504 matp->tag = MEM_BUF; 505 matp->flag |= MAP_POOL_ALLOCATED; 506 } 507 508 509 /* 510 * These represent the unsolicited IP buffers we preallocate. 
511 */ 512 513 mp = &hba->memseg[MEM_IPBUF]; 514 mp->fc_memsize = MEM_IPBUF_SIZE; 515 mp->fc_numblks = MEM_IPBUF_COUNT; 516 mp->fc_memflag = FC_MEM_DMA; 517 mp->fc_lowmem = 3; 518 mp->fc_memstart_virt = 0; 519 mp->fc_memstart_phys = 0; 520 mp->fc_mem_dma_handle = 0; 521 mp->fc_mem_dat_handle = 0; 522 mp->fc_memget_ptr = 0; 523 mp->fc_memget_end = 0; 524 mp->fc_memput_ptr = 0; 525 mp->fc_memput_end = 0; 526 mp->fc_total_memsize = 0; 527 mp->fc_memget_cnt = mp->fc_numblks; 528 mp->fc_memput_cnt = 0; 529 530 /* Allocate buffer pools for above buffer structures */ 531 for (i = 0; i < mp->fc_numblks; i++) { 532 /* 533 * If this is a DMA buffer we need alignment on a page so we 534 * don't want to worry about buffers spanning page boundries 535 * when mapping memory for the adapter. 536 */ 537 bzero(buf_info, sizeof (MBUF_INFO)); 538 buf_info->size = sizeof (MATCHMAP); 539 buf_info->align = sizeof (void *); 540 541 (void) emlxs_mem_alloc(hba, buf_info); 542 if (buf_info->virt == NULL) { 543 mutex_exit(&EMLXS_MEMGET_LOCK); 544 545 (void) emlxs_mem_free_buffer(hba); 546 547 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 548 "IP_BUF Segment buffer."); 549 550 return (0); 551 } 552 matp = (MATCHMAP *) buf_info->virt; 553 bzero(matp, sizeof (MATCHMAP)); 554 555 bzero(buf_info, sizeof (MBUF_INFO)); 556 buf_info->size = mp->fc_memsize; 557 buf_info->flags = FC_MBUF_DMA; 558 buf_info->align = 32; 559 560 (void) emlxs_mem_alloc(hba, buf_info); 561 if (buf_info->virt == NULL) { 562 mutex_exit(&EMLXS_MEMGET_LOCK); 563 564 (void) emlxs_mem_free_buffer(hba); 565 566 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 567 "IP_BUF DMA buffer."); 568 569 return (0); 570 } 571 bp = (uint8_t *)buf_info->virt; 572 bzero(bp, mp->fc_memsize); 573 574 /* 575 * Link buffer into beginning of list. The first pointer in 576 * each buffer is a forward pointer to the next buffer. 
577 */ 578 oldbp = mp->fc_memget_ptr; 579 580 if (oldbp == 0) { 581 mp->fc_memget_end = (uint8_t *)matp; 582 } 583 mp->fc_memget_ptr = (uint8_t *)matp; 584 matp->fc_mptr = oldbp; 585 matp->virt = buf_info->virt; 586 matp->phys = buf_info->phys; 587 matp->size = buf_info->size; 588 matp->dma_handle = buf_info->dma_handle; 589 matp->data_handle = buf_info->data_handle; 590 matp->tag = MEM_IPBUF; 591 matp->flag |= MAP_POOL_ALLOCATED; 592 } 593 594 /* 595 * These represent the unsolicited CT buffers we preallocate. 596 */ 597 mp = &hba->memseg[MEM_CTBUF]; 598 mp->fc_memsize = MEM_CTBUF_SIZE; 599 mp->fc_numblks = MEM_CTBUF_COUNT; 600 mp->fc_memflag = FC_MEM_DMA; 601 mp->fc_lowmem = 0; 602 mp->fc_memstart_virt = 0; 603 mp->fc_memstart_phys = 0; 604 mp->fc_mem_dma_handle = 0; 605 mp->fc_mem_dat_handle = 0; 606 mp->fc_memget_ptr = 0; 607 mp->fc_memget_end = 0; 608 mp->fc_memput_ptr = 0; 609 mp->fc_memput_end = 0; 610 mp->fc_total_memsize = 0; 611 mp->fc_memget_cnt = mp->fc_numblks; 612 mp->fc_memput_cnt = 0; 613 614 /* Allocate buffer pools for above buffer structures */ 615 for (i = 0; i < mp->fc_numblks; i++) { 616 /* 617 * If this is a DMA buffer we need alignment on a page so we 618 * don't want to worry about buffers spanning page boundries 619 * when mapping memory for the adapter. 
620 */ 621 bzero(buf_info, sizeof (MBUF_INFO)); 622 buf_info->size = sizeof (MATCHMAP); 623 buf_info->align = sizeof (void *); 624 625 (void) emlxs_mem_alloc(hba, buf_info); 626 if (buf_info->virt == NULL) { 627 mutex_exit(&EMLXS_MEMGET_LOCK); 628 629 (void) emlxs_mem_free_buffer(hba); 630 631 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 632 "CT_BUF Segment buffer."); 633 634 return (0); 635 } 636 matp = (MATCHMAP *) buf_info->virt; 637 bzero(matp, sizeof (MATCHMAP)); 638 639 bzero(buf_info, sizeof (MBUF_INFO)); 640 buf_info->size = mp->fc_memsize; 641 buf_info->flags = FC_MBUF_DMA; 642 buf_info->align = 32; 643 644 (void) emlxs_mem_alloc(hba, buf_info); 645 if (buf_info->virt == NULL) { 646 mutex_exit(&EMLXS_MEMGET_LOCK); 647 648 (void) emlxs_mem_free_buffer(hba); 649 650 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 651 "CT_BUF DMA buffer."); 652 653 return (0); 654 } 655 bp = (uint8_t *)buf_info->virt; 656 bzero(bp, mp->fc_memsize); 657 658 /* 659 * Link buffer into beginning of list. The first pointer in 660 * each buffer is a forward pointer to the next buffer. 661 */ 662 oldbp = mp->fc_memget_ptr; 663 664 if (oldbp == 0) { 665 mp->fc_memget_end = (uint8_t *)matp; 666 } 667 mp->fc_memget_ptr = (uint8_t *)matp; 668 matp->fc_mptr = oldbp; 669 matp->virt = buf_info->virt; 670 matp->phys = buf_info->phys; 671 matp->size = buf_info->size; 672 matp->dma_handle = buf_info->dma_handle; 673 matp->data_handle = buf_info->data_handle; 674 matp->tag = MEM_CTBUF; 675 matp->flag |= MAP_POOL_ALLOCATED; 676 } 677 678 #ifdef SFCT_SUPPORT 679 680 /* 681 * These represent the unsolicited FCT buffers we preallocate. 682 */ 683 mp = &hba->memseg[MEM_FCTBUF]; 684 mp->fc_memsize = MEM_FCTBUF_SIZE; 685 mp->fc_numblks = (hba->tgt_mode) ? 
MEM_FCTBUF_COUNT : 0; 686 mp->fc_memflag = FC_MEM_DMA; 687 mp->fc_lowmem = 0; 688 mp->fc_memstart_virt = 0; 689 mp->fc_memstart_phys = 0; 690 mp->fc_mem_dma_handle = 0; 691 mp->fc_mem_dat_handle = 0; 692 mp->fc_memget_ptr = 0; 693 mp->fc_memget_end = 0; 694 mp->fc_memput_ptr = 0; 695 mp->fc_memput_end = 0; 696 mp->fc_total_memsize = 0; 697 mp->fc_memget_cnt = mp->fc_numblks; 698 mp->fc_memput_cnt = 0; 699 700 /* Allocate buffer pools for above buffer structures */ 701 for (i = 0; i < mp->fc_numblks; i++) { 702 /* 703 * If this is a DMA buffer we need alignment on a page so we 704 * don't want to worry about buffers spanning page boundries 705 * when mapping memory for the adapter. 706 */ 707 bzero(buf_info, sizeof (MBUF_INFO)); 708 buf_info->size = sizeof (MATCHMAP); 709 buf_info->align = sizeof (void *); 710 711 (void) emlxs_mem_alloc(hba, buf_info); 712 if (buf_info->virt == NULL) { 713 mutex_exit(&EMLXS_MEMGET_LOCK); 714 715 (void) emlxs_mem_free_buffer(hba); 716 717 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 718 "FCT_BUF Segment buffer."); 719 720 return (0); 721 } 722 matp = (MATCHMAP *) buf_info->virt; 723 bzero(matp, sizeof (MATCHMAP)); 724 725 bzero(buf_info, sizeof (MBUF_INFO)); 726 buf_info->size = mp->fc_memsize; 727 buf_info->flags = FC_MBUF_DMA; 728 buf_info->align = 32; 729 730 (void) emlxs_mem_alloc(hba, buf_info); 731 if (buf_info->virt == NULL) { 732 mutex_exit(&EMLXS_MEMGET_LOCK); 733 734 (void) emlxs_mem_free_buffer(hba); 735 736 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 737 "FCT_BUF DMA buffer."); 738 739 return (0); 740 } 741 bp = (uint8_t *)buf_info->virt; 742 bzero(bp, mp->fc_memsize); 743 744 /* 745 * Link buffer into beginning of list. The first pointer in 746 * each buffer is a forward pointer to the next buffer. 
747 */ 748 oldbp = mp->fc_memget_ptr; 749 750 if (oldbp == 0) { 751 mp->fc_memget_end = (uint8_t *)matp; 752 } 753 mp->fc_memget_ptr = (uint8_t *)matp; 754 matp->fc_mptr = oldbp; 755 matp->virt = buf_info->virt; 756 matp->phys = buf_info->phys; 757 matp->size = buf_info->size; 758 matp->dma_handle = buf_info->dma_handle; 759 matp->data_handle = buf_info->data_handle; 760 matp->tag = MEM_FCTBUF; 761 matp->flag |= MAP_POOL_ALLOCATED; 762 } 763 #endif /* SFCT_SUPPORT */ 764 765 for (i = 0; i < FC_MAX_SEG; i++) { 766 char *seg; 767 768 switch (i) { 769 case MEM_NLP: 770 seg = "MEM_NLP"; 771 break; 772 case MEM_IOCB: 773 seg = "MEM_IOCB"; 774 break; 775 case MEM_MBOX: 776 seg = "MEM_MBOX"; 777 break; 778 case MEM_BPL: 779 seg = "MEM_BPL"; 780 break; 781 case MEM_BUF: 782 seg = "MEM_BUF"; 783 break; 784 case MEM_IPBUF: 785 seg = "MEM_IPBUF"; 786 break; 787 case MEM_CTBUF: 788 seg = "MEM_CTBUF"; 789 break; 790 #ifdef SFCT_SUPPORT 791 case MEM_FCTBUF: 792 seg = "MEM_FCTBUF"; 793 break; 794 #endif /* SFCT_SUPPORT */ 795 default: 796 break; 797 } 798 799 mp = &hba->memseg[i]; 800 801 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg, 802 "Segment: %s mp=%p size=%x count=%d flags=%x base=%p", 803 seg, mp, mp->fc_memsize, mp->fc_numblks, mp->fc_memflag, 804 mp->fc_memget_ptr); 805 } 806 807 mutex_exit(&EMLXS_MEMGET_LOCK); 808 809 return (1); 810 811 } /* emlxs_mem_alloc_buffer() */ 812 813 814 815 /* 816 * emlxs_mem_free_buffer 817 * 818 * This routine will free iocb/data buffer space 819 * and TGTM resource. 
*/
/*
 * Always returns 0.  Also serves as the unwind path when
 * emlxs_mem_alloc_buffer() fails partway through, so every free below
 * must tolerate resources that were never allocated.
 */
extern int
emlxs_mem_free_buffer(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	int32_t j;
	uint8_t *bp;
	MEMSEG *mp;
	MATCHMAP *mm;
	RING *rp;
	IOCBQ *iocbq;
	IOCB *iocb;
	MAILBOXQ *mbox, *mbsave;
	MBUF_INFO *buf_info;
	MBUF_INFO bufinfo;
	emlxs_buf_t *sbp;
	fc_unsol_buf_t *ubp;
	RING *fcp_rp;
	RING *ip_rp;
	RING *els_rp;
	RING *ct_rp;
	uint32_t total_iotags;
	emlxs_ub_priv_t *ub_priv;

	buf_info = &bufinfo;

	/* Check for deferred pkt completion */
	if (hba->mbox_sbp) {
		sbp = (emlxs_buf_t *)hba->mbox_sbp;
		hba->mbox_sbp = 0;

		/* Complete the held packet before its buffers are torn down */
		emlxs_pkt_complete(sbp, -1, 0, 1);
	}
	/* Check for deferred ub completion */
	if (hba->mbox_ubp) {
		ubp = (fc_unsol_buf_t *)hba->mbox_ubp;
		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
		/* Deliver on the port that owns the unsolicited buffer */
		port = ub_priv->port;
		hba->mbox_ubp = 0;

		emlxs_ub_callback(port, ubp);
	}
	/* Check for deferred iocb tx */
	if (hba->mbox_iocbq) {	/* iocb */
		iocbq = (IOCBQ *) hba->mbox_iocbq;
		hba->mbox_iocbq = 0;
		iocb = &iocbq->iocb;

		/* Set the error status of the iocb */
		iocb->ulpStatus = IOSTAT_LOCAL_REJECT;
		iocb->un.grsp.perr.statLocalError = IOERR_ABORT_REQUESTED;

		/*
		 * Route the aborted iocb to the normal completion handler
		 * for its command class so its resources are reclaimed.
		 */
		switch (iocb->ulpCommand) {
		case CMD_FCP_ICMND_CR:
		case CMD_FCP_ICMND_CX:
		case CMD_FCP_IREAD_CR:
		case CMD_FCP_IREAD_CX:
		case CMD_FCP_IWRITE_CR:
		case CMD_FCP_IWRITE_CX:
		case CMD_FCP_ICMND64_CR:
		case CMD_FCP_ICMND64_CX:
		case CMD_FCP_IREAD64_CR:
		case CMD_FCP_IREAD64_CX:
		case CMD_FCP_IWRITE64_CR:
		case CMD_FCP_IWRITE64_CX:
			rp = &hba->ring[FC_FCP_RING];
			emlxs_handle_fcp_event(hba, rp, iocbq);
			break;

		case CMD_ELS_REQUEST_CR:
		case CMD_ELS_REQUEST_CX:
		case CMD_XMIT_ELS_RSP_CX:
		case CMD_ELS_REQUEST64_CR:	/* This is the only one used */
						/* currently for deferred */
						/* iocb tx */
		case CMD_ELS_REQUEST64_CX:
		case CMD_XMIT_ELS_RSP64_CX:
			rp = &hba->ring[FC_ELS_RING];
			(void) emlxs_els_handle_event(hba, rp, iocbq);
			break;

		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:
			rp = &hba->ring[FC_CT_RING];
			(void) emlxs_ct_handle_event(hba, rp, iocbq);
			break;

		default:
			/*
			 * Unknown command class: return the attached data
			 * buffer (if any) to the pool matching its ring,
			 * then the iocbq itself unless a packet owns it.
			 */
			rp = (RING *) iocbq->ring;

			if (rp) {
				if (rp->ringno == FC_ELS_RING) {
					(void) emlxs_mem_put(hba, MEM_ELSBUF,
					    (uint8_t *)iocbq->bp);
				} else if (rp->ringno == FC_CT_RING) {
					(void) emlxs_mem_put(hba, MEM_CTBUF,
					    (uint8_t *)iocbq->bp);
				} else if (rp->ringno == FC_IP_RING) {
					(void) emlxs_mem_put(hba, MEM_IPBUF,
					    (uint8_t *)iocbq->bp);
				}
#ifdef SFCT_SUPPORT
				else if (rp->ringno == FC_FCT_RING) {
					(void) emlxs_mem_put(hba, MEM_FCTBUF,
					    (uint8_t *)iocbq->bp);
				}
#endif /* SFCT_SUPPORT */

			} else if (iocbq->bp) {
				(void) emlxs_mem_put(hba, MEM_BUF,
				    (uint8_t *)iocbq->bp);
			}
			if (!iocbq->sbp) {
				(void) emlxs_mem_put(hba, MEM_IOCB,
				    (uint8_t *)iocbq);
			}
		}
	}
	/* free the mapped address match area for each ring */
	for (j = 0; j < hba->ring_count; j++) {
		rp = &hba->ring[j];

		/* Flush the ring */
		(void) emlxs_tx_ring_flush(hba, rp, 0);

		/* Drain the posted-buffer list, returning each to its pool */
		while (rp->fc_mpoff) {
			uint64_t addr;

			addr = 0;
			mm = (MATCHMAP *) (rp->fc_mpoff);

			/* Only these rings look buffers up by phys addr */
			if ((j == FC_ELS_RING) ||
			    (j == FC_CT_RING) ||
#ifdef SFCT_SUPPORT
			    (j == FC_FCT_RING) ||
#endif /* SFCT_SUPPORT */
			    (j == FC_IP_RING)) {
				addr = mm->phys;
			}
			if ((mm = emlxs_mem_get_vaddr(hba, rp, addr))) {
				if (j == FC_ELS_RING) {
					(void) emlxs_mem_put(hba, MEM_ELSBUF,
					    (uint8_t *)mm);
				} else if (j == FC_CT_RING) {
					(void) emlxs_mem_put(hba, MEM_CTBUF,
					    (uint8_t *)mm);
				} else if (j == FC_IP_RING) {
					(void) emlxs_mem_put(hba, MEM_IPBUF,
					    (uint8_t *)mm);
				}
#ifdef SFCT_SUPPORT
				else if (j == FC_FCT_RING) {
					(void) emlxs_mem_put(hba, MEM_FCTBUF,
					    (uint8_t *)mm);
				}
#endif /* SFCT_SUPPORT */

			}
		}
	}

#ifdef SLI3_SUPPORT
	/* Release host buffer queues, if they were in use */
	if (hba->flag & FC_HBQ_ENABLED) {
		emlxs_hbq_free_all(hba, EMLXS_ELS_HBQ_ID);
		emlxs_hbq_free_all(hba, EMLXS_IP_HBQ_ID);
		emlxs_hbq_free_all(hba, EMLXS_CT_HBQ_ID);
#ifdef SFCT_SUPPORT
		if (hba->tgt_mode) {
			emlxs_hbq_free_all(hba, EMLXS_FCT_HBQ_ID);
		}
#endif /* SFCT_SUPPORT */

	}
#endif /* SLI3_SUPPORT */

	/* Free everything on mbox queue */
	mbox = (MAILBOXQ *) (hba->mbox_queue.q_first);
	while (mbox) {
		mbsave = mbox;
		mbox = (MAILBOXQ *) mbox->next;
		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbsave);
	}
	hba->mbox_queue.q_first = NULL;
	hba->mbox_queue.q_last = NULL;
	hba->mbox_queue.q_cnt = 0;
	hba->mbox_queue_flag = 0;

	/* Free the nodes */
	for (j = 0; j < MAX_VPORTS; j++) {
		vport = &VPORT(j);
		if (vport->node_count) {
			emlxs_node_destroy_all(vport);
		}
	}

	/* Free memory associated with all buffers on get buffer pool */
	if (hba->iotag_table) {
		fcp_rp = &hba->ring[FC_FCP_RING];
		ip_rp = &hba->ring[FC_IP_RING];
		els_rp = &hba->ring[FC_ELS_RING];
		ct_rp = &hba->ring[FC_CT_RING];

		/* Mirrors the sizing math in emlxs_mem_alloc_buffer() */
		total_iotags = fcp_rp->max_iotag + ip_rp->max_iotag +
		    els_rp->max_iotag + ct_rp->max_iotag;

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = total_iotags * sizeof (emlxs_buf_t *);
		buf_info->virt = hba->iotag_table;
		emlxs_mem_free(hba, buf_info);

		hba->iotag_table = 0;
	}
#ifdef EMLXS_SPARC
	/*
	 * NOTE(review): fcp_rp is only assigned inside the
	 * "if (hba->iotag_table)" block above.  If iotag_table is NULL
	 * while fcp_bpl_table is set, fcp_rp is read uninitialized
	 * here -- confirm whether that combination can actually occur.
	 */
	if (hba->fcp_bpl_table) {
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = fcp_rp->max_iotag * sizeof (MATCHMAP);
		buf_info->virt = hba->fcp_bpl_table;
		emlxs_mem_free(hba, buf_info);

		hba->fcp_bpl_table = 0;
	}
	if (hba->fcp_bpl_mp.virt) {
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = hba->fcp_bpl_mp.size;
		buf_info->virt = hba->fcp_bpl_mp.virt;
		buf_info->phys = hba->fcp_bpl_mp.phys;
		buf_info->dma_handle = hba->fcp_bpl_mp.dma_handle;
		buf_info->data_handle = hba->fcp_bpl_mp.data_handle;
		buf_info->flags = FC_MBUF_DMA;
		emlxs_mem_free(hba, buf_info);

		bzero(&hba->fcp_bpl_mp, sizeof (MATCHMAP));
	}
#endif /* EMLXS_SPARC */

	/* Free the memory segments */
	for (j = 0; j < FC_MAX_SEG; j++) {
		mp = &hba->memseg[j];

		/* MEM_NLP, MEM_IOCB, MEM_MBOX */
		/* These three are each one contiguous kmem region */
		if (j < MEM_BPL) {
			if (mp->fc_memstart_virt) {
				kmem_free(mp->fc_memstart_virt,
				    mp->fc_total_memsize);
				bzero((char *)mp, sizeof (MEMSEG));
			}
			continue;
		}
		/*
		 * MEM_BPL, MEM_BUF, MEM_ELSBUF, MEM_IPBUF, MEM_CTBUF,
		 * MEM_FCTBUF
		 *
		 * These are per-buffer MATCHMAP + DMA-buffer pairs; each
		 * must be freed individually off the get and put lists.
		 */

		/* Free memory associated with all buffers on get buffer pool */
		mutex_enter(&EMLXS_MEMGET_LOCK);
		while ((bp = mp->fc_memget_ptr) != NULL) {
			mp->fc_memget_ptr = *((uint8_t **)bp);
			mm = (MATCHMAP *) bp;

			/* First the DMA buffer ... */
			bzero(buf_info, sizeof (MBUF_INFO));
			buf_info->size = mm->size;
			buf_info->virt = mm->virt;
			buf_info->phys = mm->phys;
			buf_info->dma_handle = mm->dma_handle;
			buf_info->data_handle = mm->data_handle;
			buf_info->flags = FC_MBUF_DMA;
			emlxs_mem_free(hba, buf_info);

			/* ... then the MATCHMAP header itself */
			bzero(buf_info, sizeof (MBUF_INFO));
			buf_info->size = sizeof (MATCHMAP);
			buf_info->virt = (uint32_t *)mm;
			emlxs_mem_free(hba, buf_info);
		}
		mutex_exit(&EMLXS_MEMGET_LOCK);

		/* Free memory associated with all buffers on put buffer pool */
		mutex_enter(&EMLXS_MEMPUT_LOCK);
		while ((bp = mp->fc_memput_ptr) != NULL) {
			mp->fc_memput_ptr = *((uint8_t **)bp);
			mm = (MATCHMAP *) bp;

			bzero(buf_info, sizeof (MBUF_INFO));
			buf_info->size = mm->size;
			buf_info->virt = mm->virt;
			buf_info->phys = mm->phys;
			buf_info->dma_handle = mm->dma_handle;
			buf_info->data_handle = mm->data_handle;
			buf_info->flags = FC_MBUF_DMA;
			emlxs_mem_free(hba, buf_info);

			bzero(buf_info, sizeof (MBUF_INFO));
			buf_info->size = sizeof (MATCHMAP);
			buf_info->virt = (uint32_t *)mm;
			emlxs_mem_free(hba, buf_info);
		}
		mutex_exit(&EMLXS_MEMPUT_LOCK);
		bzero((char *)mp, sizeof (MEMSEG));
	}

	return (0);

} /* emlxs_mem_free_buffer() */


/*
 * Allocate a single standalone MEM_BUF-sized DMA buffer (MATCHMAP
 * header + DMA region), outside any pool.  Returns a pointer to the
 * MATCHMAP cast to uint8_t *, or 0 on allocation failure.  Release
 * with emlxs_mem_buf_free().
 */
extern uint8_t *
emlxs_mem_buf_alloc(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint8_t *bp = NULL;
	MATCHMAP *matp = NULL;
	MBUF_INFO *buf_info;
	MBUF_INFO bufinfo;

	buf_info = &bufinfo;

	/* Allocate the MATCHMAP header */
	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = sizeof (MATCHMAP);
	buf_info->align = sizeof (void *);

	(void) emlxs_mem_alloc(hba, buf_info);
	if (buf_info->virt == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "MEM_BUF_ALLOC buffer.");

		return (0);
	}
	matp = (MATCHMAP *) buf_info->virt;
	bzero(matp, sizeof (MATCHMAP));

	/* Allocate the DMA buffer the header will describe */
	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = MEM_BUF_SIZE;
	buf_info->flags = FC_MBUF_DMA;
	buf_info->align = 32;

	(void) emlxs_mem_alloc(hba, buf_info);
	if (buf_info->virt == NULL) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "MEM_BUF_ALLOC DMA buffer.");

		/* Free the matp object */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->virt = (uint32_t *)matp;
		emlxs_mem_free(hba, buf_info);

		return (0);
	}
	bp = (uint8_t *)buf_info->virt;
	bzero(bp, MEM_BUF_SIZE);

	matp->fc_mptr = NULL;
	matp->virt = buf_info->virt;
	matp->phys = buf_info->phys;
	matp->size = buf_info->size;
	matp->dma_handle = buf_info->dma_handle;
	matp->data_handle = buf_info->data_handle;
	matp->tag = MEM_BUF;
	/* MAP_BUF_ALLOCATED (not POOL) marks it as standalone */
	matp->flag |= MAP_BUF_ALLOCATED;

	return
	    ((uint8_t *)matp);

} /* emlxs_mem_buf_alloc() */


/*
 * emlxs_mem_buf_free
 *
 * Releases a buffer created by emlxs_mem_buf_alloc(): frees the DMA
 * buffer described by the MATCHMAP, then the MATCHMAP object itself.
 * Returns bp on success, or NULL if the object was not allocated by
 * emlxs_mem_buf_alloc() (MAP_BUF_ALLOCATED flag not set).
 */
extern uint8_t *
emlxs_mem_buf_free(emlxs_hba_t *hba, uint8_t *bp)
{
	MATCHMAP *matp;
	MBUF_INFO bufinfo;
	MBUF_INFO *buf_info;

	buf_info = &bufinfo;

	matp = (MATCHMAP *) bp;

	/* Only objects tagged by emlxs_mem_buf_alloc() may be freed here */
	if (!(matp->flag & MAP_BUF_ALLOCATED)) {
		return (NULL);
	}

	/* Free the DMA buffer attached to the MATCHMAP */
	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = matp->size;
	buf_info->virt = matp->virt;
	buf_info->phys = matp->phys;
	buf_info->dma_handle = matp->dma_handle;
	buf_info->data_handle = matp->data_handle;
	buf_info->flags = FC_MBUF_DMA;
	emlxs_mem_free(hba, buf_info);

	/* Free the MATCHMAP tracking object itself */
	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = sizeof (MATCHMAP);
	buf_info->virt = (uint32_t *)matp;
	emlxs_mem_free(hba, buf_info);

	return (bp);

} /* emlxs_mem_buf_free() */



/*
 * emlxs_mem_get
 *
 * This routine will get a free memory buffer.
 * seg identifies which buffer pool to use.
 * Returns the free buffer ptr or 0 for no buf
 */
extern uint8_t *
emlxs_mem_get(emlxs_hba_t *hba, uint32_t arg)
{
	emlxs_port_t *port = &PPORT;
	MEMSEG *mp;
	uint8_t *bp = NULL;
	uint32_t seg = arg & MEM_SEG_MASK;	/* strip flags, e.g. MEM_PRI */
	MAILBOXQ *mbq;
	MATCHMAP *matp;
	IOCBQ *iocbq;
	NODELIST *node;
	uint8_t *base;
	uint8_t *end;

	/* range check on seg argument */
	if (seg >= FC_MAX_SEG) {
		return (NULL);
	}
	mp = &hba->memseg[seg];

	/* Check if memory segment destroyed! */
	if (mp->fc_memsize == 0) {
		return (NULL);
	}
	mutex_enter(&EMLXS_MEMGET_LOCK);

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg, "mem_get[%d]:
	 * memget=%p,%d memput=%p,%d", seg, mp->fc_memget_ptr,
	 * mp->fc_memget_cnt, mp->fc_memput_ptr, mp->fc_memput_cnt);
	 */

top:

	if (mp->fc_memget_ptr) {
		bp = mp->fc_memget_ptr;

		/*
		 * Checking (seg == MEM_MBOX || seg == MEM_IOCB || seg ==
		 * MEM_NLP)
		 */
		/* Verify buffer is in this memory region */
		if (mp->fc_memstart_virt && mp->fc_total_memsize) {
			base = mp->fc_memstart_virt;
			end = mp->fc_memstart_virt + mp->fc_total_memsize;
			if (bp < base || bp >= end) {
				/* Invalidate the the get list */
				mp->fc_memget_ptr = NULL;
				mp->fc_memget_end = NULL;
				mp->fc_memget_cnt = 0;

				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
				    "Corruption detected: seg=%x bp=%p "
				    "base=%p end=%p.", seg, bp, base, end);

				emlxs_ffstate_change(hba, FC_ERROR);

				mutex_exit(&EMLXS_MEMGET_LOCK);

				/*
				 * Pool is corrupt: shut the adapter down
				 * from a separate kernel thread.
				 */
				(void) thread_create(NULL, 0,
				    emlxs_shutdown_thread,
				    (char *)hba, 0, &p0, TS_RUN,
				    v.v_maxsyspri - 2);

				return (NULL);
			}
		}
		/*
		 * If a memory block exists, take it off freelist and return
		 * it to the user.
		 */
		if (mp->fc_memget_end == bp) {
			/* Last entry: the get list is now empty */
			mp->fc_memget_ptr = NULL;
			mp->fc_memget_end = NULL;
			mp->fc_memget_cnt = 0;

		} else {
			/*
			 * Pointer to the next free buffer
			 */
			mp->fc_memget_ptr = *((uint8_t **)bp);
			mp->fc_memget_cnt--;
		}

		/* Initialize the object and mark it pool-allocated */
		switch (seg) {
		case MEM_MBOX:
			bzero(bp, sizeof (MAILBOXQ));

			mbq = (MAILBOXQ *) bp;
			mbq->flag |= MBQ_POOL_ALLOCATED;
			break;

		case MEM_IOCB:
			bzero(bp, sizeof (IOCBQ));

			iocbq = (IOCBQ *) bp;
			iocbq->flag |= IOCB_POOL_ALLOCATED;
			break;

		case MEM_NLP:
			bzero(bp, sizeof (NODELIST));

			node = (NODELIST *) bp;
			node->flag |= NODE_POOL_ALLOCATED;
			break;

		case MEM_BPL:
		case MEM_BUF:	/* MEM_ELSBUF */
		case MEM_IPBUF:
		case MEM_CTBUF:
#ifdef SFCT_SUPPORT
		case MEM_FCTBUF:
#endif /* SFCT_SUPPORT */
		default:
			/*
			 * DMA pools: note the buffer is NOT zeroed here,
			 * only its list linkage and flags are reset.
			 */
			matp = (MATCHMAP *) bp;
			matp->fc_mptr = NULL;
			matp->flag |= MAP_POOL_ALLOCATED;
			break;
		}
	} else {
		mutex_enter(&EMLXS_MEMPUT_LOCK);
		if (mp->fc_memput_ptr) {
			/*
			 * Move buffer from memput to memget
			 */
			mp->fc_memget_ptr = mp->fc_memput_ptr;
			mp->fc_memget_end = mp->fc_memput_end;
			mp->fc_memget_cnt = mp->fc_memput_cnt;
			mp->fc_memput_ptr = NULL;
			mp->fc_memput_end = NULL;
			mp->fc_memput_cnt = 0;
			mutex_exit(&EMLXS_MEMPUT_LOCK);

			/* Retry the allocation from the refilled get list */
			goto top;
		}
		mutex_exit(&EMLXS_MEMPUT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_alloc_failed_msg,
		    "Pool empty: seg=%x lowmem=%x free=%x",
		    seg, mp->fc_lowmem, mp->fc_memget_cnt);

		/* HBASTATS.memAllocErr++; */
	}

	/*
	 * bp2 = mp->fc_memget_ptr;
	 *
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg, "mem_get[%d]-:
	 * memget=%p,%d memput=%p,%d >%x", seg, mp->fc_memget_ptr,
	 * mp->fc_memget_cnt, mp->fc_memput_ptr, mp->fc_memput_cnt, ((bp2)?
	 * *((uint8_t **) bp2):0));
	 */

	mutex_exit(&EMLXS_MEMGET_LOCK);

	return (bp);

} /* emlxs_mem_get() */



/*
 * emlxs_mem_put
 *
 * Returns a pool object obtained from emlxs_mem_get() to the head of
 * the seg pool's put list.  Objects that were not pool allocated are
 * returned to the caller untouched; buffers created by
 * emlxs_mem_buf_free() are routed there instead.  Returns bp on
 * success or NULL on error (bad argument, destroyed segment,
 * double free, or detected pool corruption).
 */
extern uint8_t *
emlxs_mem_put(emlxs_hba_t *hba, uint32_t seg, uint8_t *bp)
{
	emlxs_port_t *port = &PPORT;
	MEMSEG *mp;
	uint8_t *oldbp;
	MATCHMAP *matp;
	IOCBQ *iocbq;
	MAILBOXQ *mbq;
	NODELIST *node;
	uint8_t *base;
	uint8_t *end;

	if (!bp) {
		return (NULL);
	}
	/* Check on seg argument */
	if (seg >= FC_MAX_SEG) {
		return (NULL);
	}
	mp = &hba->memseg[seg];

	/* Reject objects that did not come from this pool */
	switch (seg) {
	case MEM_MBOX:
		mbq = (MAILBOXQ *) bp;

		if (!(mbq->flag & MBQ_POOL_ALLOCATED)) {
			return (bp);
		}
		break;

	case MEM_IOCB:
		iocbq = (IOCBQ *) bp;

		/* Check to make sure the IOCB is pool allocated */
		if (!(iocbq->flag & IOCB_POOL_ALLOCATED)) {
			return (bp);
		}
		/*
		 * Any IOCBQ with a packet attached did not come from our
		 * pool
		 */
		if (iocbq->sbp) {
			return (bp);
		}
		break;

	case MEM_NLP:
		node = (NODELIST *) bp;

		/* Check to make sure the NODE is pool allocated */
		if (!(node->flag & NODE_POOL_ALLOCATED)) {
			return (bp);
		}
		break;

	case MEM_BPL:
	case MEM_BUF:	/* MEM_ELSBUF */
	case MEM_IPBUF:
	case MEM_CTBUF:
#ifdef SFCT_SUPPORT
	case MEM_FCTBUF:
#endif /* SFCT_SUPPORT */
	default:
		matp = (MATCHMAP *) bp;

		/* One-off buffers go back through emlxs_mem_buf_free() */
		if (matp->flag & MAP_BUF_ALLOCATED) {
			return (emlxs_mem_buf_free(hba, bp));
		}
		if (matp->flag & MAP_TABLE_ALLOCATED) {
			return (bp);
		}
		/* Check to make sure the MATCHMAP is pool allocated */
		if (!(matp->flag & MAP_POOL_ALLOCATED)) {
			return (bp);
		}
		break;
	}

	/* Free the pool object */
	mutex_enter(&EMLXS_MEMPUT_LOCK);

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg, "mem_put[%d]:
	 * memget=%p,%d memput=%p,%d", seg, mp->fc_memget_ptr,
	 * mp->fc_memget_cnt, mp->fc_memput_ptr, mp->fc_memput_cnt);
	 */

	/* Check if memory segment destroyed! */
	if (mp->fc_memsize == 0) {
		mutex_exit(&EMLXS_MEMPUT_LOCK);
		return (NULL);
	}
	/* Check if buffer was just freed (double-free guard, head only) */
	if (mp->fc_memput_ptr == bp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "Freeing Free object: seg=%x bp=%p", seg, bp);

		mutex_exit(&EMLXS_MEMPUT_LOCK);
		return (NULL);
	}
	/* Validate the buffer */

	/*
	 * Checking (seg == MEM_BUF) || (seg == MEM_BPL) || (seg ==
	 * MEM_CTBUF) || (seg == MEM_IPBUF) || (seg == MEM_FCTBUF)
	 */
	if (mp->fc_memflag & FC_MEM_DMA) {
		/* DMA pools: the MATCHMAP tag must match its segment */
		if (matp->tag != seg) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
			    "Corruption detected: seg=%x tag=%x bp=%p",
			    seg, matp->tag, bp);

			emlxs_ffstate_change(hba, FC_ERROR);

			mutex_exit(&EMLXS_MEMPUT_LOCK);

			/* Shut the adapter down from a separate thread */
			(void) thread_create(NULL, 0, emlxs_shutdown_thread,
			    (char *)hba, 0, &p0, TS_RUN, v.v_maxsyspri - 2);

			return (NULL);
		}
	}
	/* Checking (seg == MEM_MBOX || seg == MEM_IOCB || seg == MEM_NLP) */
	else if (mp->fc_memstart_virt && mp->fc_total_memsize) {
		base = mp->fc_memstart_virt;
		end = mp->fc_memstart_virt + mp->fc_total_memsize;
		if (bp < base || bp >= end) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
			    "Corruption detected: seg=%x bp=%p base=%p end=%p",
			    seg, bp, base, end);

			emlxs_ffstate_change(hba, FC_ERROR);

			mutex_exit(&EMLXS_MEMPUT_LOCK);

			/* Shut the adapter down from a separate thread */
			(void) thread_create(NULL, 0, emlxs_shutdown_thread,
			    (char *)hba, 0, &p0, TS_RUN, v.v_maxsyspri - 2);

			return (NULL);
		}
	}
	/* Release to the first place of the freelist */
	oldbp = mp->fc_memput_ptr;
	mp->fc_memput_ptr = bp;
	*((uint8_t **)bp) = oldbp;

	if (oldbp == NULL) {
		/* List was empty: bp is both head and tail now */
		mp->fc_memput_end = bp;
		mp->fc_memput_cnt = 1;
	} else {
		mp->fc_memput_cnt++;
	}

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg, "mem_put[%d]-:
	 * memget=%p,%d memput=%p,%d", seg, mp->fc_memget_ptr,
	 * mp->fc_memget_cnt, mp->fc_memput_ptr, mp->fc_memput_cnt);
	 */

	mutex_exit(&EMLXS_MEMPUT_LOCK);

	return (bp);

} /* emlxs_mem_put() */



/*
 * Look up the virtual address given a mapped address
 *
 * Walks the ring's posted-buffer list (fc_mpoff = head,
 * fc_mpon = tail) for a MATCHMAP whose DMA address equals mapbp,
 * unlinks it, syncs its DMA buffer for the kernel, and returns it.
 * Returns 0 and logs an error if no match is found.
 */
extern MATCHMAP *
emlxs_mem_get_vaddr(emlxs_hba_t *hba, RING *rp, uint64_t mapbp)
{
	emlxs_port_t *port = &PPORT;
	MATCHMAP *prev;
	MATCHMAP *mp;

	switch (rp->ringno) {
	case FC_ELS_RING:
		mp = (MATCHMAP *) rp->fc_mpoff;
		prev = 0;

		while (mp) {
			if (mp->phys == mapbp) {
				/* Unlink from the singly-linked list */
				if (prev == 0) {
					rp->fc_mpoff = mp->fc_mptr;
				} else {
					prev->fc_mptr = mp->fc_mptr;
				}

				/* Fix the tail if we removed the last node */
				if (rp->fc_mpon == (uint8_t *)mp) {
					rp->fc_mpon = (uint8_t *)prev;
				}
				mp->fc_mptr = 0;

				emlxs_mpdata_sync(mp->dma_handle, 0, mp->size,
				    DDI_DMA_SYNC_FORKERNEL);

				HBASTATS.ElsUbPosted--;

				return (mp);
			}
			prev = mp;
			mp = (MATCHMAP *) mp->fc_mptr;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "ELS Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);

		break;

	case FC_CT_RING:
		/* Same walk-and-unlink as the ELS case, CT statistics */
		mp = (MATCHMAP *) rp->fc_mpoff;
		prev = 0;

		while (mp) {
			if (mp->phys == mapbp) {
				if (prev == 0) {
					rp->fc_mpoff = mp->fc_mptr;
				} else {
					prev->fc_mptr = mp->fc_mptr;
				}

				if (rp->fc_mpon == (uint8_t *)mp) {
					rp->fc_mpon = (uint8_t *)prev;
				}
				mp->fc_mptr = 0;

				emlxs_mpdata_sync(mp->dma_handle, 0, mp->size,
				    DDI_DMA_SYNC_FORKERNEL);

				HBASTATS.CtUbPosted--;

				return (mp);
			}
			prev = mp;
			mp = (MATCHMAP *) mp->fc_mptr;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "CT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);

		break;

	case FC_IP_RING:
		/* Same walk-and-unlink as the ELS case, IP statistics */
		mp = (MATCHMAP *) rp->fc_mpoff;
		prev = 0;

		while (mp) {
			if (mp->phys == mapbp) {
				if (prev == 0) {
					rp->fc_mpoff = mp->fc_mptr;
				} else {
					prev->fc_mptr = mp->fc_mptr;
				}

				if (rp->fc_mpon == (uint8_t *)mp) {
					rp->fc_mpon = (uint8_t *)prev;
				}
				mp->fc_mptr = 0;

				emlxs_mpdata_sync(mp->dma_handle, 0, mp->size,
				    DDI_DMA_SYNC_FORKERNEL);

				HBASTATS.IpUbPosted--;

				return (mp);
			}
			prev = mp;
			mp = (MATCHMAP *) mp->fc_mptr;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "IP Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);

		break;

#ifdef SFCT_SUPPORT
	case FC_FCT_RING:
		/* Same walk-and-unlink as the ELS case, FCT statistics */
		mp = (MATCHMAP *) rp->fc_mpoff;
		prev = 0;

		while (mp) {
			if (mp->phys == mapbp) {
				if (prev == 0) {
					rp->fc_mpoff = mp->fc_mptr;
				} else {
					prev->fc_mptr = mp->fc_mptr;
				}

				if (rp->fc_mpon == (uint8_t *)mp) {
					rp->fc_mpon = (uint8_t *)prev;
				}
				mp->fc_mptr = 0;

				emlxs_mpdata_sync(mp->dma_handle, 0, mp->size,
				    DDI_DMA_SYNC_FORKERNEL);

				HBASTATS.FctUbPosted--;

				return (mp);
			}
			prev = mp;
			mp = (MATCHMAP *) mp->fc_mptr;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "FCT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);

		break;
#endif /* SFCT_SUPPORT */
	}

	return (0);

} /* emlxs_mem_get_vaddr() */


/*
 * Given a virtual address, bp, generate the physical mapped address and place
 * it where addr points to.
 * Save the address pair for lookup later.
 *
 * The MATCHMAP is appended to the ring's posted-buffer list
 * (fc_mpoff = head, fc_mpon = tail) so emlxs_mem_get_vaddr() can
 * recover it from the DMA address the hardware reports.  In SLIM2
 * mode both halves of the 64-bit DMA address are returned via
 * haddr/laddr; otherwise only the low half via laddr.
 */
extern void
emlxs_mem_map_vaddr(emlxs_hba_t *hba, RING *rp, MATCHMAP *mp, uint32_t *haddr,
    uint32_t *laddr)
{
	switch (rp->ringno) {
	case FC_ELS_RING:
		/*
		 * Update slot fc_mpon points to then bump it fc_mpoff is
		 * pointer head of the list. fc_mpon is pointer tail of the
		 * list.
		 */
		mp->fc_mptr = 0;
		if (rp->fc_mpoff == 0) {
			/* Empty list: mp becomes both head and tail */
			rp->fc_mpoff = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		} else {
			/* Append mp after the current tail */
			((MATCHMAP *) (rp->fc_mpon))->fc_mptr = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		}

		if (hba->flag & FC_SLIM2_MODE) {
			/* return mapped address */
			*haddr = (uint32_t)putPaddrHigh(mp->phys);
			*laddr = (uint32_t)putPaddrLow(mp->phys);
		} else {
			/* return mapped address */
			*laddr = (uint32_t)putPaddrLow(mp->phys);
		}

		HBASTATS.ElsUbPosted++;

		break;

	case FC_CT_RING:
		/*
		 * Update slot fc_mpon points to then bump it fc_mpoff is
		 * pointer head of the list. fc_mpon is pointer tail of the
		 * list.
		 */
		mp->fc_mptr = 0;
		if (rp->fc_mpoff == 0) {
			rp->fc_mpoff = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		} else {
			((MATCHMAP *) (rp->fc_mpon))->fc_mptr = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		}

		if (hba->flag & FC_SLIM2_MODE) {
			/* return mapped address */
			*haddr = (uint32_t)putPaddrHigh(mp->phys);
			*laddr = (uint32_t)putPaddrLow(mp->phys);
		} else {
			/* return mapped address */
			*laddr = (uint32_t)putPaddrLow(mp->phys);
		}

		HBASTATS.CtUbPosted++;

		break;


	case FC_IP_RING:
		/*
		 * Update slot fc_mpon points to then bump it fc_mpoff is
		 * pointer head of the list. fc_mpon is pointer tail of the
		 * list.
		 */
		mp->fc_mptr = 0;
		if (rp->fc_mpoff == 0) {
			rp->fc_mpoff = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		} else {
			((MATCHMAP *) (rp->fc_mpon))->fc_mptr = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		}

		if (hba->flag & FC_SLIM2_MODE) {
			/* return mapped address */
			*haddr = (uint32_t)putPaddrHigh(mp->phys);
			*laddr = (uint32_t)putPaddrLow(mp->phys);
		} else {
			/* return mapped address */
			*laddr = (uint32_t)putPaddrLow(mp->phys);
		}

		HBASTATS.IpUbPosted++;
		break;


#ifdef SFCT_SUPPORT
	case FC_FCT_RING:
		/*
		 * Update slot fc_mpon points to then bump it fc_mpoff is
		 * pointer head of the list. fc_mpon is pointer tail of the
		 * list.
		 */
		mp->fc_mptr = 0;
		if (rp->fc_mpoff == 0) {
			rp->fc_mpoff = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		} else {
			((MATCHMAP *) (rp->fc_mpon))->fc_mptr = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		}

		if (hba->flag & FC_SLIM2_MODE) {
			/* return mapped address */
			*haddr = (uint32_t)putPaddrHigh(mp->phys);
			*laddr = (uint32_t)putPaddrLow(mp->phys);
		} else {
			/* return mapped address */
			*laddr = (uint32_t)putPaddrLow(mp->phys);
		}

		HBASTATS.FctUbPosted++;
		break;
#endif /* SFCT_SUPPORT */
	}
} /* emlxs_mem_map_vaddr() */


#ifdef SLI3_SUPPORT

/*
 * emlxs_hbq_alloc
 *
 * Allocates the host-side DMA buffer holding the HBQ entry array for
 * hbq_id, if not already allocated, and records it in the HBQ table.
 * Returns 0 on success or ENOMEM on allocation failure.
 */
static uint32_t
emlxs_hbq_alloc(emlxs_hba_t *hba, uint32_t hbq_id)
{
	emlxs_port_t *port = &PPORT;
	HBQ_INIT_t *hbq;
	MBUF_INFO *buf_info;
	MBUF_INFO bufinfo;

	hbq = &hba->hbq_table[hbq_id];

	if (hbq->HBQ_host_buf.virt == 0) {
		buf_info = &bufinfo;

		/*
		 * Build the DMA request for the HBQ entry array.  The
		 * 4096-byte alignment is hard-coded here — presumably a
		 * page-size/hardware requirement; confirm before changing.
		 */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = hbq->HBQ_numEntries * sizeof (HBQE_t);
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = 4096;

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
			    "Unable to alloc HBQ.");
			return (ENOMEM);
		}
		/* Record the allocation in the HBQ table entry */
		hbq->HBQ_host_buf.virt = (void *) buf_info->virt;
		hbq->HBQ_host_buf.phys = buf_info->phys;
		hbq->HBQ_host_buf.data_handle = buf_info->data_handle;
		hbq->HBQ_host_buf.dma_handle = buf_info->dma_handle;
		hbq->HBQ_host_buf.size = buf_info->size;
		hbq->HBQ_host_buf.tag = hbq_id;

		bzero((char *)hbq->HBQ_host_buf.virt, buf_info->size);
	}
	return (0);

} /* emlxs_hbq_alloc() */


/*
 * emlxs_hbq_setup
 *
 * Configures Host Buffer Queue hbq_id: allocates the HBQ entry array,
 * posts one pool buffer per entry, issues the CONFIG_HBQ mailbox
 * command to the adapter and writes the initial put index to SLIM.
 * Returns 0 on success, 1 on any failure (resources already acquired
 * are released before returning).
 */
extern uint32_t
emlxs_hbq_setup(emlxs_hba_t *hba, uint32_t hbq_id)
{
	emlxs_port_t *port = &PPORT;
	HBQ_INIT_t *hbq;
	MATCHMAP *mp;
	HBQE_t *hbqE;
	MAILBOX *mb;
	void *ioa2;
	uint32_t j;
	uint32_t count;
	uint32_t size;
	uint32_t ringno;
	uint32_t seg;

	/* Select per-HBQ buffer count, size, ring and memory segment */
	switch (hbq_id) {
	case EMLXS_ELS_HBQ_ID:
		count = MEM_ELSBUF_COUNT;
		size = MEM_ELSBUF_SIZE;
		ringno = FC_ELS_RING;
		seg = MEM_ELSBUF;
		HBASTATS.ElsUbPosted = count;
		break;

	case EMLXS_IP_HBQ_ID:
		count = MEM_IPBUF_COUNT;
		size = MEM_IPBUF_SIZE;
		ringno = FC_IP_RING;
		seg = MEM_IPBUF;
		HBASTATS.IpUbPosted = count;
		break;

	case EMLXS_CT_HBQ_ID:
		count = MEM_CTBUF_COUNT;
		size = MEM_CTBUF_SIZE;
		ringno = FC_CT_RING;
		seg = MEM_CTBUF;
		HBASTATS.CtUbPosted = count;
		break;

#ifdef SFCT_SUPPORT
	case EMLXS_FCT_HBQ_ID:
		count = MEM_FCTBUF_COUNT;
		size = MEM_FCTBUF_SIZE;
		ringno = FC_FCT_RING;
		seg = MEM_FCTBUF;
		HBASTATS.FctUbPosted = count;
		break;
#endif /* SFCT_SUPPORT */

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
		    "emlxs_hbq_setup: Invalid HBQ id. (%x)", hbq_id);
		return (1);
	}

	/* Configure HBQ */
	hbq = &hba->hbq_table[hbq_id];
	hbq->HBQ_numEntries = count;

	/* Get a Mailbox buffer to setup mailbox commands for CONFIG_HBQ */
	if ((mb = (MAILBOX *) emlxs_mem_get(hba, (MEM_MBOX | MEM_PRI))) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
		    "emlxs_hbq_setup: Unable to get mailbox.");
		return (1);
	}
	/* Allocate HBQ Host buffer and Initialize the HBQEs */
	if (emlxs_hbq_alloc(hba, hbq_id)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
		    "emlxs_hbq_setup: Unable to allocate HBQ.");
		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
		return (1);
	}
	hbq->HBQ_recvNotify = 1;
	hbq->HBQ_num_mask = 0;	/* Bind to ring */
	hbq->HBQ_profile = 0;	/* Selection profile 0=all, 7=logentry */
	hbq->HBQ_ringMask = 1 << ringno;	/* b0100 * ringno - Binds */
						/* HBA to a ring e.g. */
						/* Ring0=b0001, Ring1=b0010, */
						/* Ring2=b0100 */
	hbq->HBQ_headerLen = 0;	/* 0 if not profile 4 or 5 */
	hbq->HBQ_logEntry = 0;	/* Set to 1 if this HBQ will be used for */
	hbq->HBQ_id = hbq_id;
	hbq->HBQ_PutIdx_next = 0;
	hbq->HBQ_PutIdx = hbq->HBQ_numEntries - 1;
	hbq->HBQ_GetIdx = 0;
	hbq->HBQ_PostBufCnt = hbq->HBQ_numEntries;
	bzero(hbq->HBQ_PostBufs, sizeof (hbq->HBQ_PostBufs));

	/* Fill in POST BUFFERs in HBQE */
	hbqE = (HBQE_t *)hbq->HBQ_host_buf.virt;
	for (j = 0; j < hbq->HBQ_numEntries; j++, hbqE++) {
		/* Allocate buffer to post */
		if ((mp = (MATCHMAP *) emlxs_mem_get(hba, (seg | MEM_PRI))) ==
		    0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
			    "emlxs_hbq_setup: Unable to allocate HBQ buffer. "
			    "cnt=%d", j);
			emlxs_hbq_free_all(hba, hbq_id);
			return (1);
		}
		hbq->HBQ_PostBufs[j] = mp;

		/*
		 * Fill in the HBQ entry; the PCIMEM_LONG wrapping is
		 * presumably a PCI byte-order conversion — confirm
		 * against its definition.
		 */
		hbqE->unt.ext.HBQ_tag = hbq_id;
		hbqE->unt.ext.HBQE_tag = j;
		hbqE->bde.tus.f.bdeSize = size;
		hbqE->bde.tus.f.bdeFlags = 0;
		hbqE->unt.w = PCIMEM_LONG(hbqE->unt.w);
		hbqE->bde.tus.w = PCIMEM_LONG(hbqE->bde.tus.w);
		hbqE->bde.addrLow =
		    PCIMEM_LONG((uint32_t)putPaddrLow(mp->phys));
		hbqE->bde.addrHigh =
		    PCIMEM_LONG((uint32_t)putPaddrHigh(mp->phys));
	}

	/* Issue CONFIG_HBQ */
	emlxs_mb_config_hbq(hba, mb, hbq_id);
	if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "emlxs_hbq_setup: Unable to config HBQ. cmd=%x status=%x",
		    mb->mbxCommand, mb->mbxStatus);

		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
		emlxs_hbq_free_all(hba, hbq_id);
		return (1);
	}
	/* Setup HBQ Get/Put indexes */
	ioa2 = (void *) ((char *)hba->slim_addr + (hba->hgp_hbq_offset +
	    (hbq_id * sizeof (uint32_t))));
	WRITE_SLIM_ADDR(hba, (volatile uint32_t *) ioa2, hbq->HBQ_PutIdx);

	hba->hbq_count++;

	(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);

	return (0);

} /* emlxs_hbq_setup */


/*
 * emlxs_hbq_free_all
 *
 * Tears down Host Buffer Queue hbq_id: returns every posted buffer to
 * its memory pool, frees the HBQ entry array DMA buffer and resets the
 * corresponding posted-buffer statistic.
 */
static void
emlxs_hbq_free_all(emlxs_hba_t *hba, uint32_t hbq_id)
{
	HBQ_INIT_t *hbq;
	MBUF_INFO *buf_info;
	MBUF_INFO bufinfo;
	uint32_t seg;
	uint32_t j;

	/* Map the HBQ id to its buffer pool segment */
	switch (hbq_id) {
	case EMLXS_ELS_HBQ_ID:
		seg = MEM_ELSBUF;
		HBASTATS.ElsUbPosted = 0;
		break;

	case EMLXS_IP_HBQ_ID:
		seg = MEM_IPBUF;
		HBASTATS.IpUbPosted = 0;
		break;

	case EMLXS_CT_HBQ_ID:
		seg = MEM_CTBUF;
		HBASTATS.CtUbPosted = 0;
		break;

#ifdef SFCT_SUPPORT
	case EMLXS_FCT_HBQ_ID:
		seg = MEM_FCTBUF;
		HBASTATS.FctUbPosted = 0;
		break;
#endif /* SFCT_SUPPORT */

	default:
		/* Unknown HBQ id: nothing to free */
		return;
	}


	hbq = &hba->hbq_table[hbq_id];

	if (hbq->HBQ_host_buf.virt != 0) {
		/* Return all posted buffers to their memory pool */
		for (j = 0; j < hbq->HBQ_PostBufCnt; j++) {
			(void) emlxs_mem_put(hba, seg,
			    (uint8_t *)hbq->HBQ_PostBufs[j]);
			hbq->HBQ_PostBufs[j] = NULL;
		}
		hbq->HBQ_PostBufCnt = 0;

		buf_info = &bufinfo;
		bzero(buf_info, sizeof (MBUF_INFO));

		/* Free the HBQ entry array DMA buffer */
		buf_info->size = hbq->HBQ_host_buf.size;
		buf_info->virt = hbq->HBQ_host_buf.virt;
		buf_info->phys = hbq->HBQ_host_buf.phys;
		buf_info->dma_handle = hbq->HBQ_host_buf.dma_handle;
		buf_info->data_handle = hbq->HBQ_host_buf.data_handle;
		buf_info->flags = FC_MBUF_DMA;

		emlxs_mem_free(hba, buf_info);

		hbq->HBQ_host_buf.virt = NULL;
	}
	return;

} /* emlxs_hbq_free_all() */


/*
 * emlxs_update_HBQ_index
 *
 * Advances the put index of HBQ hbq_id (with wraparound), bumps the
 * per-ring posted-buffer statistic, and notifies the adapter by
 * writing the new put index to SLIM.  If the queue appears full, the
 * get index is refreshed from the adapter's SLIM2 copy first; if it
 * is still full the SLIM write is skipped.
 */
extern void
emlxs_update_HBQ_index(emlxs_hba_t *hba, uint32_t hbq_id)
{
	void *ioa2;
	uint32_t status;
	uint32_t HBQ_PortGetIdx;
	HBQ_INIT_t *hbq;

	switch (hbq_id) {
	case EMLXS_ELS_HBQ_ID:
		HBASTATS.ElsUbPosted++;
		break;

	case EMLXS_IP_HBQ_ID:
		HBASTATS.IpUbPosted++;
		break;

	case EMLXS_CT_HBQ_ID:
		HBASTATS.CtUbPosted++;
		break;

#ifdef SFCT_SUPPORT
	case EMLXS_FCT_HBQ_ID:
		HBASTATS.FctUbPosted++;
		break;
#endif /* SFCT_SUPPORT */

	default:
		/* Unknown HBQ id: ignore */
		return;
	}

	hbq = &hba->hbq_table[hbq_id];

	/* Advance the put index, wrapping at the end of the queue */
	hbq->HBQ_PutIdx = (hbq->HBQ_PutIdx + 1 >= hbq->HBQ_numEntries) ? 0 :
	    hbq->HBQ_PutIdx + 1;

	if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
		/* Queue looks full: refresh get index from SLIM2 */
		HBQ_PortGetIdx = PCIMEM_LONG(((SLIM2 *) hba->slim2.virt)->
		    mbx.us.s2.HBQ_PortGetIdx[hbq_id]);

		hbq->HBQ_GetIdx = HBQ_PortGetIdx;

		if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
			/* Still full: do not advertise the new entry */
			return;
		}
	}
	/* Tell the adapter about the new put index via SLIM */
	ioa2 = (void *) ((char *)hba->slim_addr + (hba->hgp_hbq_offset +
	    (hbq_id * sizeof (uint32_t))));
	status = hbq->HBQ_PutIdx;
	WRITE_SLIM_ADDR(hba, (volatile uint32_t *) ioa2, status);

	return;

} /* emlxs_update_HBQ_index() */

#endif /* SLI3_SUPPORT */