/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"

#define LPFC_MBUF_POOL_SIZE	64	/* max elements in MBUF safety pool */
#define LPFC_MEM_POOL_SIZE	64	/* max elem in non-DMA safety pool */
#define LPFC_DEVICE_DATA_POOL_SIZE 64	/* max elements in device data pool */
#define LPFC_RRQ_POOL_SIZE	256	/* max elements in non-DMA pool */
#define LPFC_MBX_POOL_SIZE	256	/* max elements in MBX non-DMA pool */

/* lpfc_mem_free_sli_mbox
 *
 * @phba: HBA to free memory for
 * @mbox: mailbox command to free
 *
 * This routine detects the mbox type and calls the correct
 * free routine to fully release all associated memory.
 */
static void
lpfc_mem_free_sli_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	/* Detect if the caller's mbox is an SLI4_CONFIG type. If so, this
	 * mailbox type requires a different cleanup routine. Otherwise, the
	 * mailbox is just an mbuf and mem_pool release.
	 */
	if (phba->sli_rev == LPFC_SLI_REV4 &&
	    bf_get(lpfc_mqe_command, &mbox->u.mqe) == MBX_SLI4_CONFIG) {
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	} else {
		lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
	}
}
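
/**
 * lpfc_mem_alloc_active_rrq_pool_s4 - Create the SLI4 active RRQ mempool
 * @phba: HBA to allocate pools for
 *
 * Description: Sizes an XRI bitmap to hold max_xri bits, records that size in
 * cfg_rrq_xri_bitmap_sz, and creates a kmalloc-backed mempool of such bitmaps
 * for tracking active RRQs.
 *
 * Returns:
 *   0 on success
 *   -ENOMEM on failure
 **/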
int
lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba)
{
	size_t bytes;
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (max_xri <= 0)
		return -ENOMEM;
	/* Round the bitmap up to a whole number of unsigned longs */
	bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
		sizeof(unsigned long);
	phba->cfg_rrq_xri_bitmap_sz = bytes;
	phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
							    bytes);
	if (!phba->active_rrq_pool)
		return -ENOMEM;
	return 0;
}
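
/*
 * Worked example (illustrative, not driver code): with max_xri = 100 on a
 * 64-bit kernel, bytes = ((63 + 100) / 64) * sizeof(unsigned long)
 * = 2 * 8 = 16, i.e. a two-long bitmap covering 128 bits -- the same result
 * BITS_TO_LONGS(100) * sizeof(unsigned long) would give.
 */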

/**
 * lpfc_mem_alloc - create and allocate all PCI and memory pools
 * @phba: HBA to allocate pools for
 * @align: alignment requirement for blocks; must be a power of two
 *
 * Description: Creates and allocates PCI pools lpfc_mbuf_pool and, depending
 * on SLI rev, either lpfc_hbq_pool (SLI3) or lpfc_hrb_pool and lpfc_drb_pool
 * (SLI4). Creates and allocates kmalloc-backed mempools for LPFC_MBOXQ_t and
 * lpfc_nodelist, plus the SLI4 RRQ mempool and the optional device data
 * mempool.
 *
 * Notes: Not interrupt-safe. Must be called with no locks held. If any
 * allocation fails, frees all successfully allocated memory before returning.
 *
 * Returns:
 *   0 on success
 *   -ENOMEM on failure (if any memory allocations fail)
 **/
int
lpfc_mem_alloc(struct lpfc_hba *phba, int align)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	int i;

	phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool",
					       &phba->pcidev->dev,
					       LPFC_BPL_SIZE, align, 0);
	if (!phba->lpfc_mbuf_pool)
		goto fail;

	pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
				       sizeof(struct lpfc_dmabuf),
				       GFP_KERNEL);
	if (!pool->elements)
		goto fail_free_lpfc_mbuf_pool;

	pool->max_count = 0;
	pool->current_count = 0;
	for (i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
		pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
				       GFP_KERNEL, &pool->elements[i].phys);
		if (!pool->elements[i].virt)
			goto fail_free_mbuf_pool;
		pool->max_count++;
		pool->current_count++;
	}

	phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MBX_POOL_SIZE,
							  sizeof(LPFC_MBOXQ_t));
	if (!phba->mbox_mem_pool)
		goto fail_free_mbuf_pool;

	phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
						sizeof(struct lpfc_nodelist));
	if (!phba->nlp_mem_pool)
		goto fail_free_mbox_pool;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		phba->rrq_pool =
			mempool_create_kmalloc_pool(LPFC_RRQ_POOL_SIZE,
						sizeof(struct lpfc_node_rrq));
		if (!phba->rrq_pool)
			goto fail_free_nlp_mem_pool;
		phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool",
					      &phba->pcidev->dev,
					      LPFC_HDR_BUF_SIZE, align, 0);
		if (!phba->lpfc_hrb_pool)
			goto fail_free_rrq_mem_pool;

		phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool",
					      &phba->pcidev->dev,
					      LPFC_DATA_BUF_SIZE, align, 0);
		if (!phba->lpfc_drb_pool)
			goto fail_free_hrb_pool;
		phba->lpfc_hbq_pool = NULL;
	} else {
		phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool",
				&phba->pcidev->dev, LPFC_BPL_SIZE, align, 0);
		if (!phba->lpfc_hbq_pool)
			goto fail_free_nlp_mem_pool;
		phba->lpfc_hrb_pool = NULL;
		phba->lpfc_drb_pool = NULL;
	}

	if (phba->cfg_EnableXLane) {
		phba->device_data_mem_pool = mempool_create_kmalloc_pool(
					LPFC_DEVICE_DATA_POOL_SIZE,
					sizeof(struct lpfc_device_data));
		if (!phba->device_data_mem_pool)
			goto fail_free_drb_pool;
	} else {
		phba->device_data_mem_pool = NULL;
	}

	return 0;
fail_free_drb_pool:
	dma_pool_destroy(phba->lpfc_drb_pool);
	phba->lpfc_drb_pool = NULL;
fail_free_hrb_pool:
	dma_pool_destroy(phba->lpfc_hrb_pool);
	phba->lpfc_hrb_pool = NULL;
fail_free_rrq_mem_pool:
	mempool_destroy(phba->rrq_pool);
	phba->rrq_pool = NULL;
fail_free_nlp_mem_pool:
	mempool_destroy(phba->nlp_mem_pool);
	phba->nlp_mem_pool = NULL;
fail_free_mbox_pool:
	mempool_destroy(phba->mbox_mem_pool);
	phba->mbox_mem_pool = NULL;
fail_free_mbuf_pool:
	while (i--)
		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
			      pool->elements[i].phys);
	kfree(pool->elements);
fail_free_lpfc_mbuf_pool:
	dma_pool_destroy(phba->lpfc_mbuf_pool);
	phba->lpfc_mbuf_pool = NULL;
fail:
	return -ENOMEM;
}
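
/**
 * lpfc_nvmet_mem_alloc - Create the NVME Target data receive buffer pool
 * @phba: HBA to allocate the pool for
 *
 * Description: Creates the lpfc_nvmet_drb_pool DMA pool of
 * LPFC_NVMET_DATA_BUF_SIZE receive buffers aligned to SGL_ALIGN_SZ.
 *
 * Returns:
 *   0 on success
 *   -ENOMEM on failure
 **/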
int
lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
{
	phba->lpfc_nvmet_drb_pool =
		dma_pool_create("lpfc_nvmet_drb_pool",
				&phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE,
				SGL_ALIGN_SZ, 0);
	if (!phba->lpfc_nvmet_drb_pool) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6024 Can't enable NVME Target - no memory\n");
		return -ENOMEM;
	}
	return 0;
}

/**
 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
 * @phba: HBA to free memory for
 *
 * Description: Free the memory allocated by the lpfc_mem_alloc routine. This
 * routine is the counterpart of lpfc_mem_alloc.
 *
 * Returns: None
 **/
void
lpfc_mem_free(struct lpfc_hba *phba)
{
	int i;
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	struct lpfc_device_data *device_data;

	/* Free HBQ pools */
	lpfc_sli_hbqbuf_free_all(phba);
	dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
	phba->lpfc_nvmet_drb_pool = NULL;

	dma_pool_destroy(phba->lpfc_drb_pool);
	phba->lpfc_drb_pool = NULL;

	dma_pool_destroy(phba->lpfc_hrb_pool);
	phba->lpfc_hrb_pool = NULL;

	dma_pool_destroy(phba->lpfc_hbq_pool);
	phba->lpfc_hbq_pool = NULL;

	mempool_destroy(phba->rrq_pool);
	phba->rrq_pool = NULL;

	/* Free NLP memory pool */
	mempool_destroy(phba->nlp_mem_pool);
	phba->nlp_mem_pool = NULL;
	if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
		mempool_destroy(phba->active_rrq_pool);
		phba->active_rrq_pool = NULL;
	}

	/* Free mbox memory pool */
	mempool_destroy(phba->mbox_mem_pool);
	phba->mbox_mem_pool = NULL;

	/* Free MBUF memory pool */
	for (i = 0; i < pool->current_count; i++)
		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
			      pool->elements[i].phys);
	kfree(pool->elements);

	dma_pool_destroy(phba->lpfc_mbuf_pool);
	phba->lpfc_mbuf_pool = NULL;

	/* Free Device Data memory pool */
	if (phba->device_data_mem_pool) {
		/* Ensure all objects have been returned to the pool */
		while (!list_empty(&phba->luns)) {
			device_data = list_first_entry(&phba->luns,
						       struct lpfc_device_data,
						       listentry);
			list_del(&device_data->listentry);
			mempool_free(device_data,
				     phba->device_data_mem_pool);
		}
		mempool_destroy(phba->device_data_mem_pool);
	}
	phba->device_data_mem_pool = NULL;
}
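
/*
 * Note: lpfc_mem_free drains phba->luns before destroying
 * device_data_mem_pool because mempool_destroy() expects all elements to
 * have been returned to the pool first; any lpfc_device_data object still
 * on the LUN list at teardown would otherwise be leaked.
 */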

/**
 * lpfc_mem_free_all - Frees all PCI and driver memory
 * @phba: HBA to free memory for
 *
 * Description: Frees all PCI and driver memory pools, including
 * lpfc_sg_dma_buf_pool, lpfc_mbuf_pool, and lpfc_hrb_pool, and the
 * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also releases
 * any outstanding mailbox commands, the congestion buffer, the RX monitor,
 * and the iocb lookup array.
 *
 * Returns: None
 **/
void
lpfc_mem_free_all(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mbox, *next_mbox;

	/* Free memory used in mailbox queue back to mailbox memory pool */
	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
		list_del(&mbox->list);
		lpfc_mem_free_sli_mbox(phba, mbox);
	}
	/* Free memory used in mailbox cmpl list back to mailbox memory pool */
	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
		list_del(&mbox->list);
		lpfc_mem_free_sli_mbox(phba, mbox);
	}
	/* Free the active mailbox command back to the mailbox memory pool */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	if (psli->mbox_active) {
		mbox = psli->mbox_active;
		lpfc_mem_free_sli_mbox(phba, mbox);
		psli->mbox_active = NULL;
	}

	/* Free and destroy all the allocated memory pools */
	lpfc_mem_free(phba);

	/* Free DMA buffer memory pool */
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;

	dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
	phba->lpfc_cmd_rsp_buf_pool = NULL;

	/* Free Congestion Data buffer */
	if (phba->cgn_i) {
		dma_free_coherent(&phba->pcidev->dev,
				  sizeof(struct lpfc_cgn_info),
				  phba->cgn_i->virt, phba->cgn_i->phys);
		kfree(phba->cgn_i);
		phba->cgn_i = NULL;
	}

	/* Free RX Monitor */
	if (phba->rx_monitor) {
		lpfc_rx_monitor_destroy_ring(phba->rx_monitor);
		kfree(phba->rx_monitor);
		phba->rx_monitor = NULL;
	}

	/* Free the iocb lookup array */
	kfree(psli->iocbq_lookup);
	psli->iocbq_lookup = NULL;
}

/**
 * lpfc_mbuf_alloc - Allocate an mbuf from the lpfc_mbuf_pool PCI pool
 * @phba: HBA which owns the pool to allocate from
 * @mem_flags: indicates if this is a priority (MEM_PRI) allocation
 * @handle: used to return the DMA-mapped address of the mbuf
 *
 * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
 * Allocates from the generic dma_pool_alloc function first; if that fails and
 * mem_flags has MEM_PRI set (the only defined flag), returns an mbuf from the
 * HBA's safety pool.
 *
 * Notes: Not interrupt-safe. Must be called with no locks held. Takes
 * phba->hbalock.
 *
 * Returns:
 *   pointer to the allocated mbuf on success
 *   NULL on failure
 **/
void *
lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	unsigned long iflags;
	void *ret;

	ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
		pool->current_count--;
		ret = pool->elements[pool->current_count].virt;
		*handle = pool->elements[pool->current_count].phys;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}

/**
 * __lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
 * @phba: HBA which owns the pool to return to
 * @virt: mbuf to free
 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
 *
 * Description: Returns an lpfc_mbuf_pool mbuf to the lpfc_mbuf_safety_pool if
 * that pool is below its max_count; frees the mbuf otherwise.
 *
 * Notes: Must be called with phba->hbalock held to synchronize access to
 * lpfc_mbuf_safety_pool.
 *
 * Returns: None
 **/
void
__lpfc_mbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;

	if (pool->current_count < pool->max_count) {
		pool->elements[pool->current_count].virt = virt;
		pool->elements[pool->current_count].phys = dma;
		pool->current_count++;
	} else {
		dma_pool_free(phba->lpfc_mbuf_pool, virt, dma);
	}
}

/**
 * lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
 * @phba: HBA which owns the pool to return to
 * @virt: mbuf to free
 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
 *
 * Description: Returns an lpfc_mbuf_pool mbuf to the lpfc_mbuf_safety_pool if
 * that pool is below its max_count; frees the mbuf otherwise.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_mbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbuf_free(phba, virt, dma);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
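
/*
 * Usage sketch (illustrative, not a snippet from the driver): a caller that
 * must not fail under memory pressure passes MEM_PRI to dip into the safety
 * pool when dma_pool_alloc() fails, and returns the buffer through
 * lpfc_mbuf_free() so the safety pool is replenished:
 *
 *	dma_addr_t phys;
 *	void *virt = lpfc_mbuf_alloc(phba, MEM_PRI, &phys);
 *
 *	if (virt) {
 *		... build the BPL/payload in virt, hand phys to the HBA ...
 *		lpfc_mbuf_free(phba, virt, phys);
 *	}
 */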

/**
 * lpfc_nvmet_buf_alloc - Allocate an nvmet_buf from the
 * lpfc_sg_dma_buf_pool PCI pool
 * @phba: HBA which owns the pool to allocate from
 * @mem_flags: allocation flags; currently unused
 * @handle: used to return the DMA-mapped address of the nvmet_buf
 *
 * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool
 * PCI pool via the generic dma_pool_alloc function.
 *
 * Returns:
 *   pointer to the allocated nvmet_buf on success
 *   NULL on failure
 **/
void *
lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
	void *ret;

	ret = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
	return ret;
}

/**
 * lpfc_nvmet_buf_free - Free an nvmet_buf from the lpfc_sg_dma_buf_pool
 * PCI pool
 * @phba: HBA which owns the pool to return to
 * @virt: nvmet_buf to free
 * @dma: the DMA-mapped address of the lpfc_sg_dma_buf_pool to be freed
 *
 * Returns: None
 **/
void
lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
}

/**
 * lpfc_els_hbq_alloc - Allocate an HBQ buffer
 * @phba: HBA to allocate HBQ buffer for
 *
 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI
 * pool along with a non-DMA-mapped container for it.
 *
 * Notes: Not interrupt-safe. Must be called with no locks held.
 *
 * Returns:
 *   pointer to HBQ on success
 *   NULL on failure
 **/
struct hbq_dmabuf *
lpfc_els_hbq_alloc(struct lpfc_hba *phba)
{
	struct hbq_dmabuf *hbqbp;

	hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
	if (!hbqbp)
		return NULL;

	hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
					  &hbqbp->dbuf.phys);
	if (!hbqbp->dbuf.virt) {
		kfree(hbqbp);
		return NULL;
	}
	hbqbp->total_size = LPFC_BPL_SIZE;
	return hbqbp;
}

/**
 * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
 * @phba: HBA buffer was allocated for
 * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
 *
 * Description: Frees both the container and the DMA-mapped buffer returned by
 * lpfc_els_hbq_alloc.
 *
 * Notes: Can be called with or without locks held.
 *
 * Returns: None
 **/
void
lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
{
	dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
	kfree(hbqbp);
}

/**
 * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
 * @phba: HBA to allocate a receive buffer for
 *
 * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool
 * and lpfc_drb_pool PCI pools along with a non-DMA-mapped container for it.
 *
 * Notes: Not interrupt-safe. Must be called with no locks held.
 *
 * Returns:
 *   pointer to HBQ on success
 *   NULL on failure
 **/
struct hbq_dmabuf *
lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
{
	struct hbq_dmabuf *dma_buf;

	dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
	if (!dma_buf)
		return NULL;

	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
					    &dma_buf->hbuf.phys);
	if (!dma_buf->hbuf.virt) {
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
					    &dma_buf->dbuf.phys);
	if (!dma_buf->dbuf.virt) {
		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
			      dma_buf->hbuf.phys);
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->total_size = LPFC_DATA_BUF_SIZE;
	return dma_buf;
}

/**
 * lpfc_sli4_rb_free - Frees a receive buffer
 * @phba: HBA buffer was allocated for
 * @dmab: DMA Buffer container returned by lpfc_sli4_rb_alloc
 *
 * Description: Frees both the container and the DMA-mapped buffers returned by
 * lpfc_sli4_rb_alloc.
 *
 * Notes: Can be called with or without locks held.
 *
 * Returns: None
 **/
void
lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
{
	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
	dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
	kfree(dmab);
}
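
/*
 * Usage sketch (illustrative): SLI4 receive buffers pair a header buffer
 * from lpfc_hrb_pool with a data buffer from lpfc_drb_pool, so allocation
 * and release must go through the matching pair of routines:
 *
 *	struct hbq_dmabuf *rb = lpfc_sli4_rb_alloc(phba);
 *
 *	if (rb) {
 *		... post rb->hbuf.phys and rb->dbuf.phys to the RQ ...
 *		lpfc_sli4_rb_free(phba, rb);
 *	}
 */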

/**
 * lpfc_sli4_nvmet_alloc - Allocate an SLI4 NVMET Receive buffer
 * @phba: HBA to allocate a receive buffer for
 *
 * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool
 * and lpfc_nvmet_drb_pool PCI pools along with a non-DMA-mapped container
 * for it.
 *
 * Returns:
 *   pointer to HBQ on success
 *   NULL on failure
 **/
struct rqb_dmabuf *
lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
{
	struct rqb_dmabuf *dma_buf;

	dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
	if (!dma_buf)
		return NULL;

	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
					    &dma_buf->hbuf.phys);
	if (!dma_buf->hbuf.virt) {
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool,
					    GFP_KERNEL, &dma_buf->dbuf.phys);
	if (!dma_buf->dbuf.virt) {
		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
			      dma_buf->hbuf.phys);
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
	return dma_buf;
}

/**
 * lpfc_sli4_nvmet_free - Frees a receive buffer
 * @phba: HBA buffer was allocated for
 * @dmab: DMA Buffer container returned by lpfc_sli4_nvmet_alloc
 *
 * Description: Frees both the container and the DMA-mapped buffers returned by
 * lpfc_sli4_nvmet_alloc.
 *
 * Notes: Can be called with or without locks held.
 *
 * Returns: None
 **/
void
lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
{
	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
	dma_pool_free(phba->lpfc_nvmet_drb_pool,
		      dmab->dbuf.virt, dmab->dbuf.phys);
	kfree(dmab);
}

/**
 * lpfc_in_buf_free - Free a DMA buffer
 * @phba: HBA buffer is associated with
 * @mp: Buffer to free
 *
 * Description: Frees the given DMA buffer in the appropriate way: as an HBQ
 * buffer if the HBA is running in SLI3 mode with HBQs enabled, or as an mbuf
 * otherwise.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct hbq_dmabuf *hbq_entry;
	unsigned long flags;

	if (!mp)
		return;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
		/* Check whether HBQ is still in use */
		spin_lock_irqsave(&phba->hbalock, flags);
		if (!phba->hbq_in_use) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return;
		}
		list_del(&hbq_entry->dbuf.list);
		if (hbq_entry->tag == -1) {
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_entry);
		} else {
			lpfc_sli_free_hbq(phba, hbq_entry);
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
	} else {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
}

/**
 * lpfc_rq_buf_free - Free an RQ DMA buffer
 * @phba: HBA buffer is associated with
 * @mp: Buffer to free
 *
 * Description: Frees the given DMA buffer by reposting it to its associated
 * RQ so it can be reused; if the repost fails, the buffer is released back
 * to its pool instead.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct lpfc_rqb *rqbp;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct rqb_dmabuf *rqb_entry;
	unsigned long flags;
	int rc;

	if (!mp)
		return;

	rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf);
	rqbp = rqb_entry->hrq->rqbp;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_del(&rqb_entry->hbuf.list);
	hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys);
	drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys);
	drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
	rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
	if (rc < 0) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6409 Cannot post to HRQ %d: %x %x %x "
				"DRQ %x %x\n",
				rqb_entry->hrq->queue_id,
				rqb_entry->hrq->host_index,
				rqb_entry->hrq->hba_index,
				rqb_entry->hrq->entry_count,
				rqb_entry->drq->host_index,
				rqb_entry->drq->hba_index);
		(rqbp->rqb_free_buffer)(phba, rqb_entry);
	} else {
		list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
		rqbp->buffer_count++;
	}

	spin_unlock_irqrestore(&phba->hbalock, flags);
}
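
/*
 * Worked example (illustrative): lpfc_rq_buf_free splits each 64-bit DMA
 * address into the two 32-bit words an RQE carries. Assuming the driver's
 * putPaddrLow()/putPaddrHigh() helpers mask out the low word and shift down
 * the high word, as their names suggest, a physical address of
 * 0x0000001234567000 posts as address_lo = 0x34567000 and
 * address_hi = 0x00000012.
 */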