/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"

#define LPFC_MBUF_POOL_SIZE	64	/* max elements in MBUF safety pool */
#define LPFC_MEM_POOL_SIZE	64	/* max elem in non-DMA safety pool */
#define LPFC_DEVICE_DATA_POOL_SIZE 64	/* max elements in device data pool */

int
lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba)
{
	size_t bytes;
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (max_xri <= 0)
		return -ENOMEM;
	bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
		sizeof(unsigned long);
	phba->cfg_rrq_xri_bitmap_sz = bytes;
	phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
							    bytes);
	if (!phba->active_rrq_pool)
		return -ENOMEM;
	return 0;
}
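
/*
 * Worked example of the sizing arithmetic above (illustrative only; the
 * max_xri value is hypothetical): with BITS_PER_LONG == 64 and
 * max_xri == 100, the computation is
 *
 *	((64 - 1 + 100) / 64) * sizeof(unsigned long) = 2 * 8 = 16 bytes
 *
 * i.e. enough unsigned longs to hold one bit per XRI, rounded up to a
 * whole number of longs (128 bits for 100 XRIs here).  Each element of
 * active_rrq_pool is one such XRI bitmap.
 */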

/**
 * lpfc_mem_alloc - create and allocate all PCI and memory pools
 * @phba: HBA to allocate pools for
 * @align: alignment, in bytes, of the DMA pool elements
 *
 * Description: Creates and allocates PCI pools lpfc_mbuf_pool,
 * lpfc_hrb_pool.  Creates and allocates kmalloc-backed mempools
 * for LPFC_MBOXQ_t and lpfc_nodelist.
 *
 * Notes: Not interrupt-safe.  Must be called with no locks held.  If any
 * allocation fails, frees all successfully allocated memory before returning.
 *
 * Returns:
 *   0 on success
 *   -ENOMEM on failure (if any memory allocations fail)
 **/
int
lpfc_mem_alloc(struct lpfc_hba *phba, int align)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	int i;

	phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool",
					       &phba->pcidev->dev,
					       LPFC_BPL_SIZE, align, 0);
	if (!phba->lpfc_mbuf_pool)
		goto fail;

	pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
				       sizeof(struct lpfc_dmabuf),
				       GFP_KERNEL);
	if (!pool->elements)
		goto fail_free_lpfc_mbuf_pool;

	pool->max_count = 0;
	pool->current_count = 0;
	for (i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
		pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
					GFP_KERNEL, &pool->elements[i].phys);
		if (!pool->elements[i].virt)
			goto fail_free_mbuf_pool;
		pool->max_count++;
		pool->current_count++;
	}

	phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
							  sizeof(LPFC_MBOXQ_t));
	if (!phba->mbox_mem_pool)
		goto fail_free_mbuf_pool;

	phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
						sizeof(struct lpfc_nodelist));
	if (!phba->nlp_mem_pool)
		goto fail_free_mbox_pool;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		phba->rrq_pool =
			mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
						sizeof(struct lpfc_node_rrq));
		if (!phba->rrq_pool)
			goto fail_free_nlp_mem_pool;
		phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool",
					      &phba->pcidev->dev,
					      LPFC_HDR_BUF_SIZE, align, 0);
		if (!phba->lpfc_hrb_pool)
			goto fail_free_rrq_mem_pool;

		phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool",
					      &phba->pcidev->dev,
					      LPFC_DATA_BUF_SIZE, align, 0);
		if (!phba->lpfc_drb_pool)
			goto fail_free_hrb_pool;
		phba->lpfc_hbq_pool = NULL;
	} else {
		phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool",
			&phba->pcidev->dev, LPFC_BPL_SIZE, align, 0);
		if (!phba->lpfc_hbq_pool)
			goto fail_free_nlp_mem_pool;
		phba->lpfc_hrb_pool = NULL;
		phba->lpfc_drb_pool = NULL;
	}

	if (phba->cfg_EnableXLane) {
		phba->device_data_mem_pool = mempool_create_kmalloc_pool(
					LPFC_DEVICE_DATA_POOL_SIZE,
					sizeof(struct lpfc_device_data));
		if (!phba->device_data_mem_pool)
			goto fail_free_drb_pool;
	} else {
		phba->device_data_mem_pool = NULL;
	}

	return 0;
fail_free_drb_pool:
	dma_pool_destroy(phba->lpfc_drb_pool);
	phba->lpfc_drb_pool = NULL;
fail_free_hrb_pool:
	dma_pool_destroy(phba->lpfc_hrb_pool);
	phba->lpfc_hrb_pool = NULL;
fail_free_rrq_mem_pool:
	mempool_destroy(phba->rrq_pool);
	phba->rrq_pool = NULL;
fail_free_nlp_mem_pool:
	mempool_destroy(phba->nlp_mem_pool);
	phba->nlp_mem_pool = NULL;
fail_free_mbox_pool:
	mempool_destroy(phba->mbox_mem_pool);
	phba->mbox_mem_pool = NULL;
fail_free_mbuf_pool:
	while (i--)
		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
			      pool->elements[i].phys);
	kfree(pool->elements);
fail_free_lpfc_mbuf_pool:
	dma_pool_destroy(phba->lpfc_mbuf_pool);
	phba->lpfc_mbuf_pool = NULL;
fail:
	return -ENOMEM;
}
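
/*
 * Illustrative attach-time sequence only (the call site and error
 * handling are sketched here, not quoted from the driver's init code):
 * the pools above are created once per HBA before any of the allocators
 * in this file may be used, and lpfc_mem_free_all() below is the
 * teardown counterpart:
 *
 *	if (lpfc_mem_alloc(phba, SGL_ALIGN_SZ))	// alignment per SLI rev
 *		goto out_free_resources;	// hypothetical error label
 *	...
 *	lpfc_mem_free_all(phba);
 */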

int
lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
{
	phba->lpfc_nvmet_drb_pool =
		dma_pool_create("lpfc_nvmet_drb_pool",
				&phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE,
				SGL_ALIGN_SZ, 0);
	if (!phba->lpfc_nvmet_drb_pool) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6024 Can't enable NVME Target - no memory\n");
		return -ENOMEM;
	}
	return 0;
}

/**
 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
 * @phba: HBA to free memory for
 *
 * Description: Free the memory allocated by the lpfc_mem_alloc routine. This
 * routine is the counterpart of lpfc_mem_alloc.
 *
 * Returns: None
 **/
void
lpfc_mem_free(struct lpfc_hba *phba)
{
	int i;
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	struct lpfc_device_data *device_data;

	/* Free HBQ pools */
	lpfc_sli_hbqbuf_free_all(phba);
	dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
	phba->lpfc_nvmet_drb_pool = NULL;

	dma_pool_destroy(phba->lpfc_drb_pool);
	phba->lpfc_drb_pool = NULL;

	dma_pool_destroy(phba->lpfc_hrb_pool);
	phba->lpfc_hrb_pool = NULL;

	dma_pool_destroy(phba->lpfc_hbq_pool);
	phba->lpfc_hbq_pool = NULL;

	mempool_destroy(phba->rrq_pool);
	phba->rrq_pool = NULL;

	/* Free NLP memory pool */
	mempool_destroy(phba->nlp_mem_pool);
	phba->nlp_mem_pool = NULL;
	if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
		mempool_destroy(phba->active_rrq_pool);
		phba->active_rrq_pool = NULL;
	}

	/* Free mbox memory pool */
	mempool_destroy(phba->mbox_mem_pool);
	phba->mbox_mem_pool = NULL;

	/* Free MBUF memory pool */
	for (i = 0; i < pool->current_count; i++)
		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
			      pool->elements[i].phys);
	kfree(pool->elements);

	dma_pool_destroy(phba->lpfc_mbuf_pool);
	phba->lpfc_mbuf_pool = NULL;

	/* Free Device Data memory pool */
	if (phba->device_data_mem_pool) {
		/* Ensure all objects have been returned to the pool */
		while (!list_empty(&phba->luns)) {
			device_data = list_first_entry(&phba->luns,
						       struct lpfc_device_data,
						       listentry);
			list_del(&device_data->listentry);
			mempool_free(device_data,
				     phba->device_data_mem_pool);
		}
		mempool_destroy(phba->device_data_mem_pool);
	}
	phba->device_data_mem_pool = NULL;
}
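
/*
 * Editorial note: dma_pool_destroy() and mempool_destroy() both accept
 * NULL, which is what lets lpfc_mem_free() destroy every pool
 * unconditionally and rely on the "destroy, then clear" pattern:
 *
 *	dma_pool_destroy(pool);		// no-op when pool == NULL
 *	pool = NULL;			// guards against double destroy
 *
 * so the routine is safe to call on a partially initialized HBA, e.g.
 * after lpfc_mem_alloc() has already unwound some pools.
 */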

/**
 * lpfc_mem_free_all - Frees all PCI and driver memory
 * @phba: HBA to free memory for
 *
 * Description: Free memory from the PCI and driver memory pools, including
 * lpfc_sg_dma_buf_pool, lpfc_cmd_rsp_buf_pool, lpfc_mbuf_pool and
 * lpfc_hrb_pool.  Frees the kmalloc-backed mempools for LPFC_MBOXQ_t and
 * lpfc_nodelist.  Also frees the iocb lookup array.
 *
 * Returns: None
 **/
void
lpfc_mem_free_all(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mbox, *next_mbox;
	struct lpfc_dmabuf *mp;

	/* Free memory used in mailbox queue back to mailbox memory pool */
	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		list_del(&mbox->list);
		mempool_free(mbox, phba->mbox_mem_pool);
	}
	/* Free memory used in mailbox cmpl list back to mailbox memory pool */
	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		list_del(&mbox->list);
		mempool_free(mbox, phba->mbox_mem_pool);
	}
	/* Free the active mailbox command back to the mailbox memory pool */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	if (psli->mbox_active) {
		mbox = psli->mbox_active;
		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(mbox, phba->mbox_mem_pool);
		psli->mbox_active = NULL;
	}

	/* Free and destroy all the allocated memory pools */
	lpfc_mem_free(phba);

	/* Free DMA buffer memory pool */
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;

	dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
	phba->lpfc_cmd_rsp_buf_pool = NULL;

	/* Free the iocb lookup array */
	kfree(psli->iocbq_lookup);
	psli->iocbq_lookup = NULL;
}

/**
 * lpfc_mbuf_alloc - Allocate an mbuf from the lpfc_mbuf_pool PCI pool
 * @phba: HBA which owns the pool to allocate from
 * @mem_flags: indicates if this is a priority (MEM_PRI) allocation
 * @handle: used to return the DMA-mapped address of the mbuf
 *
 * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
 * Allocates from the generic dma_pool_alloc function first and, if that fails
 * and mem_flags has MEM_PRI set (the only defined flag), returns an mbuf from
 * the HBA's safety pool.
 *
 * Notes: Not interrupt-safe.  Must be called with no locks held.  Takes
 * phba->hbalock.
 *
 * Returns:
 *   pointer to the allocated mbuf on success
 *   NULL on failure
 **/
void *
lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	unsigned long iflags;
	void *ret;

	ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
		pool->current_count--;
		ret = pool->elements[pool->current_count].virt;
		*handle = pool->elements[pool->current_count].phys;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}
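
/*
 * Minimal usage sketch (illustrative; "buf" and "buf_phys" are
 * hypothetical local names): callers pair lpfc_mbuf_alloc() with
 * lpfc_mbuf_free() and keep the DMA handle alongside the virtual
 * address, typically inside a struct lpfc_dmabuf:
 *
 *	dma_addr_t buf_phys;
 *	void *buf = lpfc_mbuf_alloc(phba, MEM_PRI, &buf_phys);
 *
 *	if (buf) {
 *		// ... use buf / buf_phys ...
 *		lpfc_mbuf_free(phba, buf, buf_phys);
 *	}
 *
 * MEM_PRI only permits dipping into the safety pool when the DMA pool
 * is exhausted; it does not change the allocation itself.
 */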

/**
 * __lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
 * @phba: HBA which owns the pool to return to
 * @virt: mbuf to free
 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
 *
 * Description: Returns the mbuf to the lpfc_mbuf_safety_pool if the safety
 * pool is below its max_count; otherwise frees the mbuf back to
 * lpfc_mbuf_pool.
 *
 * Notes: Must be called with phba->hbalock held to synchronize access to
 * lpfc_mbuf_safety_pool.
 *
 * Returns: None
 **/
void
__lpfc_mbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;

	if (pool->current_count < pool->max_count) {
		pool->elements[pool->current_count].virt = virt;
		pool->elements[pool->current_count].phys = dma;
		pool->current_count++;
	} else {
		dma_pool_free(phba->lpfc_mbuf_pool, virt, dma);
	}
}

/**
 * lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
 * @phba: HBA which owns the pool to return to
 * @virt: mbuf to free
 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
 *
 * Description: Returns the mbuf to the lpfc_mbuf_safety_pool if the safety
 * pool is below its max_count; otherwise frees the mbuf back to
 * lpfc_mbuf_pool.
 *
 * Notes: Takes phba->hbalock.  Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_mbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbuf_free(phba, virt, dma);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_nvmet_buf_alloc - Allocate an nvmet_buf from the
 * lpfc_sg_dma_buf_pool PCI pool
 * @phba: HBA which owns the pool to allocate from
 * @mem_flags: unused; kept for symmetry with lpfc_mbuf_alloc
 * @handle: used to return the DMA-mapped address of the nvmet_buf
 *
 * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool
 * PCI pool via the generic dma_pool_alloc function.  There is no safety-pool
 * fallback for this allocation.
 *
 * Returns:
 *   pointer to the allocated nvmet_buf on success
 *   NULL on failure
 **/
void *
lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
	return dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
}

/**
 * lpfc_nvmet_buf_free - Free an nvmet_buf to the lpfc_sg_dma_buf_pool
 * PCI pool
 * @phba: HBA which owns the pool to return to
 * @virt: nvmet_buf to free
 * @dma: the DMA-mapped address of the lpfc_sg_dma_buf_pool to be freed
 *
 * Returns: None
 **/
void
lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
}
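
/*
 * Illustrative pairing only (local names are hypothetical): as with
 * mbufs, NVMET buffers go back to the pool they came from:
 *
 *	dma_addr_t phys;
 *	void *virt = lpfc_nvmet_buf_alloc(phba, 0, &phys);
 *
 *	if (virt)
 *		lpfc_nvmet_buf_free(phba, virt, phys);
 *
 * Unlike lpfc_mbuf_alloc(), a failed dma_pool_alloc() here has no
 * safety-pool fallback, so callers must handle NULL.
 */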

/**
 * lpfc_els_hbq_alloc - Allocate an HBQ buffer
 * @phba: HBA to allocate an HBQ buffer for
 *
 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI
 * pool along with a non-DMA-mapped container for it.
 *
 * Notes: Not interrupt-safe.  Must be called with no locks held.
 *
 * Returns:
 *   pointer to HBQ on success
 *   NULL on failure
 **/
struct hbq_dmabuf *
lpfc_els_hbq_alloc(struct lpfc_hba *phba)
{
	struct hbq_dmabuf *hbqbp;

	hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
	if (!hbqbp)
		return NULL;

	hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
					  &hbqbp->dbuf.phys);
	if (!hbqbp->dbuf.virt) {
		kfree(hbqbp);
		return NULL;
	}
	hbqbp->total_size = LPFC_BPL_SIZE;
	return hbqbp;
}

/**
 * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
 * @phba: HBA buffer was allocated for
 * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
 *
 * Description: Frees both the container and the DMA-mapped buffer returned by
 * lpfc_els_hbq_alloc.
 *
 * Notes: Can be called with or without locks held.
 *
 * Returns: None
 **/
void
lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
{
	dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
	kfree(hbqbp);
}

/**
 * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
 * @phba: HBA to allocate a receive buffer for
 *
 * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool
 * and lpfc_drb_pool PCI pools along with a non-DMA-mapped container for it.
 *
 * Notes: Not interrupt-safe.  Must be called with no locks held.
 *
 * Returns:
 *   pointer to HBQ on success
 *   NULL on failure
 **/
struct hbq_dmabuf *
lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
{
	struct hbq_dmabuf *dma_buf;

	dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
	if (!dma_buf)
		return NULL;

	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
					    &dma_buf->hbuf.phys);
	if (!dma_buf->hbuf.virt) {
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
					    &dma_buf->dbuf.phys);
	if (!dma_buf->dbuf.virt) {
		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
			      dma_buf->hbuf.phys);
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->total_size = LPFC_DATA_BUF_SIZE;
	return dma_buf;
}

/**
 * lpfc_sli4_rb_free - Frees a receive buffer
 * @phba: HBA buffer was allocated for
 * @dmab: DMA Buffer container returned by lpfc_sli4_rb_alloc
 *
 * Description: Frees both the container and the DMA-mapped buffers returned by
 * lpfc_sli4_rb_alloc.
 *
 * Notes: Can be called with or without locks held.
 *
 * Returns: None
 **/
void
lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
{
	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
	dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
	kfree(dmab);
}
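
/*
 * Editorial sketch of the layout above (struct and field names are the
 * driver's own): an SLI4 receive buffer is two DMA allocations tracked
 * by one container:
 *
 *	struct hbq_dmabuf {
 *		.hbuf	LPFC_HDR_BUF_SIZE bytes from lpfc_hrb_pool
 *		.dbuf	LPFC_DATA_BUF_SIZE bytes from lpfc_drb_pool
 *	};
 *
 * This is why lpfc_sli4_rb_alloc() unwinds the header buffer when the
 * data buffer allocation fails, and why lpfc_sli4_rb_free() frees both
 * halves before the container.
 */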

/**
 * lpfc_sli4_nvmet_alloc - Allocate an SLI4 NVMET receive buffer
 * @phba: HBA to allocate a receive buffer for
 *
 * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool
 * and lpfc_nvmet_drb_pool PCI pools along with a non-DMA-mapped container
 * for it.
 *
 * Notes: Not interrupt-safe.  Must be called with no locks held.
 *
 * Returns:
 *   pointer to HBQ on success
 *   NULL on failure
 **/
struct rqb_dmabuf *
lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
{
	struct rqb_dmabuf *dma_buf;

	dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
	if (!dma_buf)
		return NULL;

	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
					    &dma_buf->hbuf.phys);
	if (!dma_buf->hbuf.virt) {
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool,
					    GFP_KERNEL, &dma_buf->dbuf.phys);
	if (!dma_buf->dbuf.virt) {
		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
			      dma_buf->hbuf.phys);
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
	return dma_buf;
}

/**
 * lpfc_sli4_nvmet_free - Frees an NVMET receive buffer
 * @phba: HBA buffer was allocated for
 * @dmab: DMA Buffer container returned by lpfc_sli4_nvmet_alloc
 *
 * Description: Frees both the container and the DMA-mapped buffers returned by
 * lpfc_sli4_nvmet_alloc.
 *
 * Notes: Can be called with or without locks held.
 *
 * Returns: None
 **/
void
lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
{
	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
	dma_pool_free(phba->lpfc_nvmet_drb_pool,
		      dmab->dbuf.virt, dmab->dbuf.phys);
	kfree(dmab);
}

/**
 * lpfc_in_buf_free - Free a DMA buffer
 * @phba: HBA buffer is associated with
 * @mp: Buffer to free
 *
 * Description: Frees the given DMA buffer in the appropriate way, depending
 * on whether the HBA is running in SLI3 mode with HBQs enabled.
 *
 * Notes: Takes phba->hbalock.  Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct hbq_dmabuf *hbq_entry;
	unsigned long flags;

	if (!mp)
		return;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
		/* Check whether HBQ is still in use */
		spin_lock_irqsave(&phba->hbalock, flags);
		if (!phba->hbq_in_use) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return;
		}
		list_del(&hbq_entry->dbuf.list);
		if (hbq_entry->tag == -1) {
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_entry);
		} else {
			lpfc_sli_free_hbq(phba, hbq_entry);
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
	} else {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
}
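
/*
 * Editorial note: lpfc_in_buf_free() relies on the dbuf member being
 * embedded in struct hbq_dmabuf, so container_of() can recover the
 * outer container from the inner lpfc_dmabuf pointer:
 *
 *	hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
 *
 * The conversion is only valid for buffers that actually came from the
 * HBQ path, which is why it is guarded by LPFC_SLI3_HBQ_ENABLED.
 * lpfc_rq_buf_free() below uses the same idiom with the hbuf member of
 * struct rqb_dmabuf.
 */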

/**
 * lpfc_rq_buf_free - Free an RQ DMA buffer
 * @phba: HBA buffer is associated with
 * @mp: Buffer to free
 *
 * Description: Reposts the given RQ DMA buffer to its associated receive
 * queue so it can be reused; if the repost fails, the buffer is freed
 * instead.
 *
 * Notes: Takes phba->hbalock.  Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct lpfc_rqb *rqbp;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct rqb_dmabuf *rqb_entry;
	unsigned long flags;
	int rc;

	if (!mp)
		return;

	rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf);
	rqbp = rqb_entry->hrq->rqbp;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_del(&rqb_entry->hbuf.list);
	hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys);
	drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys);
	drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
	rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
	if (rc < 0) {
		(rqbp->rqb_free_buffer)(phba, rqb_entry);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6409 Cannot post to HRQ %d: %x %x %x "
				"DRQ %x %x\n",
				rqb_entry->hrq->queue_id,
				rqb_entry->hrq->host_index,
				rqb_entry->hrq->hba_index,
				rqb_entry->hrq->entry_count,
				rqb_entry->drq->host_index,
				rqb_entry->drq->hba_index);
	} else {
		list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
		rqbp->buffer_count++;
	}

	spin_unlock_irqrestore(&phba->hbalock, flags);
}