1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
5 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
23
24 #include <linux/mempool.h>
25 #include <linux/slab.h>
26 #include <linux/pci.h>
27 #include <linux/interrupt.h>
28
29 #include <scsi/scsi.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_transport_fc.h>
32 #include <scsi/fc/fc_fs.h>
33
34 #include "lpfc_hw4.h"
35 #include "lpfc_hw.h"
36 #include "lpfc_sli.h"
37 #include "lpfc_sli4.h"
38 #include "lpfc_nl.h"
39 #include "lpfc_disc.h"
40 #include "lpfc.h"
41 #include "lpfc_scsi.h"
42 #include "lpfc_crtn.h"
43 #include "lpfc_logmsg.h"
44
45 #define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
46 #define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
47 #define LPFC_DEVICE_DATA_POOL_SIZE 64 /* max elements in device data pool */
48 #define LPFC_RRQ_POOL_SIZE 256 /* max elements in non-DMA pool */
49 #define LPFC_MBX_POOL_SIZE 256 /* max elements in MBX non-DMA pool */
50
51 /* lpfc_mbox_free_sli_mbox
52 *
53 * @phba: HBA to free memory for
54 * @mbox: mailbox command to free
55 *
56 * This routine detects the mbox type and calls the correct
57 * free routine to fully release all associated memory.
58 */
59 static void
lpfc_mem_free_sli_mbox(struct lpfc_hba * phba,LPFC_MBOXQ_t * mbox)60 lpfc_mem_free_sli_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
61 {
62 /* Detect if the caller's mbox is an SLI4_CONFIG type. If so, this
63 * mailbox type requires a different cleanup routine. Otherwise, the
64 * mailbox is just an mbuf and mem_pool release.
65 */
66 if (phba->sli_rev == LPFC_SLI_REV4 &&
67 bf_get(lpfc_mqe_command, &mbox->u.mqe) == MBX_SLI4_CONFIG) {
68 lpfc_sli4_mbox_cmd_free(phba, mbox);
69 } else {
70 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
71 }
72 }
73
74 int
lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba * phba)75 lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
76 size_t bytes;
77 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
78
79 if (max_xri <= 0)
80 return -ENOMEM;
81 bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
82 sizeof(unsigned long);
83 phba->cfg_rrq_xri_bitmap_sz = bytes;
84 phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
85 bytes);
86 if (!phba->active_rrq_pool)
87 return -ENOMEM;
88 else
89 return 0;
90 }
91
92 /**
93 * lpfc_mem_alloc - create and allocate all PCI and memory pools
94 * @phba: HBA to allocate pools for
95 * @align: alignment requirement for blocks; must be a power of two
96 *
97 * Description: Creates and allocates PCI pools lpfc_mbuf_pool,
98 * lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
99 * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
100 *
101 * Notes: Not interrupt-safe. Must be called with no locks held. If any
102 * allocation fails, frees all successfully allocated memory before returning.
103 *
104 * Returns:
105 * 0 on success
106 * -ENOMEM on failure (if any memory allocations fail)
107 **/
108 int
lpfc_mem_alloc(struct lpfc_hba * phba,int align)109 lpfc_mem_alloc(struct lpfc_hba *phba, int align)
110 {
111 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
112 int i;
113
114
115 phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev,
116 LPFC_BPL_SIZE,
117 align, 0);
118 if (!phba->lpfc_mbuf_pool)
119 goto fail;
120
121 pool->elements = kmalloc_objs(struct lpfc_dmabuf, LPFC_MBUF_POOL_SIZE);
122 if (!pool->elements)
123 goto fail_free_lpfc_mbuf_pool;
124
125 pool->max_count = 0;
126 pool->current_count = 0;
127 for ( i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
128 pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
129 GFP_KERNEL, &pool->elements[i].phys);
130 if (!pool->elements[i].virt)
131 goto fail_free_mbuf_pool;
132 pool->max_count++;
133 pool->current_count++;
134 }
135
136 phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MBX_POOL_SIZE,
137 sizeof(LPFC_MBOXQ_t));
138 if (!phba->mbox_mem_pool)
139 goto fail_free_mbuf_pool;
140
141 phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
142 sizeof(struct lpfc_nodelist));
143 if (!phba->nlp_mem_pool)
144 goto fail_free_mbox_pool;
145
146 if (phba->sli_rev == LPFC_SLI_REV4) {
147 phba->rrq_pool =
148 mempool_create_kmalloc_pool(LPFC_RRQ_POOL_SIZE,
149 sizeof(struct lpfc_node_rrq));
150 if (!phba->rrq_pool)
151 goto fail_free_nlp_mem_pool;
152 phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool",
153 &phba->pcidev->dev,
154 LPFC_HDR_BUF_SIZE, align, 0);
155 if (!phba->lpfc_hrb_pool)
156 goto fail_free_rrq_mem_pool;
157
158 phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool",
159 &phba->pcidev->dev,
160 LPFC_DATA_BUF_SIZE, align, 0);
161 if (!phba->lpfc_drb_pool)
162 goto fail_free_hrb_pool;
163 phba->lpfc_hbq_pool = NULL;
164 } else {
165 phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool",
166 &phba->pcidev->dev, LPFC_BPL_SIZE, align, 0);
167 if (!phba->lpfc_hbq_pool)
168 goto fail_free_nlp_mem_pool;
169 phba->lpfc_hrb_pool = NULL;
170 phba->lpfc_drb_pool = NULL;
171 }
172
173 if (phba->cfg_EnableXLane) {
174 phba->device_data_mem_pool = mempool_create_kmalloc_pool(
175 LPFC_DEVICE_DATA_POOL_SIZE,
176 sizeof(struct lpfc_device_data));
177 if (!phba->device_data_mem_pool)
178 goto fail_free_drb_pool;
179 } else {
180 phba->device_data_mem_pool = NULL;
181 }
182
183 return 0;
184 fail_free_drb_pool:
185 dma_pool_destroy(phba->lpfc_drb_pool);
186 phba->lpfc_drb_pool = NULL;
187 fail_free_hrb_pool:
188 dma_pool_destroy(phba->lpfc_hrb_pool);
189 phba->lpfc_hrb_pool = NULL;
190 fail_free_rrq_mem_pool:
191 mempool_destroy(phba->rrq_pool);
192 phba->rrq_pool = NULL;
193 fail_free_nlp_mem_pool:
194 mempool_destroy(phba->nlp_mem_pool);
195 phba->nlp_mem_pool = NULL;
196 fail_free_mbox_pool:
197 mempool_destroy(phba->mbox_mem_pool);
198 phba->mbox_mem_pool = NULL;
199 fail_free_mbuf_pool:
200 while (i--)
201 dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
202 pool->elements[i].phys);
203 kfree(pool->elements);
204 fail_free_lpfc_mbuf_pool:
205 dma_pool_destroy(phba->lpfc_mbuf_pool);
206 phba->lpfc_mbuf_pool = NULL;
207 fail:
208 return -ENOMEM;
209 }
210
211 int
lpfc_nvmet_mem_alloc(struct lpfc_hba * phba)212 lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
213 {
214 phba->lpfc_nvmet_drb_pool =
215 dma_pool_create("lpfc_nvmet_drb_pool",
216 &phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE,
217 SGL_ALIGN_SZ, 0);
218 if (!phba->lpfc_nvmet_drb_pool) {
219 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
220 "6024 Can't enable NVME Target - no memory\n");
221 return -ENOMEM;
222 }
223 return 0;
224 }
225
226 /**
227 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
228 * @phba: HBA to free memory for
229 *
230 * Description: Free the memory allocated by lpfc_mem_alloc routine. This
231 * routine is a the counterpart of lpfc_mem_alloc.
232 *
233 * Returns: None
234 **/
235 void
lpfc_mem_free(struct lpfc_hba * phba)236 lpfc_mem_free(struct lpfc_hba *phba)
237 {
238 int i;
239 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
240 struct lpfc_device_data *device_data;
241
242 /* Free HBQ pools */
243 lpfc_sli_hbqbuf_free_all(phba);
244 dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
245 phba->lpfc_nvmet_drb_pool = NULL;
246
247 dma_pool_destroy(phba->lpfc_drb_pool);
248 phba->lpfc_drb_pool = NULL;
249
250 dma_pool_destroy(phba->lpfc_hrb_pool);
251 phba->lpfc_hrb_pool = NULL;
252
253 dma_pool_destroy(phba->lpfc_hbq_pool);
254 phba->lpfc_hbq_pool = NULL;
255
256 mempool_destroy(phba->rrq_pool);
257 phba->rrq_pool = NULL;
258
259 /* Free NLP memory pool */
260 mempool_destroy(phba->nlp_mem_pool);
261 phba->nlp_mem_pool = NULL;
262 if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
263 mempool_destroy(phba->active_rrq_pool);
264 phba->active_rrq_pool = NULL;
265 }
266
267 /* Free mbox memory pool */
268 mempool_destroy(phba->mbox_mem_pool);
269 phba->mbox_mem_pool = NULL;
270
271 /* Free MBUF memory pool */
272 for (i = 0; i < pool->current_count; i++)
273 dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
274 pool->elements[i].phys);
275 kfree(pool->elements);
276
277 dma_pool_destroy(phba->lpfc_mbuf_pool);
278 phba->lpfc_mbuf_pool = NULL;
279
280 /* Free Device Data memory pool */
281 if (phba->device_data_mem_pool) {
282 /* Ensure all objects have been returned to the pool */
283 while (!list_empty(&phba->luns)) {
284 device_data = list_first_entry(&phba->luns,
285 struct lpfc_device_data,
286 listentry);
287 list_del(&device_data->listentry);
288 mempool_free(device_data, phba->device_data_mem_pool);
289 }
290 mempool_destroy(phba->device_data_mem_pool);
291 }
292 phba->device_data_mem_pool = NULL;
293 return;
294 }
295
/**
 * lpfc_mem_free_all - Frees all PCI and driver memory
 * @phba: HBA to free memory for
 *
 * Description: Free memory from PCI and driver memory pools and also those
 * used : lpfc_sg_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees
 * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees
 * the VPI bitmask.
 *
 * Notes: Takes phba->hbalock briefly to clear the active-mailbox flag.
 * NOTE(review): appears intended for driver teardown with no concurrent
 * mailbox activity — the pending/cmpl lists are walked unlocked; confirm
 * against callers.
 *
 * Returns: None
 **/
void
lpfc_mem_free_all(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mbox, *next_mbox;

	/* Free memory used in mailbox queue back to mailbox memory pool */
	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
		list_del(&mbox->list);
		lpfc_mem_free_sli_mbox(phba, mbox);
	}
	/* Free memory used in mailbox cmpl list back to mailbox memory pool */
	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
		list_del(&mbox->list);
		lpfc_mem_free_sli_mbox(phba, mbox);
	}
	/* Free the active mailbox command back to the mailbox memory pool */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	if (psli->mbox_active) {
		mbox = psli->mbox_active;
		lpfc_mem_free_sli_mbox(phba, mbox);
		psli->mbox_active = NULL;
	}

	/* Free and destroy all the allocated memory pools */
	lpfc_mem_free(phba);

	/* Free DMA buffer memory pool */
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;

	dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
	phba->lpfc_cmd_rsp_buf_pool = NULL;

	/* Free Congestion Data buffer */
	if (phba->cgn_i) {
		dma_free_coherent(&phba->pcidev->dev,
				  sizeof(struct lpfc_cgn_info),
				  phba->cgn_i->virt, phba->cgn_i->phys);
		kfree(phba->cgn_i);
		phba->cgn_i = NULL;
	}

	/* Free RX Monitor */
	if (phba->rx_monitor) {
		lpfc_rx_monitor_destroy_ring(phba->rx_monitor);
		kfree(phba->rx_monitor);
		phba->rx_monitor = NULL;
	}

	/* Free the iocb lookup array */
	kfree(psli->iocbq_lookup);
	psli->iocbq_lookup = NULL;

	return;
}
365
366 /**
367 * lpfc_mbuf_alloc - Allocate an mbuf from the lpfc_mbuf_pool PCI pool
368 * @phba: HBA which owns the pool to allocate from
369 * @mem_flags: indicates if this is a priority (MEM_PRI) allocation
370 * @handle: used to return the DMA-mapped address of the mbuf
371 *
372 * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
373 * Allocates from generic dma_pool_alloc function first and if that fails and
374 * mem_flags has MEM_PRI set (the only defined flag), returns an mbuf from the
375 * HBA's pool.
376 *
377 * Notes: Not interrupt-safe. Must be called with no locks held. Takes
378 * phba->hbalock.
379 *
380 * Returns:
381 * pointer to the allocated mbuf on success
382 * NULL on failure
383 **/
384 void *
lpfc_mbuf_alloc(struct lpfc_hba * phba,int mem_flags,dma_addr_t * handle)385 lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
386 {
387 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
388 unsigned long iflags;
389 void *ret;
390
391 ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
392
393 spin_lock_irqsave(&phba->hbalock, iflags);
394 if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
395 pool->current_count--;
396 ret = pool->elements[pool->current_count].virt;
397 *handle = pool->elements[pool->current_count].phys;
398 }
399 spin_unlock_irqrestore(&phba->hbalock, iflags);
400 return ret;
401 }
402
403 /**
404 * __lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
405 * @phba: HBA which owns the pool to return to
406 * @virt: mbuf to free
407 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
408 *
409 * Description: Returns an mbuf lpfc_mbuf_pool to the lpfc_mbuf_safety_pool if
410 * it is below its max_count, frees the mbuf otherwise.
411 *
412 * Notes: Must be called with phba->hbalock held to synchronize access to
413 * lpfc_mbuf_safety_pool.
414 *
415 * Returns: None
416 **/
417 void
__lpfc_mbuf_free(struct lpfc_hba * phba,void * virt,dma_addr_t dma)418 __lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
419 {
420 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
421
422 if (pool->current_count < pool->max_count) {
423 pool->elements[pool->current_count].virt = virt;
424 pool->elements[pool->current_count].phys = dma;
425 pool->current_count++;
426 } else {
427 dma_pool_free(phba->lpfc_mbuf_pool, virt, dma);
428 }
429 return;
430 }
431
432 /**
433 * lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
434 * @phba: HBA which owns the pool to return to
435 * @virt: mbuf to free
436 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
437 *
438 * Description: Returns an mbuf lpfc_mbuf_pool to the lpfc_mbuf_safety_pool if
439 * it is below its max_count, frees the mbuf otherwise.
440 *
441 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
442 *
443 * Returns: None
444 **/
445 void
lpfc_mbuf_free(struct lpfc_hba * phba,void * virt,dma_addr_t dma)446 lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
447 {
448 unsigned long iflags;
449
450 spin_lock_irqsave(&phba->hbalock, iflags);
451 __lpfc_mbuf_free(phba, virt, dma);
452 spin_unlock_irqrestore(&phba->hbalock, iflags);
453 return;
454 }
455
456 /**
457 * lpfc_nvmet_buf_alloc - Allocate an nvmet_buf from the
458 * lpfc_sg_dma_buf_pool PCI pool
459 * @phba: HBA which owns the pool to allocate from
460 * @mem_flags: indicates if this is a priority (MEM_PRI) allocation
461 * @handle: used to return the DMA-mapped address of the nvmet_buf
462 *
463 * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool
464 * PCI pool. Allocates from generic dma_pool_alloc function.
465 *
466 * Returns:
467 * pointer to the allocated nvmet_buf on success
468 * NULL on failure
469 **/
470 void *
lpfc_nvmet_buf_alloc(struct lpfc_hba * phba,int mem_flags,dma_addr_t * handle)471 lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
472 {
473 void *ret;
474
475 ret = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
476 return ret;
477 }
478
/**
 * lpfc_nvmet_buf_free - Free an nvmet_buf from the lpfc_sg_dma_buf_pool
 * PCI pool
 * @phba: HBA which owns the pool to return to
 * @virt: nvmet_buf to free
 * @dma: the DMA-mapped address of the lpfc_sg_dma_buf_pool to be freed
 *
 * Description: Thin wrapper that returns the buffer directly to the
 * lpfc_sg_dma_buf_pool DMA pool; no driver-side caching is done.
 *
 * Returns: None
 **/
void
lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
}
493
494 /**
495 * lpfc_els_hbq_alloc - Allocate an HBQ buffer
496 * @phba: HBA to allocate HBQ buffer for
497 *
498 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI
499 * pool along a non-DMA-mapped container for it.
500 *
501 * Notes: Not interrupt-safe. Must be called with no locks held.
502 *
503 * Returns:
504 * pointer to HBQ on success
505 * NULL on failure
506 **/
507 struct hbq_dmabuf *
lpfc_els_hbq_alloc(struct lpfc_hba * phba)508 lpfc_els_hbq_alloc(struct lpfc_hba *phba)
509 {
510 struct hbq_dmabuf *hbqbp;
511
512 hbqbp = kzalloc_obj(struct hbq_dmabuf);
513 if (!hbqbp)
514 return NULL;
515
516 hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
517 &hbqbp->dbuf.phys);
518 if (!hbqbp->dbuf.virt) {
519 kfree(hbqbp);
520 return NULL;
521 }
522 hbqbp->total_size = LPFC_BPL_SIZE;
523 return hbqbp;
524 }
525
/**
 * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
 * @phba: HBA buffer was allocated for
 * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
 *
 * Description: Frees both the container and the DMA-mapped buffer returned
 * by lpfc_els_hbq_alloc. The DMA buffer must be released before the
 * container that records its address.
 *
 * Notes: Can be called with or without locks held.
 *
 * Returns: None
 **/
void
lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
{
	dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
	kfree(hbqbp);
	return;
}
545
546 /**
547 * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
548 * @phba: HBA to allocate a receive buffer for
549 *
550 * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
551 * pool along a non-DMA-mapped container for it.
552 *
553 * Notes: Not interrupt-safe. Must be called with no locks held.
554 *
555 * Returns:
556 * pointer to HBQ on success
557 * NULL on failure
558 **/
559 struct hbq_dmabuf *
lpfc_sli4_rb_alloc(struct lpfc_hba * phba)560 lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
561 {
562 struct hbq_dmabuf *dma_buf;
563
564 dma_buf = kzalloc_obj(struct hbq_dmabuf);
565 if (!dma_buf)
566 return NULL;
567
568 dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
569 &dma_buf->hbuf.phys);
570 if (!dma_buf->hbuf.virt) {
571 kfree(dma_buf);
572 return NULL;
573 }
574 dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
575 &dma_buf->dbuf.phys);
576 if (!dma_buf->dbuf.virt) {
577 dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
578 dma_buf->hbuf.phys);
579 kfree(dma_buf);
580 return NULL;
581 }
582 dma_buf->total_size = LPFC_DATA_BUF_SIZE;
583 return dma_buf;
584 }
585
/**
 * lpfc_sli4_rb_free - Frees a receive buffer
 * @phba: HBA buffer was allocated for
 * @dmab: DMA Buffer container returned by lpfc_sli4_rb_alloc
 *
 * Description: Frees both the container and the header/data DMA-mapped
 * buffers returned by lpfc_sli4_rb_alloc.
 *
 * Notes: Can be called with or without locks held.
 *
 * Returns: None
 **/
void
lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
{
	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
	dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
	kfree(dmab);
}
605
606 /**
607 * lpfc_sli4_nvmet_alloc - Allocate an SLI4 Receive buffer
608 * @phba: HBA to allocate a receive buffer for
609 *
610 * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
611 * pool along a non-DMA-mapped container for it.
612 *
613 * Returns:
614 * pointer to HBQ on success
615 * NULL on failure
616 **/
617 struct rqb_dmabuf *
lpfc_sli4_nvmet_alloc(struct lpfc_hba * phba)618 lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
619 {
620 struct rqb_dmabuf *dma_buf;
621
622 dma_buf = kzalloc_obj(*dma_buf);
623 if (!dma_buf)
624 return NULL;
625
626 dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
627 &dma_buf->hbuf.phys);
628 if (!dma_buf->hbuf.virt) {
629 kfree(dma_buf);
630 return NULL;
631 }
632 dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool,
633 GFP_KERNEL, &dma_buf->dbuf.phys);
634 if (!dma_buf->dbuf.virt) {
635 dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
636 dma_buf->hbuf.phys);
637 kfree(dma_buf);
638 return NULL;
639 }
640 dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
641 return dma_buf;
642 }
643
/**
 * lpfc_sli4_nvmet_free - Frees a receive buffer
 * @phba: HBA buffer was allocated for
 * @dmab: DMA Buffer container returned by lpfc_sli4_nvmet_alloc
 *
 * Description: Frees both the container and the header/data DMA-mapped
 * buffers returned by lpfc_sli4_nvmet_alloc.
 *
 * Notes: Can be called with or without locks held.
 *
 * Returns: None
 **/
void
lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
{
	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
	dma_pool_free(phba->lpfc_nvmet_drb_pool,
		      dmab->dbuf.virt, dmab->dbuf.phys);
	kfree(dmab);
}
664
/**
 * lpfc_in_buf_free - Free a DMA buffer
 * @phba: HBA buffer is associated with
 * @mp: Buffer to free (may be NULL; then this is a no-op)
 *
 * Description: Frees the given DMA buffer in the appropriate way given if
 * the HBA is running in SLI3 mode with HBQs enabled: HBQ buffers are
 * returned under the hbalock, otherwise the buffer is treated as a plain
 * mbuf and released with its container.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks
 * held.
 *
 * Returns: None
 **/
void
lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct hbq_dmabuf *hbq_entry;
	unsigned long flags;

	if (!mp)
		return;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		/* mp is embedded as the dbuf member of an hbq_dmabuf */
		hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
		/* Check whether HBQ is still in use */
		spin_lock_irqsave(&phba->hbalock, flags);
		if (!phba->hbq_in_use) {
			/* HBQs torn down; nothing to return the buffer to */
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return;
		}
		list_del(&hbq_entry->dbuf.list);
		/* NOTE(review): tag == -1 appears to mark a buffer never
		 * posted to an HBQ, freed via the ELS HBQ free routine
		 * rather than reposted — confirm against lpfc_sli.c.
		 */
		if (hbq_entry->tag == -1) {
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_entry);
		} else {
			lpfc_sli_free_hbq(phba, hbq_entry);
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
	} else {
		/* Non-HBQ path: plain mbuf plus kmalloc'd container */
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return;
}
708
/**
 * lpfc_rq_buf_free - Free a RQ DMA buffer
 * @phba: HBA buffer is associated with
 * @mp: Buffer to free (may be NULL; then this is a no-op)
 *
 * Description: "Frees" the given DMA buffer by reposting it to its
 * associated receive queue pair (header RQ + data RQ) so it can be
 * reused. If the repost fails, the buffer is handed to the RQ's
 * rqb_free_buffer callback instead.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks
 * held.
 *
 * Returns: None
 **/
void
lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct lpfc_rqb *rqbp;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct rqb_dmabuf *rqb_entry;
	unsigned long flags;
	int rc;

	if (!mp)
		return;

	/* mp is embedded as the hbuf member of an rqb_dmabuf */
	rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf);
	rqbp = rqb_entry->hrq->rqbp;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_del(&rqb_entry->hbuf.list);
	/* Build the header and data RQEs from the buffer's DMA addresses */
	hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys);
	drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys);
	drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
	rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
	if (rc < 0) {
		/* Repost failed; log queue state and release the buffer */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6409 Cannot post to HRQ %d: %x %x %x "
				"DRQ %x %x\n",
				rqb_entry->hrq->queue_id,
				rqb_entry->hrq->host_index,
				rqb_entry->hrq->hba_index,
				rqb_entry->hrq->entry_count,
				rqb_entry->drq->host_index,
				rqb_entry->drq->hba_index);
		(rqbp->rqb_free_buffer)(phba, rqb_entry);
	} else {
		/* Reposted successfully; track it on the RQ's buffer list */
		list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
		rqbp->buffer_count++;
	}

	spin_unlock_irqrestore(&phba->hbalock, flags);
}
762