xref: /linux/drivers/infiniband/hw/bnxt_re/qplib_res.c (revision c964ced7726294d40913f2127c3f185a92cb4a41)
/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: QPLib resource manager
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"

static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_chip_ctx *cctx,
				      struct bnxt_qplib_stats *stats);

/* PBL */
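/*
 * A PBL (page buffer list) tracks one level of a queue's page table:
 * the kernel virtual addresses (pg_arr) and DMA addresses (pg_map_arr)
 * of the pages backing a hardware queue. For user (umem) queues only
 * the DMA addresses are populated.
 */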
static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
		       bool is_umem)
{
	struct pci_dev *pdev = res->pdev;
	int i;

	if (!is_umem) {
		for (i = 0; i < pbl->pg_count; i++) {
			if (pbl->pg_arr[i])
				dma_free_coherent(&pdev->dev, pbl->pg_size,
						  (void *)((unsigned long)
						   pbl->pg_arr[i] &
						  PAGE_MASK),
						  pbl->pg_map_arr[i]);
			else
				dev_warn(&pdev->dev,
					 "PBL free pg_arr[%d] empty?!\n", i);
			pbl->pg_arr[i] = NULL;
		}
	}
	vfree(pbl->pg_arr);
	pbl->pg_arr = NULL;
	vfree(pbl->pg_map_arr);
	pbl->pg_map_arr = NULL;
	pbl->pg_count = 0;
	pbl->pg_size = 0;
}

static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
					   struct bnxt_qplib_sg_info *sginfo)
{
	struct ib_block_iter biter;
	int i = 0;

	rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {
		pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);
		pbl->pg_arr[i] = NULL;
		pbl->pg_count++;
		i++;
	}
}

static int __alloc_pbl(struct bnxt_qplib_res *res,
		       struct bnxt_qplib_pbl *pbl,
		       struct bnxt_qplib_sg_info *sginfo)
{
	struct pci_dev *pdev = res->pdev;
	bool is_umem = false;
	u32 pages;
	int i;

	if (sginfo->nopte)
		return 0;
	if (sginfo->umem)
		pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
	else
		pages = sginfo->npages;
	/* page ptr arrays */
	pbl->pg_arr = vmalloc_array(pages, sizeof(void *));
	if (!pbl->pg_arr)
		return -ENOMEM;

	pbl->pg_map_arr = vmalloc_array(pages, sizeof(dma_addr_t));
	if (!pbl->pg_map_arr) {
		vfree(pbl->pg_arr);
		pbl->pg_arr = NULL;
		return -ENOMEM;
	}
	pbl->pg_count = 0;
	pbl->pg_size = sginfo->pgsize;

	if (!sginfo->umem) {
		for (i = 0; i < pages; i++) {
			pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
							    pbl->pg_size,
							    &pbl->pg_map_arr[i],
							    GFP_KERNEL);
			if (!pbl->pg_arr[i])
				goto fail;
			pbl->pg_count++;
		}
	} else {
		is_umem = true;
		bnxt_qplib_fill_user_dma_pages(pbl, sginfo);
	}

	return 0;
fail:
	__free_pbl(res, pbl, is_umem);
	return -ENOMEM;
}

/* HWQ */
void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_hwq *hwq)
{
	int i;

	if (!hwq->max_elements)
		return;
	if (hwq->level >= PBL_LVL_MAX)
		return;

	for (i = 0; i < hwq->level + 1; i++) {
		if (i == hwq->level)
			__free_pbl(res, &hwq->pbl[i], hwq->is_user);
		else
			__free_pbl(res, &hwq->pbl[i], false);
	}

	hwq->level = PBL_LVL_MAX;
	hwq->max_elements = 0;
	hwq->element_size = 0;
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->cp_bit = 0;
}

/* All HWQs are power of 2 in size */
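/*
 * Illustrative sizing (assuming MAX_PBL_LVL_0_PGS == 1 and
 * MAX_PBL_LVL_1_PGS == 512, per the comments below): a queue of depth
 * 1024 with 64-byte elements and 4 KiB pages needs
 * (1024 * 64) / 4096 = 16 PTE pages; since 1 < 16 <= 512, a single
 * level of indirection (PBL_LVL_1) is used.
 */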

int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
			      struct bnxt_qplib_hwq_attr *hwq_attr)
{
	u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0;
	struct bnxt_qplib_sg_info sginfo = {};
	u32 depth, stride, npbl, npde;
	dma_addr_t *src_phys_ptr, **dst_virt_ptr;
	struct bnxt_qplib_res *res;
	struct pci_dev *pdev;
	int i, rc, lvl;

	res = hwq_attr->res;
	pdev = res->pdev;
	pg_size = hwq_attr->sginfo->pgsize;
	hwq->level = PBL_LVL_MAX;

	depth = roundup_pow_of_two(hwq_attr->depth);
	stride = roundup_pow_of_two(hwq_attr->stride);
	if (hwq_attr->aux_depth) {
		aux_slots = hwq_attr->aux_depth;
		aux_size = roundup_pow_of_two(hwq_attr->aux_stride);
		aux_pages = (aux_slots * aux_size) / pg_size;
		if ((aux_slots * aux_size) % pg_size)
			aux_pages++;
	}

	if (!hwq_attr->sginfo->umem) {
		hwq->is_user = false;
		npages = (depth * stride) / pg_size + aux_pages;
		if ((depth * stride) % pg_size)
			npages++;
		if (!npages)
			return -EINVAL;
		hwq_attr->sginfo->npages = npages;
	} else {
		npages = ib_umem_num_dma_blocks(hwq_attr->sginfo->umem,
						hwq_attr->sginfo->pgsize);
		hwq->is_user = true;
	}

	if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) {
		/* This request is Level 0, map PTE */
		rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
		if (rc)
			goto fail;
		hwq->level = PBL_LVL_0;
		goto done;
	}

	if (npages >= MAX_PBL_LVL_0_PGS) {
		if (npages > MAX_PBL_LVL_1_PGS) {
			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
				    0 : PTU_PTE_VALID;
			/* 2 levels of indirection */
			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
				npbl++;
			npde = npbl >> MAX_PDL_LVL_SHIFT;
			if (npbl % BIT(MAX_PDL_LVL_SHIFT))
				npde++;
			/* Alloc PDE pages */
			sginfo.pgsize = npde * pg_size;
			sginfo.npages = 1;
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
			if (rc)
				goto fail;

			/* Alloc PBL pages */
			sginfo.npages = npbl;
			sginfo.pgsize = PAGE_SIZE;
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
			if (rc)
				goto fail;
			/* Fill PDL with PBL page pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
				dst_virt_ptr[0][i] = src_phys_ptr[i] | flag;

			/* Alloc or init PTEs */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
					 hwq_attr->sginfo);
			if (rc)
				goto fail;
			hwq->level = PBL_LVL_2;
			if (hwq_attr->sginfo->nopte)
				goto done;
			/* Fill PBLs with PTE pointers */
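			/*
			 * PTR_PG()/PTR_IDX() split the flat PTE index into
			 * a PBL page and an offset within that page
			 * (assuming 512 64-bit entries per 4 KiB PBL page).
			 */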
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | PTU_PTE_VALID;
			}
			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_2].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
								  PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						    PTU_PTE_NEXT_TO_LAST;
			}
		} else { /* npages <= 512: npbl = 1, npde = 0 */
			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
				    0 : PTU_PTE_VALID;

			/* 1 level of indirection */
			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
				npbl++;
			sginfo.npages = npbl;
			sginfo.pgsize = PAGE_SIZE;
			/* Alloc PBL page */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
			if (rc)
				goto fail;
			/* Alloc or init PTEs */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1],
					 hwq_attr->sginfo);
			if (rc)
				goto fail;
			hwq->level = PBL_LVL_1;
			if (hwq_attr->sginfo->nopte)
				goto done;
			/* Fill PBL with PTE pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | flag;
			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_1].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
								  PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						    PTU_PTE_NEXT_TO_LAST;
			}
		}
	}
done:
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->pdev = pdev;
	hwq->depth = hwq_attr->depth;
	hwq->max_elements = hwq->depth;
	hwq->element_size = stride;
	hwq->qe_ppg = pg_size / stride;
	/* For direct access to the elements */
	lvl = hwq->level;
	if (hwq_attr->sginfo->nopte && hwq->level)
		lvl = hwq->level - 1;
	hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
	hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
	spin_lock_init(&hwq->lock);

	return 0;
fail:
	bnxt_qplib_free_hwq(res, hwq);
	return -ENOMEM;
}
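
/*
 * Typical caller pattern (illustrative sketch only; see
 * bnxt_qplib_alloc_tqm_rings() below for an in-tree example):
 *
 *	struct bnxt_qplib_hwq_attr hwq_attr = {};
 *	struct bnxt_qplib_sg_info sginfo = {};
 *
 *	sginfo.pgsize = PAGE_SIZE;
 *	sginfo.pgshft = PAGE_SHIFT;
 *	hwq_attr.res = res;
 *	hwq_attr.sginfo = &sginfo;
 *	hwq_attr.type = HWQ_TYPE_CTX;
 *	hwq_attr.depth = 512;
 *	hwq_attr.stride = sizeof(u64);
 *	rc = bnxt_qplib_alloc_init_hwq(&hwq, &hwq_attr);
 */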

/* Context Tables */
void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx)
{
	int i;

	bnxt_qplib_free_hwq(res, &ctx->qpc_tbl);
	bnxt_qplib_free_hwq(res, &ctx->mrw_tbl);
	bnxt_qplib_free_hwq(res, &ctx->srqc_tbl);
	bnxt_qplib_free_hwq(res, &ctx->cq_tbl);
	bnxt_qplib_free_hwq(res, &ctx->tim_tbl);
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
		bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]);
	/* restore original PDE level before destroy */
	ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
	bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
	bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
}

static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
				      struct bnxt_qplib_ctx *ctx)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct bnxt_qplib_tqm_ctx *tqmctx;
	int rc;
	int i;

	tqmctx = &ctx->tqm_ctx;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.res = res;
	hwq_attr.type = HWQ_TYPE_CTX;
	hwq_attr.depth = 512;
	hwq_attr.stride = sizeof(u64);
	/* Alloc PDE (page directory) buffer */
	rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr);
	if (rc)
		goto out;
	/* Save original PDE level */
	tqmctx->pde_level = tqmctx->pde.level;

	hwq_attr.stride = 1;
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
		if (!tqmctx->qcount[i])
			continue;
		hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i];
		rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr);
		if (rc)
			goto out;
	}
out:
	return rc;
}
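
/*
 * Point each TQM ring's slot in the PDE page at that ring's page
 * table: a ring that needed PBL_LVL_2 contributes its PBL-level
 * pages, while smaller rings contribute their single level-0 page.
 */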
static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
{
	struct bnxt_qplib_hwq *tbl;
	dma_addr_t *dma_ptr;
	__le64 **pbl_ptr, *ptr;
	int i, j, k;
	int fnz_idx = -1;
	int pg_count;

	pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;

	for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
	     i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
		tbl = &ctx->qtbl[i];
		if (!tbl->max_elements)
			continue;
		if (fnz_idx == -1)
			fnz_idx = i; /* first non-zero index */
		switch (tbl->level) {
		case PBL_LVL_2:
			pg_count = tbl->pbl[PBL_LVL_1].pg_count;
			for (k = 0; k < pg_count; k++) {
				ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
				dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];
				*ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
			}
			break;
		case PBL_LVL_1:
		case PBL_LVL_0:
		default:
			ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
			*ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |
					   PTU_PTE_VALID);
			break;
		}
	}
	if (fnz_idx == -1)
		fnz_idx = 0;
	/* update PDE level as per page table programming */
	ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
			  ctx->qtbl[fnz_idx].level + 1;
}

static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
				      struct bnxt_qplib_ctx *ctx)
{
	int rc;

	rc = bnxt_qplib_alloc_tqm_rings(res, ctx);
	if (rc)
		goto fail;

	bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx);
fail:
	return rc;
}

/*
 * Routine: bnxt_qplib_alloc_ctx
 * Description:
 *     Context tables are memories that are used by the chip firmware.
 *     The 6 tables defined are:
 *             QPC ctx - holds QP states
 *             MRW ctx - holds memory region and window
 *             SRQ ctx - holds shared RQ states
 *             CQ ctx - holds completion queue states
 *             TQM ctx - holds Tx Queue Manager context
 *             TIM ctx - holds timer context
 *     Depending on the size of the table requested, either a single-page
 *     Page Buffer List is used, or one to two levels of indirection via
 *     a Page Directory List plus PBL(s).
 *     The indirection level is chosen as follows:
 *             For 0      < ctx size <= 1 PAGE, 0 levels of indirection
 *             For 1 PAGE < ctx size <= 512 pages, 1 level of indirection
 *             For 512    < ctx size <= MAX, 2 levels of indirection
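 *     Example (illustrative, with 4 KiB pages): a 256 KiB table spans
 *     64 pages, so a single PBL page of 512 PTEs (1 level) suffices;
 *     anything above 512 pages (2 MiB) requires the PDL as well.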
 * Returns:
 *     0 if success, else -ERRORS
 */
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx,
			 bool virt_fn, bool is_p5)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	int rc;

	if (virt_fn || is_p5)
		goto stats_alloc;

	/* QPC Tables */
	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.sginfo = &sginfo;

	hwq_attr.res = res;
	hwq_attr.depth = ctx->qpc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
	hwq_attr.type = HWQ_TYPE_CTX;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* MRW Tables */
	hwq_attr.depth = ctx->mrw_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* SRQ Tables */
	hwq_attr.depth = ctx->srqc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* CQ Tables */
	hwq_attr.depth = ctx->cq_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* TQM Buffer */
	rc = bnxt_qplib_setup_tqm_rings(res, ctx);
	if (rc)
		goto fail;
	/* TIM Buffer */
	ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
	hwq_attr.depth = ctx->qpc_count * 16;
	hwq_attr.stride = 1;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
	if (rc)
		goto fail;
stats_alloc:
	/* Stats */
	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
	if (rc)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_ctx(res, ctx);
	return rc;
}

static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	kfree(sgid_tbl->tbl);
	kfree(sgid_tbl->hw_id);
	kfree(sgid_tbl->ctx);
	kfree(sgid_tbl->vlan);
	sgid_tbl->tbl = NULL;
	sgid_tbl->hw_id = NULL;
	sgid_tbl->ctx = NULL;
	sgid_tbl->vlan = NULL;
	sgid_tbl->max = 0;
	sgid_tbl->active = 0;
}

static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     u16 max)
{
	sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
	if (!sgid_tbl->tbl)
		return -ENOMEM;

	sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!sgid_tbl->hw_id)
		goto out_free1;

	sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
	if (!sgid_tbl->ctx)
		goto out_free2;

	sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
	if (!sgid_tbl->vlan)
		goto out_free3;

	sgid_tbl->max = max;
	return 0;
out_free3:
	kfree(sgid_tbl->ctx);
	sgid_tbl->ctx = NULL;
out_free2:
	kfree(sgid_tbl->hw_id);
	sgid_tbl->hw_id = NULL;
out_free1:
	kfree(sgid_tbl->tbl);
	sgid_tbl->tbl = NULL;
	return -ENOMEM;
}

static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
					struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	int i;

	for (i = 0; i < sgid_tbl->max; i++) {
		if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
			   sizeof(bnxt_qplib_gid_zero)))
			bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
					    sgid_tbl->tbl[i].vlan_id, true);
	}
	memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
	memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
	sgid_tbl->active = 0;
}

static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     struct net_device *netdev)
{
	u32 i;

	for (i = 0; i < sgid_tbl->max; i++)
		sgid_tbl->tbl[i].vlan_id = 0xffff;

	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}

/* PDs */
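/*
 * The PD table is a bitmap with inverted polarity: a set bit marks a
 * free PD id and a cleared bit marks one in use, which is why
 * bnxt_qplib_alloc_pd_tbl() initializes it to all ones.
 */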
int bnxt_qplib_alloc_pd(struct bnxt_qplib_res *res, struct bnxt_qplib_pd *pd)
{
	struct bnxt_qplib_pd_tbl *pdt = &res->pd_tbl;
	u32 bit_num;
	int rc = 0;

	mutex_lock(&res->pd_tbl_lock);
	bit_num = find_first_bit(pdt->tbl, pdt->max);
	if (bit_num == pdt->max) {
		rc = -ENOMEM;
		goto exit;
	}

	/* Found unused PD */
	clear_bit(bit_num, pdt->tbl);
	pd->id = bit_num;
exit:
	mutex_unlock(&res->pd_tbl_lock);
	return rc;
}

int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_pd_tbl *pdt,
			  struct bnxt_qplib_pd *pd)
{
	int rc = 0;

	mutex_lock(&res->pd_tbl_lock);
	if (test_and_set_bit(pd->id, pdt->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
			 pd->id);
		rc = -EINVAL;
		goto exit;
	}
	pd->id = 0;
exit:
	mutex_unlock(&res->pd_tbl_lock);
	return rc;
}

static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
{
	kfree(pdt->tbl);
	pdt->tbl = NULL;
	pdt->max = 0;
}

static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_pd_tbl *pdt,
				   u32 max)
{
	u32 bytes;

	bytes = max >> 3;
	if (!bytes)
		bytes = 1;
	pdt->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!pdt->tbl)
		return -ENOMEM;

	pdt->max = max;
	memset((u8 *)pdt->tbl, 0xFF, bytes);
	mutex_init(&res->pd_tbl_lock);

	return 0;
}

/* DPIs */
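/*
 * A DPI (doorbell page index) names one PAGE_SIZE doorbell page in the
 * device BAR. Depending on the type, it is mapped write-combining,
 * uncached, or aliased to the already-mapped privileged page for
 * kernel users.
 */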
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_dpi *dpi,
			 void *app, u8 type)
{
	struct bnxt_qplib_dpi_tbl *dpit = &res->dpi_tbl;
	struct bnxt_qplib_reg_desc *reg;
	u32 bit_num;
	u64 umaddr;

	reg = &dpit->wcreg;
	mutex_lock(&res->dpi_tbl_lock);

	bit_num = find_first_bit(dpit->tbl, dpit->max);
	if (bit_num == dpit->max) {
		mutex_unlock(&res->dpi_tbl_lock);
		return -ENOMEM;
	}

	/* Found unused DPI */
	clear_bit(bit_num, dpit->tbl);
	dpit->app_tbl[bit_num] = app;
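
	/*
	 * Doorbell pages appear to be numbered from the start of the
	 * privileged (ucreg) region, so offset the WC page index by the
	 * distance between the two regions.
	 */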
	dpi->bit = bit_num;
	dpi->dpi = bit_num + (reg->offset - dpit->ucreg.offset) / PAGE_SIZE;

	umaddr = reg->bar_base + reg->offset + bit_num * PAGE_SIZE;
	dpi->umdbr = umaddr;

	switch (type) {
	case BNXT_QPLIB_DPI_TYPE_KERNEL:
		/* privileged dbr was already mapped; just initialize it. */
		dpi->umdbr = dpit->ucreg.bar_base +
			     dpit->ucreg.offset + bit_num * PAGE_SIZE;
		dpi->dbr = dpit->priv_db;
		dpi->dpi = dpi->bit;
		break;
	case BNXT_QPLIB_DPI_TYPE_WC:
		dpi->dbr = ioremap_wc(umaddr, PAGE_SIZE);
		break;
	default:
		dpi->dbr = ioremap(umaddr, PAGE_SIZE);
		break;
	}

	dpi->type = type;
	mutex_unlock(&res->dpi_tbl_lock);
	return 0;
}

int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_dpi *dpi)
{
	struct bnxt_qplib_dpi_tbl *dpit = &res->dpi_tbl;

	mutex_lock(&res->dpi_tbl_lock);
	if (dpi->dpi && dpi->type != BNXT_QPLIB_DPI_TYPE_KERNEL)
		pci_iounmap(res->pdev, dpi->dbr);

	if (test_and_set_bit(dpi->bit, dpit->tbl)) {
		dev_warn(&res->pdev->dev,
			 "Freeing an unused DPI? dpi = %d, bit = %d\n",
			 dpi->dpi, dpi->bit);
		mutex_unlock(&res->dpi_tbl_lock);
		return -EINVAL;
	}
	if (dpit->app_tbl)
		dpit->app_tbl[dpi->bit] = NULL;
	memset(dpi, 0, sizeof(*dpi));
	mutex_unlock(&res->dpi_tbl_lock);
	return 0;
}

static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit)
{
	kfree(dpit->tbl);
	kfree(dpit->app_tbl);
	dpit->tbl = NULL;
	dpit->app_tbl = NULL;
	dpit->max = 0;
}

static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dev_attr *dev_attr)
{
	struct bnxt_qplib_dpi_tbl *dpit;
	struct bnxt_qplib_reg_desc *reg;
	unsigned long bar_len;
	u32 dbr_offset;
	u32 bytes;

	dpit = &res->dpi_tbl;
	reg = &dpit->wcreg;

	if (!bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
		/* Offset should come from L2 driver */
		dbr_offset = dev_attr->l2_db_size;
		dpit->ucreg.offset = dbr_offset;
		dpit->wcreg.offset = dbr_offset;
	}

	bar_len = pci_resource_len(res->pdev, reg->bar_id);
	dpit->max = (bar_len - reg->offset) / PAGE_SIZE;
	if (dev_attr->max_dpi)
		dpit->max = min_t(u32, dpit->max, dev_attr->max_dpi);

	dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
	if (!dpit->app_tbl)
		return -ENOMEM;

	bytes = dpit->max >> 3;
	if (!bytes)
		bytes = 1;

	dpit->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!dpit->tbl) {
		kfree(dpit->app_tbl);
		dpit->app_tbl = NULL;
		return -ENOMEM;
	}

	memset((u8 *)dpit->tbl, 0xFF, bytes);
	mutex_init(&res->dpi_tbl_lock);
	dpit->priv_db = dpit->ucreg.bar_reg + dpit->ucreg.offset;

	return 0;
}

/* Stats */
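/*
 * The stats context is a single coherent DMA buffer into which the
 * device reports statistics; an fw_id of -1 marks it as unallocated.
 */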
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats)
{
	if (stats->dma) {
		dma_free_coherent(&pdev->dev, stats->size,
				  stats->dma, stats->dma_map);
	}
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
}

static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_chip_ctx *cctx,
				      struct bnxt_qplib_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
	stats->size = cctx->hw_stats_size;
	stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
					&stats->dma_map, GFP_KERNEL);
	if (!stats->dma) {
		dev_err(&pdev->dev, "Stats DMA allocation failed\n");
		return -ENOMEM;
	}
	return 0;
}

void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}

int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);

	return 0;
}

void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
	bnxt_qplib_free_pd_tbl(&res->pd_tbl);
	bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
}

int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
			 struct net_device *netdev,
			 struct bnxt_qplib_dev_attr *dev_attr)
{
	int rc;

	res->pdev = pdev;
	res->netdev = netdev;

	rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi_tbl(res, dev_attr);
	if (rc)
		goto fail;

	return 0;
fail:
	bnxt_qplib_free_res(res);
	return rc;
}

void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res)
{
	struct bnxt_qplib_reg_desc *reg;

	reg = &res->dpi_tbl.ucreg;
	if (reg->bar_reg)
		pci_iounmap(res->pdev, reg->bar_reg);
	reg->bar_reg = NULL;
	reg->bar_base = 0;
	reg->len = 0;
	reg->bar_id = 0;
}

int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res)
{
	struct bnxt_qplib_reg_desc *ucreg;
	struct bnxt_qplib_reg_desc *wcreg;

	wcreg = &res->dpi_tbl.wcreg;
	wcreg->bar_id = RCFW_DBR_PCI_BAR_REGION;
	wcreg->bar_base = pci_resource_start(res->pdev, wcreg->bar_id);

	ucreg = &res->dpi_tbl.ucreg;
	ucreg->bar_id = RCFW_DBR_PCI_BAR_REGION;
	ucreg->bar_base = pci_resource_start(res->pdev, ucreg->bar_id);
	ucreg->len = ucreg->offset + PAGE_SIZE;
	if (!ucreg->len || ((ucreg->len & (PAGE_SIZE - 1)) != 0)) {
		dev_err(&res->pdev->dev, "invalid dbr length %d\n",
			(int)ucreg->len);
		return -EINVAL;
	}
	ucreg->bar_reg = ioremap(ucreg->bar_base, ucreg->len);
	if (!ucreg->bar_reg) {
		dev_err(&res->pdev->dev, "privileged dpi map failed!\n");
		return -ENOMEM;
	}

	return 0;
}
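
/*
 * Succeeds (returns 0) only when both 32- and 64-bit PCIe AtomicOp
 * completion could be enabled on the path to the root complex and the
 * AtomicOp requester-enable bit ended up set in Device Control 2.
 */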
int bnxt_qplib_determine_atomics(struct pci_dev *dev)
{
	int comp;
	u16 ctl2;

	comp = pci_enable_atomic_ops_to_root(dev,
					     PCI_EXP_DEVCAP2_ATOMIC_COMP32);
	if (comp)
		return -EOPNOTSUPP;
	comp = pci_enable_atomic_ops_to_root(dev,
					     PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	if (comp)
		return -EOPNOTSUPP;
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctl2);
	return !(ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}
962