1 /* 2 * Broadcom NetXtreme-E RoCE driver. 3 * 4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term 5 * Broadcom refers to Broadcom Limited and/or its subsidiaries. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 17 * 1. Redistributions of source code must retain the above copyright 18 * notice, this list of conditions and the following disclaimer. 19 * 2. Redistributions in binary form must reproduce the above copyright 20 * notice, this list of conditions and the following disclaimer in 21 * the documentation and/or other materials provided with the 22 * distribution. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN 34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 35 * 36 * Description: QPLib resource manager 37 */ 38 39 #define dev_fmt(fmt) "QPLIB: " fmt 40 41 #include <linux/spinlock.h> 42 #include <linux/pci.h> 43 #include <linux/interrupt.h> 44 #include <linux/inetdevice.h> 45 #include <linux/dma-mapping.h> 46 #include <linux/if_vlan.h> 47 #include <linux/vmalloc.h> 48 #include "roce_hsi.h" 49 #include "qplib_res.h" 50 #include "qplib_sp.h" 51 #include "qplib_rcfw.h" 52 53 static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev, 54 struct bnxt_qplib_stats *stats); 55 static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev, 56 struct bnxt_qplib_stats *stats); 57 58 /* PBL */ 59 static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl, 60 bool is_umem) 61 { 62 struct pci_dev *pdev = res->pdev; 63 int i; 64 65 if (!is_umem) { 66 for (i = 0; i < pbl->pg_count; i++) { 67 if (pbl->pg_arr[i]) 68 dma_free_coherent(&pdev->dev, pbl->pg_size, 69 (void *)((unsigned long) 70 pbl->pg_arr[i] & 71 PAGE_MASK), 72 pbl->pg_map_arr[i]); 73 else 74 dev_warn(&pdev->dev, 75 "PBL free pg_arr[%d] empty?!\n", i); 76 pbl->pg_arr[i] = NULL; 77 } 78 } 79 vfree(pbl->pg_arr); 80 pbl->pg_arr = NULL; 81 vfree(pbl->pg_map_arr); 82 pbl->pg_map_arr = NULL; 83 pbl->pg_count = 0; 84 pbl->pg_size = 0; 85 } 86 87 static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl, 88 struct bnxt_qplib_sg_info *sginfo) 89 { 90 struct scatterlist *sghead = sginfo->sghead; 91 struct sg_dma_page_iter sg_iter; 92 int i = 0; 93 94 for_each_sg_dma_page(sghead, &sg_iter, sginfo->nmap, 0) { 95 pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter); 96 pbl->pg_arr[i] = NULL; 97 pbl->pg_count++; 98 i++; 99 } 100 } 101 102 static int __alloc_pbl(struct bnxt_qplib_res *res, 103 struct bnxt_qplib_pbl *pbl, 104 struct bnxt_qplib_sg_info *sginfo) 105 { 106 struct pci_dev *pdev = res->pdev; 107 struct scatterlist *sghead; 108 bool is_umem = false; 109 u32 pages; 110 int i; 111 112 if (sginfo->nopte) 113 return 0; 114 pages = sginfo->npages; 115 sghead = sginfo->sghead; 116 /* page ptr arrays */ 117 pbl->pg_arr = vmalloc(pages * sizeof(void *)); 118 if (!pbl->pg_arr) 119 return -ENOMEM; 120 121 pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t)); 122 if (!pbl->pg_map_arr) { 123 vfree(pbl->pg_arr); 124 pbl->pg_arr = NULL; 125 return -ENOMEM; 126 } 127 pbl->pg_count = 0; 128 pbl->pg_size = sginfo->pgsize; 129 130 if (!sghead) { 131 for (i = 0; i < pages; i++) { 132 pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 133 pbl->pg_size, 134 &pbl->pg_map_arr[i], 135 GFP_KERNEL); 136 if (!pbl->pg_arr[i]) 137 goto fail; 138 pbl->pg_count++; 139 } 140 } else { 141 is_umem = true; 142 bnxt_qplib_fill_user_dma_pages(pbl, sginfo); 143 } 144 145 return 0; 146 fail: 147 __free_pbl(res, pbl, is_umem); 148 return -ENOMEM; 149 } 150 151 /* HWQ */ 152 void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res, 153 struct bnxt_qplib_hwq *hwq) 154 { 155 int i; 156 157 if (!hwq->max_elements) 158 return; 159 if (hwq->level >= PBL_LVL_MAX) 160 return; 161 162 for (i = 0; i < hwq->level + 1; i++) { 163 if (i == hwq->level) 164 __free_pbl(res, &hwq->pbl[i], hwq->is_user); 165 else 166 __free_pbl(res, &hwq->pbl[i], false); 167 } 168 169 hwq->level = PBL_LVL_MAX; 170 hwq->max_elements = 0; 171 hwq->element_size = 0; 172 hwq->prod = 0; 173 hwq->cons = 0; 174 hwq->cp_bit = 0; 175 } 176 177 /* All HWQs are power of 2 in size */ 178 179 int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq, 180 struct bnxt_qplib_hwq_attr *hwq_attr) 181 { 182 u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0; 183 struct bnxt_qplib_sg_info sginfo = {}; 184 u32 depth, stride, npbl, npde; 185 dma_addr_t *src_phys_ptr, **dst_virt_ptr; 186 struct scatterlist *sghead = NULL; 187 struct bnxt_qplib_res *res; 188 struct pci_dev *pdev; 189 int i, rc, lvl; 190 191 res = hwq_attr->res; 192 pdev = res->pdev; 193 sghead = hwq_attr->sginfo->sghead; 194 pg_size = hwq_attr->sginfo->pgsize; 195 hwq->level = PBL_LVL_MAX; 196 197 depth = roundup_pow_of_two(hwq_attr->depth); 198 stride = roundup_pow_of_two(hwq_attr->stride); 199 if (hwq_attr->aux_depth) { 200 aux_slots = hwq_attr->aux_depth; 201 aux_size = roundup_pow_of_two(hwq_attr->aux_stride); 202 aux_pages = (aux_slots * aux_size) / pg_size; 203 if ((aux_slots * aux_size) % pg_size) 204 aux_pages++; 205 } 206 207 if (!sghead) { 208 hwq->is_user = false; 209 npages = (depth * stride) / pg_size + aux_pages; 210 if ((depth * stride) % pg_size) 211 npages++; 212 if (!npages) 213 return -EINVAL; 214 hwq_attr->sginfo->npages = npages; 215 } else { 216 hwq->is_user = true; 217 npages = hwq_attr->sginfo->npages; 218 npages = (npages * PAGE_SIZE) / 219 BIT_ULL(hwq_attr->sginfo->pgshft); 220 if ((hwq_attr->sginfo->npages * PAGE_SIZE) % 221 BIT_ULL(hwq_attr->sginfo->pgshft)) 222 if (!npages) 223 npages++; 224 } 225 226 if (npages == MAX_PBL_LVL_0_PGS) { 227 /* This request is Level 0, map PTE */ 228 rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo); 229 if (rc) 230 goto fail; 231 hwq->level = PBL_LVL_0; 232 } 233 234 if (npages > MAX_PBL_LVL_0_PGS) { 235 if (npages > MAX_PBL_LVL_1_PGS) { 236 u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ? 237 0 : PTU_PTE_VALID; 238 /* 2 levels of indirection */ 239 npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT; 240 if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT)) 241 npbl++; 242 npde = npbl >> MAX_PDL_LVL_SHIFT; 243 if (npbl % BIT(MAX_PDL_LVL_SHIFT)) 244 npde++; 245 /* Alloc PDE pages */ 246 sginfo.pgsize = npde * pg_size; 247 sginfo.npages = 1; 248 rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo); 249 250 /* Alloc PBL pages */ 251 sginfo.npages = npbl; 252 sginfo.pgsize = PAGE_SIZE; 253 rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo); 254 if (rc) 255 goto fail; 256 /* Fill PDL with PBL page pointers */ 257 dst_virt_ptr = 258 (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr; 259 src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr; 260 if (hwq_attr->type == HWQ_TYPE_MR) { 261 /* For MR it is expected that we supply only 1 contigous 262 * page i.e only 1 entry in the PDL that will contain 263 * all the PBLs for the user supplied memory region 264 */ 265 for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; 266 i++) 267 dst_virt_ptr[0][i] = src_phys_ptr[i] | 268 flag; 269 } else { 270 for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; 271 i++) 272 dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] = 273 src_phys_ptr[i] | 274 PTU_PDE_VALID; 275 } 276 /* Alloc or init PTEs */ 277 rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2], 278 hwq_attr->sginfo); 279 if (rc) 280 goto fail; 281 hwq->level = PBL_LVL_2; 282 if (hwq_attr->sginfo->nopte) 283 goto done; 284 /* Fill PBLs with PTE pointers */ 285 dst_virt_ptr = 286 (dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr; 287 src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr; 288 for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) { 289 dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] = 290 src_phys_ptr[i] | PTU_PTE_VALID; 291 } 292 if (hwq_attr->type == HWQ_TYPE_QUEUE) { 293 /* Find the last pg of the size */ 294 i = hwq->pbl[PBL_LVL_2].pg_count; 295 dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |= 296 PTU_PTE_LAST; 297 if (i > 1) 298 dst_virt_ptr[PTR_PG(i - 2)] 299 [PTR_IDX(i - 2)] |= 300 PTU_PTE_NEXT_TO_LAST; 301 } 302 } else { /* pages < 512 npbl = 1, npde = 0 */ 303 u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ? 304 0 : PTU_PTE_VALID; 305 306 /* 1 level of indirection */ 307 npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT; 308 if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT)) 309 npbl++; 310 sginfo.npages = npbl; 311 sginfo.pgsize = PAGE_SIZE; 312 /* Alloc PBL page */ 313 rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo); 314 if (rc) 315 goto fail; 316 /* Alloc or init PTEs */ 317 rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], 318 hwq_attr->sginfo); 319 if (rc) 320 goto fail; 321 hwq->level = PBL_LVL_1; 322 if (hwq_attr->sginfo->nopte) 323 goto done; 324 /* Fill PBL with PTE pointers */ 325 dst_virt_ptr = 326 (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr; 327 src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr; 328 for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) 329 dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] = 330 src_phys_ptr[i] | flag; 331 if (hwq_attr->type == HWQ_TYPE_QUEUE) { 332 /* Find the last pg of the size */ 333 i = hwq->pbl[PBL_LVL_1].pg_count; 334 dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |= 335 PTU_PTE_LAST; 336 if (i > 1) 337 dst_virt_ptr[PTR_PG(i - 2)] 338 [PTR_IDX(i - 2)] |= 339 PTU_PTE_NEXT_TO_LAST; 340 } 341 } 342 } 343 done: 344 hwq->prod = 0; 345 hwq->cons = 0; 346 hwq->pdev = pdev; 347 hwq->depth = hwq_attr->depth; 348 hwq->max_elements = depth; 349 hwq->element_size = stride; 350 /* For direct access to the elements */ 351 lvl = hwq->level; 352 if (hwq_attr->sginfo->nopte && hwq->level) 353 lvl = hwq->level - 1; 354 hwq->pbl_ptr = hwq->pbl[lvl].pg_arr; 355 hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr; 356 spin_lock_init(&hwq->lock); 357 358 return 0; 359 fail: 360 bnxt_qplib_free_hwq(res, hwq); 361 return -ENOMEM; 362 } 363 364 /* Context Tables */ 365 void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res, 366 struct bnxt_qplib_ctx *ctx) 367 { 368 int i; 369 370 bnxt_qplib_free_hwq(res, &ctx->qpc_tbl); 371 bnxt_qplib_free_hwq(res, &ctx->mrw_tbl); 372 bnxt_qplib_free_hwq(res, &ctx->srqc_tbl); 373 bnxt_qplib_free_hwq(res, &ctx->cq_tbl); 374 bnxt_qplib_free_hwq(res, &ctx->tim_tbl); 375 for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) 376 bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]); 377 /* restore original pde level before destroy */ 378 ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level; 379 bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde); 380 bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats); 381 } 382 383 static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res, 384 struct bnxt_qplib_ctx *ctx) 385 { 386 struct bnxt_qplib_hwq_attr hwq_attr = {}; 387 struct bnxt_qplib_sg_info sginfo = {}; 388 struct bnxt_qplib_tqm_ctx *tqmctx; 389 int rc = 0; 390 int i; 391 392 tqmctx = &ctx->tqm_ctx; 393 394 sginfo.pgsize = PAGE_SIZE; 395 sginfo.pgshft = PAGE_SHIFT; 396 hwq_attr.sginfo = &sginfo; 397 hwq_attr.res = res; 398 hwq_attr.type = HWQ_TYPE_CTX; 399 hwq_attr.depth = 512; 400 hwq_attr.stride = sizeof(u64); 401 /* Alloc pdl buffer */ 402 rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr); 403 if (rc) 404 goto out; 405 /* Save original pdl level */ 406 tqmctx->pde_level = tqmctx->pde.level; 407 408 hwq_attr.stride = 1; 409 for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) { 410 if (!tqmctx->qcount[i]) 411 continue; 412 hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i]; 413 rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr); 414 if (rc) 415 goto out; 416 } 417 out: 418 return rc; 419 } 420 421 static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx) 422 { 423 struct bnxt_qplib_hwq *tbl; 424 dma_addr_t *dma_ptr; 425 __le64 **pbl_ptr, *ptr; 426 int i, j, k; 427 int fnz_idx = -1; 428 int pg_count; 429 430 pbl_ptr = (__le64 **)ctx->pde.pbl_ptr; 431 432 for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ; 433 i++, j += MAX_TQM_ALLOC_BLK_SIZE) { 434 tbl = &ctx->qtbl[i]; 435 if (!tbl->max_elements) 436 continue; 437 if (fnz_idx == -1) 438 fnz_idx = i; /* first non-zero index */ 439 switch (tbl->level) { 440 case PBL_LVL_2: 441 pg_count = tbl->pbl[PBL_LVL_1].pg_count; 442 for (k = 0; k < pg_count; k++) { 443 ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)]; 444 dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k]; 445 *ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID); 446 } 447 break; 448 case PBL_LVL_1: 449 case PBL_LVL_0: 450 default: 451 ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)]; 452 *ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] | 453 PTU_PTE_VALID); 454 break; 455 } 456 } 457 if (fnz_idx == -1) 458 fnz_idx = 0; 459 /* update pde level as per page table programming */ 460 ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 : 461 ctx->qtbl[fnz_idx].level + 1; 462 } 463 464 static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res, 465 struct bnxt_qplib_ctx *ctx) 466 { 467 int rc = 0; 468 469 rc = bnxt_qplib_alloc_tqm_rings(res, ctx); 470 if (rc) 471 goto fail; 472 473 bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx); 474 fail: 475 return rc; 476 } 477 478 /* 479 * Routine: bnxt_qplib_alloc_ctx 480 * Description: 481 * Context tables are memories which are used by the chip fw. 482 * The 6 tables defined are: 483 * QPC ctx - holds QP states 484 * MRW ctx - holds memory region and window 485 * SRQ ctx - holds shared RQ states 486 * CQ ctx - holds completion queue states 487 * TQM ctx - holds Tx Queue Manager context 488 * TIM ctx - holds timer context 489 * Depending on the size of the tbl requested, either a 1 Page Buffer List 490 * or a 1-to-2-stage indirection Page Directory List + 1 PBL is used 491 * instead. 492 * Table might be employed as follows: 493 * For 0 < ctx size <= 1 PAGE, 0 level of ind is used 494 * For 1 PAGE < ctx size <= 512 entries size, 1 level of ind is used 495 * For 512 < ctx size <= MAX, 2 levels of ind is used 496 * Returns: 497 * 0 if success, else -ERRORS 498 */ 499 int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res, 500 struct bnxt_qplib_ctx *ctx, 501 bool virt_fn, bool is_p5) 502 { 503 struct bnxt_qplib_hwq_attr hwq_attr = {}; 504 struct bnxt_qplib_sg_info sginfo = {}; 505 int rc = 0; 506 507 if (virt_fn || is_p5) 508 goto stats_alloc; 509 510 /* QPC Tables */ 511 sginfo.pgsize = PAGE_SIZE; 512 sginfo.pgshft = PAGE_SHIFT; 513 hwq_attr.sginfo = &sginfo; 514 515 hwq_attr.res = res; 516 hwq_attr.depth = ctx->qpc_count; 517 hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE; 518 hwq_attr.type = HWQ_TYPE_CTX; 519 rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr); 520 if (rc) 521 goto fail; 522 523 /* MRW Tables */ 524 hwq_attr.depth = ctx->mrw_count; 525 hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE; 526 rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr); 527 if (rc) 528 goto fail; 529 530 /* SRQ Tables */ 531 hwq_attr.depth = ctx->srqc_count; 532 hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE; 533 rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, &hwq_attr); 534 if (rc) 535 goto fail; 536 537 /* CQ Tables */ 538 hwq_attr.depth = ctx->cq_count; 539 hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE; 540 rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr); 541 if (rc) 542 goto fail; 543 544 /* TQM Buffer */ 545 rc = bnxt_qplib_setup_tqm_rings(res, ctx); 546 if (rc) 547 goto fail; 548 /* TIM Buffer */ 549 ctx->tim_tbl.max_elements = ctx->qpc_count * 16; 550 hwq_attr.depth = ctx->qpc_count * 16; 551 hwq_attr.stride = 1; 552 rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr); 553 if (rc) 554 goto fail; 555 stats_alloc: 556 /* Stats */ 557 rc = bnxt_qplib_alloc_stats_ctx(res->pdev, &ctx->stats); 558 if (rc) 559 goto fail; 560 561 return 0; 562 563 fail: 564 bnxt_qplib_free_ctx(res, ctx); 565 return rc; 566 } 567 568 /* GUID */ 569 void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid) 570 { 571 u8 mac[ETH_ALEN]; 572 573 /* MAC-48 to EUI-64 mapping */ 574 memcpy(mac, dev_addr, ETH_ALEN); 575 guid[0] = mac[0] ^ 2; 576 guid[1] = mac[1]; 577 guid[2] = mac[2]; 578 guid[3] = 0xff; 579 guid[4] = 0xfe; 580 guid[5] = mac[3]; 581 guid[6] = mac[4]; 582 guid[7] = mac[5]; 583 } 584 585 static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res, 586 struct bnxt_qplib_sgid_tbl *sgid_tbl) 587 { 588 kfree(sgid_tbl->tbl); 589 kfree(sgid_tbl->hw_id); 590 kfree(sgid_tbl->ctx); 591 kfree(sgid_tbl->vlan); 592 sgid_tbl->tbl = NULL; 593 sgid_tbl->hw_id = NULL; 594 sgid_tbl->ctx = NULL; 595 sgid_tbl->vlan = NULL; 596 sgid_tbl->max = 0; 597 sgid_tbl->active = 0; 598 } 599 600 static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res, 601 struct bnxt_qplib_sgid_tbl *sgid_tbl, 602 u16 max) 603 { 604 sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL); 605 if (!sgid_tbl->tbl) 606 return -ENOMEM; 607 608 sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL); 609 if (!sgid_tbl->hw_id) 610 goto out_free1; 611 612 sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL); 613 if (!sgid_tbl->ctx) 614 goto out_free2; 615 616 sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL); 617 if (!sgid_tbl->vlan) 618 goto out_free3; 619 620 sgid_tbl->max = max; 621 return 0; 622 out_free3: 623 kfree(sgid_tbl->ctx); 624 sgid_tbl->ctx = NULL; 625 out_free2: 626 kfree(sgid_tbl->hw_id); 627 sgid_tbl->hw_id = NULL; 628 out_free1: 629 kfree(sgid_tbl->tbl); 630 sgid_tbl->tbl = NULL; 631 return -ENOMEM; 632 }; 633 634 static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res, 635 struct bnxt_qplib_sgid_tbl *sgid_tbl) 636 { 637 int i; 638 639 for (i = 0; i < sgid_tbl->max; i++) { 640 if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero, 641 sizeof(bnxt_qplib_gid_zero))) 642 bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid, 643 sgid_tbl->tbl[i].vlan_id, true); 644 } 645 memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max); 646 memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); 647 memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max); 648 sgid_tbl->active = 0; 649 } 650 651 static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl, 652 struct net_device *netdev) 653 { 654 u32 i; 655 656 for (i = 0; i < sgid_tbl->max; i++) 657 sgid_tbl->tbl[i].vlan_id = 0xffff; 658 659 memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); 660 } 661 662 static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res, 663 struct bnxt_qplib_pkey_tbl *pkey_tbl) 664 { 665 if (!pkey_tbl->tbl) 666 dev_dbg(&res->pdev->dev, "PKEY tbl not present\n"); 667 else 668 kfree(pkey_tbl->tbl); 669 670 pkey_tbl->tbl = NULL; 671 pkey_tbl->max = 0; 672 pkey_tbl->active = 0; 673 } 674 675 static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res, 676 struct bnxt_qplib_pkey_tbl *pkey_tbl, 677 u16 max) 678 { 679 pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL); 680 if (!pkey_tbl->tbl) 681 return -ENOMEM; 682 683 pkey_tbl->max = max; 684 return 0; 685 }; 686 687 /* PDs */ 688 int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd) 689 { 690 u32 bit_num; 691 692 bit_num = find_first_bit(pdt->tbl, pdt->max); 693 if (bit_num == pdt->max) 694 return -ENOMEM; 695 696 /* Found unused PD */ 697 clear_bit(bit_num, pdt->tbl); 698 pd->id = bit_num; 699 return 0; 700 } 701 702 int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res, 703 struct bnxt_qplib_pd_tbl *pdt, 704 struct bnxt_qplib_pd *pd) 705 { 706 if (test_and_set_bit(pd->id, pdt->tbl)) { 707 dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n", 708 pd->id); 709 return -EINVAL; 710 } 711 pd->id = 0; 712 return 0; 713 } 714 715 static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt) 716 { 717 kfree(pdt->tbl); 718 pdt->tbl = NULL; 719 pdt->max = 0; 720 } 721 722 static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res, 723 struct bnxt_qplib_pd_tbl *pdt, 724 u32 max) 725 { 726 u32 bytes; 727 728 bytes = max >> 3; 729 if (!bytes) 730 bytes = 1; 731 pdt->tbl = kmalloc(bytes, GFP_KERNEL); 732 if (!pdt->tbl) 733 return -ENOMEM; 734 735 pdt->max = max; 736 memset((u8 *)pdt->tbl, 0xFF, bytes); 737 738 return 0; 739 } 740 741 /* DPIs */ 742 int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit, 743 struct bnxt_qplib_dpi *dpi, 744 void *app) 745 { 746 u32 bit_num; 747 748 bit_num = find_first_bit(dpit->tbl, dpit->max); 749 if (bit_num == dpit->max) 750 return -ENOMEM; 751 752 /* Found unused DPI */ 753 clear_bit(bit_num, dpit->tbl); 754 dpit->app_tbl[bit_num] = app; 755 756 dpi->dpi = bit_num; 757 dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE); 758 dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE); 759 760 return 0; 761 } 762 763 int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res, 764 struct bnxt_qplib_dpi_tbl *dpit, 765 struct bnxt_qplib_dpi *dpi) 766 { 767 if (dpi->dpi >= dpit->max) { 768 dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi); 769 return -EINVAL; 770 } 771 if (test_and_set_bit(dpi->dpi, dpit->tbl)) { 772 dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n", 773 dpi->dpi); 774 return -EINVAL; 775 } 776 if (dpit->app_tbl) 777 dpit->app_tbl[dpi->dpi] = NULL; 778 memset(dpi, 0, sizeof(*dpi)); 779 780 return 0; 781 } 782 783 static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res, 784 struct bnxt_qplib_dpi_tbl *dpit) 785 { 786 kfree(dpit->tbl); 787 kfree(dpit->app_tbl); 788 if (dpit->dbr_bar_reg_iomem) 789 pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem); 790 memset(dpit, 0, sizeof(*dpit)); 791 } 792 793 static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res, 794 struct bnxt_qplib_dpi_tbl *dpit, 795 u32 dbr_offset) 796 { 797 u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION; 798 resource_size_t bar_reg_base; 799 u32 dbr_len, bytes; 800 801 if (dpit->dbr_bar_reg_iomem) { 802 dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n", 803 dbr_bar_reg); 804 return -EALREADY; 805 } 806 807 bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg); 808 if (!bar_reg_base) { 809 dev_err(&res->pdev->dev, "BAR region %d resc start failed\n", 810 dbr_bar_reg); 811 return -ENOMEM; 812 } 813 814 dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset; 815 if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) { 816 dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len); 817 return -ENOMEM; 818 } 819 820 dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset, 821 dbr_len); 822 if (!dpit->dbr_bar_reg_iomem) { 823 dev_err(&res->pdev->dev, 824 "FP: DBR BAR region %d mapping failed\n", dbr_bar_reg); 825 return -ENOMEM; 826 } 827 828 dpit->unmapped_dbr = bar_reg_base + dbr_offset; 829 dpit->max = dbr_len / PAGE_SIZE; 830 831 dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL); 832 if (!dpit->app_tbl) 833 goto unmap_io; 834 835 bytes = dpit->max >> 3; 836 if (!bytes) 837 bytes = 1; 838 839 dpit->tbl = kmalloc(bytes, GFP_KERNEL); 840 if (!dpit->tbl) { 841 kfree(dpit->app_tbl); 842 dpit->app_tbl = NULL; 843 goto unmap_io; 844 } 845 846 memset((u8 *)dpit->tbl, 0xFF, bytes); 847 848 return 0; 849 850 unmap_io: 851 pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem); 852 return -ENOMEM; 853 } 854 855 /* PKEYs */ 856 static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl) 857 { 858 memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max); 859 pkey_tbl->active = 0; 860 } 861 862 static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res, 863 struct bnxt_qplib_pkey_tbl *pkey_tbl) 864 { 865 u16 pkey = 0xFFFF; 866 867 memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max); 868 869 /* pkey default = 0xFFFF */ 870 bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false); 871 } 872 873 /* Stats */ 874 static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev, 875 struct bnxt_qplib_stats *stats) 876 { 877 if (stats->dma) { 878 dma_free_coherent(&pdev->dev, stats->size, 879 stats->dma, stats->dma_map); 880 } 881 memset(stats, 0, sizeof(*stats)); 882 stats->fw_id = -1; 883 } 884 885 static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev, 886 struct bnxt_qplib_stats *stats) 887 { 888 memset(stats, 0, sizeof(*stats)); 889 stats->fw_id = -1; 890 /* 128 byte aligned context memory is required only for 57500. 891 * However making this unconditional, it does not harm previous 892 * generation. 893 */ 894 stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128); 895 stats->dma = dma_alloc_coherent(&pdev->dev, stats->size, 896 &stats->dma_map, GFP_KERNEL); 897 if (!stats->dma) { 898 dev_err(&pdev->dev, "Stats DMA allocation failed\n"); 899 return -ENOMEM; 900 } 901 return 0; 902 } 903 904 void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res) 905 { 906 bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl); 907 bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl); 908 } 909 910 int bnxt_qplib_init_res(struct bnxt_qplib_res *res) 911 { 912 bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev); 913 bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl); 914 915 return 0; 916 } 917 918 void bnxt_qplib_free_res(struct bnxt_qplib_res *res) 919 { 920 bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl); 921 bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl); 922 bnxt_qplib_free_pd_tbl(&res->pd_tbl); 923 bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl); 924 } 925 926 int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev, 927 struct net_device *netdev, 928 struct bnxt_qplib_dev_attr *dev_attr) 929 { 930 int rc = 0; 931 932 res->pdev = pdev; 933 res->netdev = netdev; 934 935 rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid); 936 if (rc) 937 goto fail; 938 939 rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey); 940 if (rc) 941 goto fail; 942 943 rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd); 944 if (rc) 945 goto fail; 946 947 rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size); 948 if (rc) 949 goto fail; 950 951 return 0; 952 fail: 953 bnxt_qplib_free_res(res); 954 return rc; 955 } 956