// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "cgx.h"

static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
	if (blkaddr == BLKADDR_NIX0 && hw->nix0)
		return hw->nix0;

	return NULL;
}

static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
			    int lvl, u16 pcifunc, u16 schq)
{
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return false;

	txsch = &nix_hw->txsch[lvl];
	/* Check out of bounds */
	if (schq >= txsch->schq.max)
		return false;

	spin_lock(&rvu->rsrc_lock);
	if (txsch->pfvf_map[schq] != pcifunc) {
		spin_unlock(&rvu->rsrc_lock);
		return false;
	}
	spin_unlock(&rvu->rsrc_lock);
	return true;
}

static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
				 u64 format, bool v4, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* IP's Length field */
	field.layer = NIX_TXLAYER_OL3;
	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
	field.offset = v4 ? 2 : 4;
	field.sizem1 = 1; /* i.e 2 bytes */
	field.alg = NIX_LSOALG_ADD_PAYLEN;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* No ID field in IPv6 header */
	if (!v4)
		return;

	/* IP's ID field */
	field.layer = NIX_TXLAYER_OL3;
	field.offset = 4;
	field.sizem1 = 1; /* i.e 2 bytes */
	field.alg = NIX_LSOALG_ADD_SEGNUM;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
				 u64 format, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* TCP's sequence number field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 4;
	field.sizem1 = 3; /* i.e 4 bytes */
	field.alg = NIX_LSOALG_ADD_OFFSET;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* TCP's flags field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 12;
	field.sizem1 = 0; /* not needed */
	field.alg = NIX_LSOALG_TCP_FLAGS;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso(struct rvu *rvu, int blkaddr)
{
	u64 cfg, idx, fidx = 0;

	/* Enable LSO */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
	/* For TSO, set first and middle segment flags to
	 * mask out PSH, RST & FIN flags in TCP packet
	 */
	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));

	/* Configure format fields for TCPv4 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV4;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}

	/* Configure format fields for TCPv6 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV6;
	fidx = 0;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
}

static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->rq_bmap);
	kfree(pfvf->sq_bmap);
	kfree(pfvf->cq_bmap);
	if (pfvf->rq_ctx)
		qmem_free(rvu->dev, pfvf->rq_ctx);
	if (pfvf->sq_ctx)
		qmem_free(rvu->dev, pfvf->sq_ctx);
	if (pfvf->cq_ctx)
		qmem_free(rvu->dev, pfvf->cq_ctx);
	if (pfvf->rss_ctx)
		qmem_free(rvu->dev, pfvf->rss_ctx);
	if (pfvf->nix_qints_ctx)
		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
	if (pfvf->cq_ints_ctx)
		qmem_free(rvu->dev, pfvf->cq_ints_ctx);

	pfvf->rq_bmap = NULL;
	pfvf->cq_bmap = NULL;
	pfvf->sq_bmap = NULL;
	pfvf->rq_ctx = NULL;
	pfvf->sq_ctx = NULL;
	pfvf->cq_ctx = NULL;
	pfvf->rss_ctx = NULL;
	pfvf->nix_qints_ctx = NULL;
	pfvf->cq_ints_ctx = NULL;
}

static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
			      struct rvu_pfvf *pfvf, int nixlf,
			      int rss_sz, int rss_grps, int hwctx_size)
{
	int err, grp, num_indices;

	/* RSS is not requested for this NIXLF */
	if (!rss_sz)
		return 0;
	num_indices = rss_sz * rss_grps;

	/* Alloc NIX RSS HW context memory and config the base */
	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
	if (err)
		return err;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
		    (u64)pfvf->rss_ctx->iova);

	/* Config full RSS table size, enable RSS and caching */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
		    BIT_ULL(36) | BIT_ULL(4) |
		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
	/* Config RSS group offset and sizes */
	for (grp = 0; grp < rss_grps; grp++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
	return 0;
}

static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct nix_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct nix_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;

	result = (struct nix_aq_res_s *)aq->res->base;

	/* Get current head pointer where to append this instruction */
	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NIX_AQ_COMP_GOOD)
		/* TODO: Replace this with some error code */
		return -EBUSY;

	return 0;
}

static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
			       struct nix_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, blkaddr, rc = 0;
	struct nix_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	void *ctx, *mask;
	bool ena;
	u64 cfg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
		return NIX_AF_ERR_AQ_ENQUEUE;
	}

	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	switch (req->ctype) {
	case NIX_AQ_CTYPE_RQ:
		/* Check if index exceeds max no of queues */
		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_RSS:
		/* Check if RSS is enabled and qidx is within range */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
	}

	if (rc)
		return rc;

	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
	if (req->ctype == NIX_AQ_CTYPE_SQ &&
	    req->op != NIX_AQ_INSTOP_WRITE) {
		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
				     pcifunc, req->sq.smq))
			return NIX_AF_ERR_AQ_ENQUEUE;
	}

	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
	inst.lf = nixlf;
	inst.cindex = req->qidx;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Currently we are not supporting enqueuing multiple instructions,
	 * so always choose first entry in result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;

	switch (req->op) {
	case NIX_AQ_INSTOP_WRITE:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(mask, &req->rq_mask,
			       sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(mask, &req->sq_mask,
			       sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(mask, &req->cq_mask,
			       sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(mask, &req->rss_mask,
			       sizeof(struct nix_rsse_s));
		/* Fall through */
	case NIX_AQ_INSTOP_INIT:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
		break;
	case NIX_AQ_INSTOP_NOP:
	case NIX_AQ_INSTOP_READ:
	case NIX_AQ_INSTOP_LOCK:
	case NIX_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
		return rc;
	}

	spin_lock(&aq->lock);

	/* Submit the instruction to AQ */
	rc = nix_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
	if (req->op == NIX_AQ_INSTOP_INIT) {
		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
			__set_bit(req->qidx, pfvf->rq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
			__set_bit(req->qidx, pfvf->sq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
			__set_bit(req->qidx, pfvf->cq_bmap);
	}

	if (req->op == NIX_AQ_INSTOP_WRITE) {
		if (req->ctype == NIX_AQ_CTYPE_RQ) {
			ena = (req->rq.ena & req->rq_mask.ena) |
			      (test_bit(req->qidx, pfvf->rq_bmap) &
			       ~req->rq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->rq_bmap);
			else
				__clear_bit(req->qidx, pfvf->rq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_SQ) {
			ena = (req->sq.ena & req->sq_mask.ena) |
			      (test_bit(req->qidx, pfvf->sq_bmap) &
			       ~req->sq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->sq_bmap);
			else
				__clear_bit(req->qidx, pfvf->sq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_CQ) {
			ena = (req->cq.ena & req->cq_mask.ena) |
			      (test_bit(req->qidx, pfvf->cq_bmap) &
			       ~req->cq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->cq_bmap);
			else
				__clear_bit(req->qidx, pfvf->cq_bmap);
		}
	}

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NIX_AQ_INSTOP_READ) {
			if (req->ctype == NIX_AQ_CTYPE_RQ)
				memcpy(&rsp->rq, ctx,
				       sizeof(struct nix_rq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_SQ)
				memcpy(&rsp->sq, ctx,
				       sizeof(struct nix_sq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_CQ)
				memcpy(&rsp->cq, ctx,
				       sizeof(struct nix_cq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_RSS)
				memcpy(&rsp->rss, ctx,
				       sizeof(struct nix_rsse_s));
		}
	}

	spin_unlock(&aq->lock);
	return 0;
}

static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct nix_aq_enq_req aq_req;
	unsigned long *bmap;
	int qidx, q_cnt = 0;
	int err = 0, rc;

	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
		return NIX_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NIX_AQ_CTYPE_CQ) {
		aq_req.cq.ena = 0;
		aq_req.cq_mask.ena = 1;
		q_cnt = pfvf->cq_ctx->qsize;
		bmap = pfvf->cq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_SQ) {
		aq_req.sq.ena = 0;
		aq_req.sq_mask.ena = 1;
		q_cnt = pfvf->sq_ctx->qsize;
		bmap = pfvf->sq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_RQ) {
		aq_req.rq.ena = 0;
		aq_req.rq_mask.ena = 1;
		q_cnt = pfvf->rq_ctx->qsize;
		bmap = pfvf->rq_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NIX_AQ_INSTOP_WRITE;

	for (qidx = 0; qidx < q_cnt; qidx++) {
		if (!test_bit(qidx, bmap))
			continue;
		aq_req.qidx = qidx;
		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				(req->ctype == NIX_AQ_CTYPE_CQ) ?
				"CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
				"RQ" : "SQ"), qidx);
		}
	}

	return err;
}

int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, req, rsp);
}

int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return nix_lf_hwctx_disable(rvu, req);
}

int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
				  struct nix_lf_alloc_req *req,
				  struct nix_lf_alloc_rsp *rsp)
{
	int nixlf, qints, hwctx_size, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
		return NIX_AF_ERR_PARAM;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* If RSS is being enabled, check if requested config is valid.
	 * RSS table size should be power of two, otherwise
	 * RSS_GRP::OFFSET + adder might go beyond that group or
	 * won't be able to use entire table.
	 */
	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
			    !is_power_of_2(req->rss_sz)))
		return NIX_AF_ERR_RSS_SIZE_INVALID;

	if (req->rss_sz &&
	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
		return NIX_AF_ERR_RSS_GRPS_INVALID;

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);

	/* Alloc NIX RQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->rq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
		    (u64)pfvf->rq_ctx->iova);

	/* Set caching and queue count in HW */
	cfg = BIT_ULL(36) | (req->rq_cnt - 1);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);

	/* Alloc NIX SQ HW context memory and config the base */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->sq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
		    (u64)pfvf->sq_ctx->iova);
	cfg = BIT_ULL(36) | (req->sq_cnt - 1);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);

	/* Alloc NIX CQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->cq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
		    (u64)pfvf->cq_ctx->iova);
	cfg = BIT_ULL(36) | (req->cq_cnt - 1);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);

	/* Initialize receive side scaling (RSS) */
	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
				 req->rss_sz, req->rss_grps, hwctx_size);
	if (err)
		goto free_mem;

	/* Alloc memory for CQINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 24) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
		    (u64)pfvf->cq_ints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));

	/* Alloc memory for QINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 12) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
		    (u64)pfvf->nix_qints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));

	/* Enable LMTST for this NIX LF */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));

	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC.
	 * If requester has sent a 'RVU_DEFAULT_PF_FUNC' use this NIX LF's
	 * PCIFUNC itself.
	 */
	if (req->npa_func == RVU_DEFAULT_PF_FUNC)
		cfg = pcifunc;
	else
		cfg = req->npa_func;

	if (req->sso_func == RVU_DEFAULT_PF_FUNC)
		cfg |= (u64)pcifunc << 16;
	else
		cfg |= (u64)req->sso_func << 16;

	cfg |= (u64)req->xqe_sz << 33;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);

	/* Config Rx pkt length, csum checks and apad enable / disable */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);

	goto exit;

free_mem:
	nix_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* Set macaddr of this PF/VF */
	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	/* set SQB size info */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
	return rc;
}

int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	nix_ctx_free(rvu, pfvf);

	return 0;
}

static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	struct nix_txsch *txsch;
	u64 cfg, reg;
	int err, lvl;

	/* Get scheduler queue count of each type and alloc
	 * bitmap for each for alloc/free/attach operations.
	 */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		txsch->lvl = lvl;
		switch (lvl) {
		case NIX_TXSCH_LVL_SMQ:
			reg = NIX_AF_MDQ_CONST;
			break;
		case NIX_TXSCH_LVL_TL4:
			reg = NIX_AF_TL4_CONST;
			break;
		case NIX_TXSCH_LVL_TL3:
			reg = NIX_AF_TL3_CONST;
			break;
		case NIX_TXSCH_LVL_TL2:
			reg = NIX_AF_TL2_CONST;
			break;
		case NIX_TXSCH_LVL_TL1:
			reg = NIX_AF_TL1_CONST;
			break;
		}
		cfg = rvu_read64(rvu, blkaddr, reg);
		txsch->schq.max = cfg & 0xFFFF;
		err = rvu_alloc_bitmap(&txsch->schq);
		if (err)
			return err;

		/* Allocate memory for scheduler queues to
		 * PF/VF pcifunc mapping info.
		 */
		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
					       sizeof(u16), GFP_KERNEL);
		if (!txsch->pfvf_map)
			return -ENOMEM;
	}
	return 0;
}

static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
{
	int idx, err;
	u64 status;

	/* Start X2P bus calibration */
	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
	/* Wait for calibration to complete */
	err = rvu_poll_reg(rvu, blkaddr,
			   NIX_AF_STATUS, BIT_ULL(10), false);
	if (err) {
		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
		return err;
	}

	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
	/* Check if CGX devices are ready */
	for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) {
		if (status & (BIT_ULL(16 + idx)))
			continue;
		dev_err(rvu->dev,
			"CGX%d didn't respond to NIX X2P calibration\n", idx);
		err = -EBUSY;
	}

	/* Check if LBK is ready */
	if (!(status & BIT_ULL(19))) {
		dev_err(rvu->dev,
			"LBK didn't respond to NIX X2P calibration\n");
		err = -EBUSY;
	}

	/* Clear 'calibrate_x2p' bit */
	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
	if (err || (status & 0x3FFULL))
		dev_err(rvu->dev,
			"NIX X2P calibration failed, status 0x%llx\n", status);
	if (err)
		return err;
	return 0;
}

static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
{
	u64 cfg;
	int err;

	/* Set admin queue endianness */
	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
#ifdef __BIG_ENDIAN
	cfg |= BIT_ULL(1);
	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#else
	cfg &= ~BIT_ULL(1);
	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#endif

	/* Do not bypass NDC cache */
	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
	cfg &= ~0x3FFEULL;
	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);

	/* Result structure can be followed by RQ/SQ/CQ context at
	 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
	 * operation type. Alloc sufficient result memory for all operations.
	 */
	err = rvu_aq_alloc(rvu, &block->aq,
			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
	if (err)
		return err;

	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
	rvu_write64(rvu, block->addr,
		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
	return 0;
}

int rvu_nix_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr, err;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return 0;
	block = &hw->block[blkaddr];

	/* Calibrate X2P bus to check if CGX/LBK links are fine */
	err = nix_calibrate_x2p(rvu, blkaddr);
	if (err)
		return err;

	/* Set num of links of each type */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	hw->cgx = (cfg >> 12) & 0xF;
	hw->lmac_per_cgx = (cfg >> 8) & 0xF;
	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
	hw->lbk_links = 1;
	hw->sdp_links = 1;

	/* Initialize admin queue */
	err = nix_aq_init(rvu, block);
	if (err)
		return err;

	/* Restore CINT timer delay to HW reset values */
	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);

	/* Configure segmentation offload formats */
	nix_setup_lso(rvu, blkaddr);

	if (blkaddr == BLKADDR_NIX0) {
		hw->nix0 = devm_kzalloc(rvu->dev,
					sizeof(struct nix_hw), GFP_KERNEL);
		if (!hw->nix0)
			return -ENOMEM;

		err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
		if (err)
			return err;
	}
	return 0;
}

void rvu_nix_freemem(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	int blkaddr, lvl;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	rvu_aq_free(rvu, block->aq);

	if (blkaddr == BLKADDR_NIX0) {
		nix_hw = get_nix_hw(rvu->hw, blkaddr);
		if (!nix_hw)
			return;

		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
			txsch = &nix_hw->txsch[lvl];
			kfree(txsch->schq.bmap);
		}
	}
}