1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell RVU Admin Function driver 3 * 4 * Copyright (C) 2018 Marvell. 5 * 6 */ 7 8 #include <linux/module.h> 9 #include <linux/pci.h> 10 11 #include "rvu_struct.h" 12 #include "rvu_reg.h" 13 #include "rvu.h" 14 #include "npc.h" 15 #include "mcs.h" 16 #include "cgx.h" 17 #include "lmac_common.h" 18 #include "rvu_npc_hash.h" 19 20 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc); 21 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, 22 int type, int chan_id); 23 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, 24 int type, bool add); 25 static int nix_setup_ipolicers(struct rvu *rvu, 26 struct nix_hw *nix_hw, int blkaddr); 27 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw); 28 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, 29 struct nix_hw *nix_hw, u16 pcifunc); 30 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc); 31 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, 32 u32 leaf_prof); 33 static const char *nix_get_ctx_name(int ctype); 34 35 enum mc_tbl_sz { 36 MC_TBL_SZ_256, 37 MC_TBL_SZ_512, 38 MC_TBL_SZ_1K, 39 MC_TBL_SZ_2K, 40 MC_TBL_SZ_4K, 41 MC_TBL_SZ_8K, 42 MC_TBL_SZ_16K, 43 MC_TBL_SZ_32K, 44 MC_TBL_SZ_64K, 45 }; 46 47 enum mc_buf_cnt { 48 MC_BUF_CNT_8, 49 MC_BUF_CNT_16, 50 MC_BUF_CNT_32, 51 MC_BUF_CNT_64, 52 MC_BUF_CNT_128, 53 MC_BUF_CNT_256, 54 MC_BUF_CNT_512, 55 MC_BUF_CNT_1024, 56 MC_BUF_CNT_2048, 57 }; 58 59 enum nix_makr_fmt_indexes { 60 NIX_MARK_CFG_IP_DSCP_RED, 61 NIX_MARK_CFG_IP_DSCP_YELLOW, 62 NIX_MARK_CFG_IP_DSCP_YELLOW_RED, 63 NIX_MARK_CFG_IP_ECN_RED, 64 NIX_MARK_CFG_IP_ECN_YELLOW, 65 NIX_MARK_CFG_IP_ECN_YELLOW_RED, 66 NIX_MARK_CFG_VLAN_DEI_RED, 67 NIX_MARK_CFG_VLAN_DEI_YELLOW, 68 NIX_MARK_CFG_VLAN_DEI_YELLOW_RED, 69 NIX_MARK_CFG_MAX, 70 }; 71 72 /* For now considering MC resources needed for broadcast 73 * pkt replication only. i.e 256 HWVFs + 12 PFs. 
74 */ 75 #define MC_TBL_SIZE MC_TBL_SZ_2K 76 #define MC_BUF_CNT MC_BUF_CNT_1024 77 78 #define MC_TX_MAX 2048 79 80 struct mce { 81 struct hlist_node node; 82 u32 rq_rss_index; 83 u16 pcifunc; 84 u16 channel; 85 u8 dest_type; 86 u8 is_active; 87 u8 reserved[2]; 88 }; 89 90 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr) 91 { 92 int i = 0; 93 94 /*If blkaddr is 0, return the first nix block address*/ 95 if (blkaddr == 0) 96 return rvu->nix_blkaddr[blkaddr]; 97 98 while (i + 1 < MAX_NIX_BLKS) { 99 if (rvu->nix_blkaddr[i] == blkaddr) 100 return rvu->nix_blkaddr[i + 1]; 101 i++; 102 } 103 104 return 0; 105 } 106 107 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc) 108 { 109 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 110 int blkaddr; 111 112 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 113 if (!pfvf->nixlf || blkaddr < 0) 114 return false; 115 return true; 116 } 117 118 int rvu_get_nixlf_count(struct rvu *rvu) 119 { 120 int blkaddr = 0, max = 0; 121 struct rvu_block *block; 122 123 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 124 while (blkaddr) { 125 block = &rvu->hw->block[blkaddr]; 126 max += block->lf.max; 127 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 128 } 129 return max; 130 } 131 132 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr) 133 { 134 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 135 struct rvu_hwinfo *hw = rvu->hw; 136 int blkaddr; 137 138 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 139 if (!pfvf->nixlf || blkaddr < 0) 140 return NIX_AF_ERR_AF_LF_INVALID; 141 142 *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 143 if (*nixlf < 0) 144 return NIX_AF_ERR_AF_LF_INVALID; 145 146 if (nix_blkaddr) 147 *nix_blkaddr = blkaddr; 148 149 return 0; 150 } 151 152 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc, 153 struct nix_hw **nix_hw, int *blkaddr) 154 { 155 struct rvu_pfvf *pfvf; 156 157 pfvf = rvu_get_pfvf(rvu, pcifunc); 158 *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 159 if (!pfvf->nixlf || *blkaddr < 0) 160 return NIX_AF_ERR_AF_LF_INVALID; 161 162 *nix_hw = get_nix_hw(rvu->hw, *blkaddr); 163 if (!*nix_hw) 164 return NIX_AF_ERR_INVALID_NIXBLK; 165 return 0; 166 } 167 168 static void nix_mce_list_init(struct nix_mce_list *list, int max) 169 { 170 INIT_HLIST_HEAD(&list->head); 171 list->count = 0; 172 list->max = max; 173 } 174 175 static int nix_alloc_mce_list(struct nix_mcast *mcast, int count, u8 dir) 176 { 177 struct rsrc_bmap *mce_counter; 178 int idx; 179 180 if (!mcast) 181 return -EINVAL; 182 183 mce_counter = &mcast->mce_counter[dir]; 184 if (!rvu_rsrc_check_contig(mce_counter, count)) 185 return -ENOSPC; 186 187 idx = rvu_alloc_rsrc_contig(mce_counter, count); 188 return idx; 189 } 190 191 static void nix_free_mce_list(struct nix_mcast *mcast, int count, int start, u8 dir) 192 { 193 struct rsrc_bmap *mce_counter; 194 195 if (!mcast) 196 return; 197 198 mce_counter = &mcast->mce_counter[dir]; 199 rvu_free_rsrc_contig(mce_counter, count, start); 200 } 201 202 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) 203 { 204 int nix_blkaddr = 0, i = 0; 205 struct rvu *rvu = hw->rvu; 206 207 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); 208 while (nix_blkaddr) { 209 if (blkaddr == nix_blkaddr && hw->nix) 210 return &hw->nix[i]; 211 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); 212 i++; 213 } 214 return NULL; 215 } 216 217 int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type) 218 { 219 if (hw->cap.nix_multiple_dwrr_mtu) 220 
return NIX_AF_DWRR_MTUX(smq_link_type); 221 222 if (smq_link_type == SMQ_LINK_TYPE_SDP) 223 return NIX_AF_DWRR_SDP_MTU; 224 225 /* Here it's same reg for RPM and LBK */ 226 return NIX_AF_DWRR_RPM_MTU; 227 } 228 229 u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu) 230 { 231 dwrr_mtu &= 0x1FULL; 232 233 /* MTU used for DWRR calculation is in power of 2 up until 64K bytes. 234 * Value of 4 is reserved for MTU value of 9728 bytes. 235 * Value of 5 is reserved for MTU value of 10240 bytes. 236 */ 237 switch (dwrr_mtu) { 238 case 4: 239 return 9728; 240 case 5: 241 return 10240; 242 default: 243 return BIT_ULL(dwrr_mtu); 244 } 245 246 return 0; 247 } 248 249 u32 convert_bytes_to_dwrr_mtu(u32 bytes) 250 { 251 /* MTU used for DWRR calculation is in power of 2 up until 64K bytes. 252 * Value of 4 is reserved for MTU value of 9728 bytes. 253 * Value of 5 is reserved for MTU value of 10240 bytes. 254 */ 255 if (bytes > BIT_ULL(16)) 256 return 0; 257 258 switch (bytes) { 259 case 9728: 260 return 4; 261 case 10240: 262 return 5; 263 default: 264 return ilog2(bytes); 265 } 266 267 return 0; 268 } 269 270 static void nix_rx_sync(struct rvu *rvu, int blkaddr) 271 { 272 int err; 273 274 /* Sync all in flight RX packets to LLC/DRAM */ 275 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); 276 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); 277 if (err) 278 dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n"); 279 280 /* SW_SYNC ensures all existing transactions are finished and pkts 281 * are written to LLC/DRAM, queues should be teared down after 282 * successful SW_SYNC. Due to a HW errata, in some rare scenarios 283 * an existing transaction might end after SW_SYNC operation. To 284 * ensure operation is fully done, do the SW_SYNC twice. 
285 */ 286 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); 287 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); 288 if (err) 289 dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n"); 290 } 291 292 static bool is_valid_txschq(struct rvu *rvu, int blkaddr, 293 int lvl, u16 pcifunc, u16 schq) 294 { 295 struct rvu_hwinfo *hw = rvu->hw; 296 struct nix_txsch *txsch; 297 struct nix_hw *nix_hw; 298 u16 map_func; 299 300 nix_hw = get_nix_hw(rvu->hw, blkaddr); 301 if (!nix_hw) 302 return false; 303 304 txsch = &nix_hw->txsch[lvl]; 305 /* Check out of bounds */ 306 if (schq >= txsch->schq.max) 307 return false; 308 309 mutex_lock(&rvu->rsrc_lock); 310 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); 311 mutex_unlock(&rvu->rsrc_lock); 312 313 /* TLs aggegating traffic are shared across PF and VFs */ 314 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 315 if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc)) 316 return false; 317 else 318 return true; 319 } 320 321 if (map_func != pcifunc) 322 return false; 323 324 return true; 325 } 326 327 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf, 328 struct nix_lf_alloc_rsp *rsp, bool loop) 329 { 330 struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc); 331 u16 req_chan_base, req_chan_end, req_chan_cnt; 332 struct rvu_hwinfo *hw = rvu->hw; 333 struct sdp_node_info *sdp_info; 334 int pkind, pf, vf, lbkid, vfid; 335 u8 cgx_id, lmac_id; 336 bool from_vf; 337 int err; 338 339 pf = rvu_get_pf(pcifunc); 340 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && 341 type != NIX_INTF_TYPE_SDP) 342 return 0; 343 344 switch (type) { 345 case NIX_INTF_TYPE_CGX: 346 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf]; 347 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); 348 349 pkind = rvu_npc_get_pkind(rvu, pf); 350 if (pkind < 0) { 351 dev_err(rvu->dev, 352 "PF_Func 0x%x: Invalid pkind\n", pcifunc); 353 return -EINVAL; 354 } 355 pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0); 356 pfvf->tx_chan_base = pfvf->rx_chan_base; 357 pfvf->rx_chan_cnt = 1; 358 pfvf->tx_chan_cnt = 1; 359 rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id; 360 361 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind); 362 rvu_npc_set_pkind(rvu, pkind, pfvf); 363 364 break; 365 case NIX_INTF_TYPE_LBK: 366 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; 367 368 /* If NIX1 block is present on the silicon then NIXes are 369 * assigned alternatively for lbk interfaces. NIX0 should 370 * send packets on lbk link 1 channels and NIX1 should send 371 * on lbk link 0 channels for the communication between 372 * NIX0 and NIX1. 373 */ 374 lbkid = 0; 375 if (rvu->hw->lbk_links > 1) 376 lbkid = vf & 0x1 ? 0 : 1; 377 378 /* By default NIX0 is configured to send packet on lbk link 1 379 * (which corresponds to LBK1), same packet will receive on 380 * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0 381 * (which corresponds to LBK2) packet will receive on NIX0 lbk 382 * link 1. 
383 * But if lbk links for NIX0 and NIX1 are negated, i.e NIX0 384 * transmits and receives on lbk link 0, whick corresponds 385 * to LBK1 block, back to back connectivity between NIX and 386 * LBK can be achieved (which is similar to 96xx) 387 * 388 * RX TX 389 * NIX0 lbk link 1 (LBK2) 1 (LBK1) 390 * NIX0 lbk link 0 (LBK0) 0 (LBK0) 391 * NIX1 lbk link 0 (LBK1) 0 (LBK2) 392 * NIX1 lbk link 1 (LBK3) 1 (LBK3) 393 */ 394 if (loop) 395 lbkid = !lbkid; 396 397 /* Note that AF's VFs work in pairs and talk over consecutive 398 * loopback channels.Therefore if odd number of AF VFs are 399 * enabled then the last VF remains with no pair. 400 */ 401 pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf); 402 pfvf->tx_chan_base = vf & 0x1 ? 403 rvu_nix_chan_lbk(rvu, lbkid, vf - 1) : 404 rvu_nix_chan_lbk(rvu, lbkid, vf + 1); 405 pfvf->rx_chan_cnt = 1; 406 pfvf->tx_chan_cnt = 1; 407 rsp->tx_link = hw->cgx_links + lbkid; 408 pfvf->lbkid = lbkid; 409 rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf); 410 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 411 pfvf->rx_chan_base, 412 pfvf->rx_chan_cnt); 413 414 break; 415 case NIX_INTF_TYPE_SDP: 416 from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); 417 parent_pf = &rvu->pf[rvu_get_pf(pcifunc)]; 418 sdp_info = parent_pf->sdp_info; 419 if (!sdp_info) { 420 dev_err(rvu->dev, "Invalid sdp_info pointer\n"); 421 return -EINVAL; 422 } 423 if (from_vf) { 424 req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn + 425 sdp_info->num_pf_rings; 426 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; 427 for (vfid = 0; vfid < vf; vfid++) 428 req_chan_base += sdp_info->vf_rings[vfid]; 429 req_chan_cnt = sdp_info->vf_rings[vf]; 430 req_chan_end = req_chan_base + req_chan_cnt - 1; 431 if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) || 432 req_chan_end > rvu_nix_chan_sdp(rvu, 255)) { 433 dev_err(rvu->dev, 434 "PF_Func 0x%x: Invalid channel base and count\n", 435 pcifunc); 436 return -EINVAL; 437 } 438 } else { 439 req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn; 440 req_chan_cnt = sdp_info->num_pf_rings; 441 } 442 443 pfvf->rx_chan_base = req_chan_base; 444 pfvf->rx_chan_cnt = req_chan_cnt; 445 pfvf->tx_chan_base = pfvf->rx_chan_base; 446 pfvf->tx_chan_cnt = pfvf->rx_chan_cnt; 447 448 rsp->tx_link = hw->cgx_links + hw->lbk_links; 449 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 450 pfvf->rx_chan_base, 451 pfvf->rx_chan_cnt); 452 break; 453 } 454 455 /* Add a UCAST forwarding rule in MCAM with this NIXLF attached 456 * RVU PF/VF's MAC address. 
457 */ 458 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 459 pfvf->rx_chan_base, pfvf->mac_addr); 460 461 /* Add this PF_FUNC to bcast pkt replication list */ 462 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true); 463 if (err) { 464 dev_err(rvu->dev, 465 "Bcast list, failed to enable PF_FUNC 0x%x\n", 466 pcifunc); 467 return err; 468 } 469 /* Install MCAM rule matching Ethernet broadcast mac address */ 470 rvu_npc_install_bcast_match_entry(rvu, pcifunc, 471 nixlf, pfvf->rx_chan_base); 472 473 pfvf->maxlen = NIC_HW_MIN_FRS; 474 pfvf->minlen = NIC_HW_MIN_FRS; 475 476 return 0; 477 } 478 479 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) 480 { 481 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 482 int err; 483 484 pfvf->maxlen = 0; 485 pfvf->minlen = 0; 486 487 /* Remove this PF_FUNC from bcast pkt replication list */ 488 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false); 489 if (err) { 490 dev_err(rvu->dev, 491 "Bcast list, failed to disable PF_FUNC 0x%x\n", 492 pcifunc); 493 } 494 495 /* Free and disable any MCAM entries used by this NIX LF */ 496 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 497 498 /* Disable DMAC filters used */ 499 rvu_cgx_disable_dmac_entries(rvu, pcifunc); 500 } 501 502 #define NIX_BPIDS_PER_LMAC 8 503 #define NIX_BPIDS_PER_CPT 1 504 static int nix_setup_bpids(struct rvu *rvu, struct nix_hw *hw, int blkaddr) 505 { 506 struct nix_bp *bp = &hw->bp; 507 int err, max_bpids; 508 u64 cfg; 509 510 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 511 max_bpids = FIELD_GET(NIX_CONST_MAX_BPIDS, cfg); 512 513 /* Reserve the BPIds for CGX and SDP */ 514 bp->cgx_bpid_cnt = rvu->hw->cgx_links * NIX_BPIDS_PER_LMAC; 515 bp->sdp_bpid_cnt = rvu->hw->sdp_links * FIELD_GET(NIX_CONST_SDP_CHANS, cfg); 516 bp->free_pool_base = bp->cgx_bpid_cnt + bp->sdp_bpid_cnt + 517 NIX_BPIDS_PER_CPT; 518 bp->bpids.max = max_bpids - bp->free_pool_base; 519 520 err = rvu_alloc_bitmap(&bp->bpids); 521 if (err) 522 return err; 523 524 bp->fn_map = devm_kcalloc(rvu->dev, bp->bpids.max, 525 sizeof(u16), GFP_KERNEL); 526 if (!bp->fn_map) 527 return -ENOMEM; 528 529 bp->intf_map = devm_kcalloc(rvu->dev, bp->bpids.max, 530 sizeof(u8), GFP_KERNEL); 531 if (!bp->intf_map) 532 return -ENOMEM; 533 534 bp->ref_cnt = devm_kcalloc(rvu->dev, bp->bpids.max, 535 sizeof(u8), GFP_KERNEL); 536 if (!bp->ref_cnt) 537 return -ENOMEM; 538 539 return 0; 540 } 541 542 void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc) 543 { 544 int blkaddr, bpid, err; 545 struct nix_hw *nix_hw; 546 struct nix_bp *bp; 547 548 if (!is_lbk_vf(rvu, pcifunc)) 549 return; 550 551 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 552 if (err) 553 return; 554 555 bp = &nix_hw->bp; 556 557 mutex_lock(&rvu->rsrc_lock); 558 for (bpid = 0; bpid < bp->bpids.max; bpid++) { 559 if (bp->fn_map[bpid] == pcifunc) { 560 bp->ref_cnt[bpid]--; 561 if (bp->ref_cnt[bpid]) 562 continue; 563 rvu_free_rsrc(&bp->bpids, bpid); 564 bp->fn_map[bpid] = 0; 565 } 566 } 567 mutex_unlock(&rvu->rsrc_lock); 568 } 569 570 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, 571 struct nix_bp_cfg_req *req, 572 struct msg_rsp *rsp) 573 { 574 u16 pcifunc = req->hdr.pcifunc; 575 int blkaddr, pf, type, err; 576 u16 chan_base, chan, bpid; 577 struct rvu_pfvf *pfvf; 578 struct nix_hw *nix_hw; 579 struct nix_bp *bp; 580 u64 cfg; 581 582 pf = rvu_get_pf(pcifunc); 583 type = is_lbk_vf(rvu, pcifunc) ? 
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 584 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) 585 return 0; 586 587 pfvf = rvu_get_pfvf(rvu, pcifunc); 588 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 589 if (err) 590 return err; 591 592 bp = &nix_hw->bp; 593 chan_base = pfvf->rx_chan_base + req->chan_base; 594 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { 595 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); 596 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), 597 cfg & ~BIT_ULL(16)); 598 599 if (type == NIX_INTF_TYPE_LBK) { 600 bpid = cfg & GENMASK(8, 0); 601 mutex_lock(&rvu->rsrc_lock); 602 rvu_free_rsrc(&bp->bpids, bpid - bp->free_pool_base); 603 for (bpid = 0; bpid < bp->bpids.max; bpid++) { 604 if (bp->fn_map[bpid] == pcifunc) { 605 bp->fn_map[bpid] = 0; 606 bp->ref_cnt[bpid] = 0; 607 } 608 } 609 mutex_unlock(&rvu->rsrc_lock); 610 } 611 } 612 return 0; 613 } 614 615 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, 616 int type, int chan_id) 617 { 618 int bpid, blkaddr, sdp_chan_base, err; 619 struct rvu_hwinfo *hw = rvu->hw; 620 struct rvu_pfvf *pfvf; 621 struct nix_hw *nix_hw; 622 u8 cgx_id, lmac_id; 623 struct nix_bp *bp; 624 625 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 626 627 err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); 628 if (err) 629 return err; 630 631 bp = &nix_hw->bp; 632 633 /* Backpressure IDs range division 634 * CGX channles are mapped to (0 - 191) BPIDs 635 * LBK channles are mapped to (192 - 255) BPIDs 636 * SDP channles are mapped to (256 - 511) BPIDs 637 * 638 * Lmac channles and bpids mapped as follows 639 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15) 640 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) .... 641 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) .... 
642 */ 643 switch (type) { 644 case NIX_INTF_TYPE_CGX: 645 if ((req->chan_base + req->chan_cnt) > NIX_BPIDS_PER_LMAC) 646 return NIX_AF_ERR_INVALID_BPID_REQ; 647 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); 648 /* Assign bpid based on cgx, lmac and chan id */ 649 bpid = (cgx_id * hw->lmac_per_cgx * NIX_BPIDS_PER_LMAC) + 650 (lmac_id * NIX_BPIDS_PER_LMAC) + req->chan_base; 651 652 if (req->bpid_per_chan) 653 bpid += chan_id; 654 if (bpid > bp->cgx_bpid_cnt) 655 return NIX_AF_ERR_INVALID_BPID; 656 break; 657 658 case NIX_INTF_TYPE_LBK: 659 /* Alloc bpid from the free pool */ 660 mutex_lock(&rvu->rsrc_lock); 661 bpid = rvu_alloc_rsrc(&bp->bpids); 662 if (bpid < 0) { 663 mutex_unlock(&rvu->rsrc_lock); 664 return NIX_AF_ERR_INVALID_BPID; 665 } 666 bp->fn_map[bpid] = req->hdr.pcifunc; 667 bp->ref_cnt[bpid]++; 668 bpid += bp->free_pool_base; 669 mutex_unlock(&rvu->rsrc_lock); 670 break; 671 case NIX_INTF_TYPE_SDP: 672 if ((req->chan_base + req->chan_cnt) > bp->sdp_bpid_cnt) 673 return NIX_AF_ERR_INVALID_BPID_REQ; 674 675 /* Handle usecase of 2 SDP blocks */ 676 if (!hw->cap.programmable_chans) 677 sdp_chan_base = pfvf->rx_chan_base - NIX_CHAN_SDP_CH_START; 678 else 679 sdp_chan_base = pfvf->rx_chan_base - hw->sdp_chan_base; 680 681 bpid = bp->cgx_bpid_cnt + req->chan_base + sdp_chan_base; 682 if (req->bpid_per_chan) 683 bpid += chan_id; 684 685 if (bpid > (bp->cgx_bpid_cnt + bp->sdp_bpid_cnt)) 686 return NIX_AF_ERR_INVALID_BPID; 687 break; 688 default: 689 return -EINVAL; 690 } 691 return bpid; 692 } 693 694 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, 695 struct nix_bp_cfg_req *req, 696 struct nix_bp_cfg_rsp *rsp) 697 { 698 int blkaddr, pf, type, chan_id = 0; 699 u16 pcifunc = req->hdr.pcifunc; 700 struct rvu_pfvf *pfvf; 701 u16 chan_base, chan; 702 s16 bpid, bpid_base; 703 u64 cfg; 704 705 pf = rvu_get_pf(pcifunc); 706 type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 707 if (is_sdp_pfvf(pcifunc)) 708 type = NIX_INTF_TYPE_SDP; 709 710 /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */ 711 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && 712 type != NIX_INTF_TYPE_SDP) 713 return 0; 714 715 pfvf = rvu_get_pfvf(rvu, pcifunc); 716 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 717 718 bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id); 719 chan_base = pfvf->rx_chan_base + req->chan_base; 720 bpid = bpid_base; 721 722 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { 723 if (bpid < 0) { 724 dev_warn(rvu->dev, "Fail to enable backpressure\n"); 725 return -EINVAL; 726 } 727 728 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); 729 cfg &= ~GENMASK_ULL(8, 0); 730 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), 731 cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16)); 732 chan_id++; 733 bpid = rvu_nix_get_bpid(rvu, req, type, chan_id); 734 } 735 736 for (chan = 0; chan < req->chan_cnt; chan++) { 737 /* Map channel and bpid assign to it */ 738 rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 | 739 (bpid_base & 0x3FF); 740 if (req->bpid_per_chan) 741 bpid_base++; 742 } 743 rsp->chan_cnt = req->chan_cnt; 744 745 return 0; 746 } 747 748 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr, 749 u64 format, bool v4, u64 *fidx) 750 { 751 struct nix_lso_format field = {0}; 752 753 /* IP's Length field */ 754 field.layer = NIX_TXLAYER_OL3; 755 /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */ 756 field.offset = v4 ? 
2 : 4; 757 field.sizem1 = 1; /* i.e 2 bytes */ 758 field.alg = NIX_LSOALG_ADD_PAYLEN; 759 rvu_write64(rvu, blkaddr, 760 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 761 *(u64 *)&field); 762 763 /* No ID field in IPv6 header */ 764 if (!v4) 765 return; 766 767 /* IP's ID field */ 768 field.layer = NIX_TXLAYER_OL3; 769 field.offset = 4; 770 field.sizem1 = 1; /* i.e 2 bytes */ 771 field.alg = NIX_LSOALG_ADD_SEGNUM; 772 rvu_write64(rvu, blkaddr, 773 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 774 *(u64 *)&field); 775 } 776 777 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr, 778 u64 format, u64 *fidx) 779 { 780 struct nix_lso_format field = {0}; 781 782 /* TCP's sequence number field */ 783 field.layer = NIX_TXLAYER_OL4; 784 field.offset = 4; 785 field.sizem1 = 3; /* i.e 4 bytes */ 786 field.alg = NIX_LSOALG_ADD_OFFSET; 787 rvu_write64(rvu, blkaddr, 788 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 789 *(u64 *)&field); 790 791 /* TCP's flags field */ 792 field.layer = NIX_TXLAYER_OL4; 793 field.offset = 12; 794 field.sizem1 = 1; /* 2 bytes */ 795 field.alg = NIX_LSOALG_TCP_FLAGS; 796 rvu_write64(rvu, blkaddr, 797 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 798 *(u64 *)&field); 799 } 800 801 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 802 { 803 u64 cfg, idx, fidx = 0; 804 805 /* Get max HW supported format indices */ 806 cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF; 807 nix_hw->lso.total = cfg; 808 809 /* Enable LSO */ 810 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG); 811 /* For TSO, set first and middle segment flags to 812 * mask out PSH, RST & FIN flags in TCP packet 813 */ 814 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16)); 815 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16); 816 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63)); 817 818 /* Setup default static LSO formats 819 * 820 * Configure format fields for TCPv4 segmentation offload 821 */ 822 idx = NIX_LSO_FORMAT_IDX_TSOV4; 823 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx); 824 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); 825 826 /* Set rest of the fields to NOP */ 827 for (; fidx < 8; fidx++) { 828 rvu_write64(rvu, blkaddr, 829 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); 830 } 831 nix_hw->lso.in_use++; 832 833 /* Configure format fields for TCPv6 segmentation offload */ 834 idx = NIX_LSO_FORMAT_IDX_TSOV6; 835 fidx = 0; 836 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx); 837 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); 838 839 /* Set rest of the fields to NOP */ 840 for (; fidx < 8; fidx++) { 841 rvu_write64(rvu, blkaddr, 842 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); 843 } 844 nix_hw->lso.in_use++; 845 } 846 847 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) 848 { 849 kfree(pfvf->rq_bmap); 850 kfree(pfvf->sq_bmap); 851 kfree(pfvf->cq_bmap); 852 if (pfvf->rq_ctx) 853 qmem_free(rvu->dev, pfvf->rq_ctx); 854 if (pfvf->sq_ctx) 855 qmem_free(rvu->dev, pfvf->sq_ctx); 856 if (pfvf->cq_ctx) 857 qmem_free(rvu->dev, pfvf->cq_ctx); 858 if (pfvf->rss_ctx) 859 qmem_free(rvu->dev, pfvf->rss_ctx); 860 if (pfvf->nix_qints_ctx) 861 qmem_free(rvu->dev, pfvf->nix_qints_ctx); 862 if (pfvf->cq_ints_ctx) 863 qmem_free(rvu->dev, pfvf->cq_ints_ctx); 864 865 pfvf->rq_bmap = NULL; 866 pfvf->cq_bmap = NULL; 867 pfvf->sq_bmap = NULL; 868 pfvf->rq_ctx = NULL; 869 pfvf->sq_ctx = NULL; 870 pfvf->cq_ctx = NULL; 871 pfvf->rss_ctx = NULL; 872 pfvf->nix_qints_ctx = NULL; 873 pfvf->cq_ints_ctx = NULL; 874 } 875 876 static int 
nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, 877 struct rvu_pfvf *pfvf, int nixlf, 878 int rss_sz, int rss_grps, int hwctx_size, 879 u64 way_mask, bool tag_lsb_as_adder) 880 { 881 int err, grp, num_indices; 882 u64 val; 883 884 /* RSS is not requested for this NIXLF */ 885 if (!rss_sz) 886 return 0; 887 num_indices = rss_sz * rss_grps; 888 889 /* Alloc NIX RSS HW context memory and config the base */ 890 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size); 891 if (err) 892 return err; 893 894 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf), 895 (u64)pfvf->rss_ctx->iova); 896 897 /* Config full RSS table size, enable RSS and caching */ 898 val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 | 899 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE); 900 901 if (tag_lsb_as_adder) 902 val |= BIT_ULL(5); 903 904 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val); 905 /* Config RSS group offset and sizes */ 906 for (grp = 0; grp < rss_grps; grp++) 907 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp), 908 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp)); 909 return 0; 910 } 911 912 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, 913 struct nix_aq_inst_s *inst) 914 { 915 struct admin_queue *aq = block->aq; 916 struct nix_aq_res_s *result; 917 int timeout = 1000; 918 u64 reg, head; 919 int ret; 920 921 result = (struct nix_aq_res_s *)aq->res->base; 922 923 /* Get current head pointer where to append this instruction */ 924 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS); 925 head = (reg >> 4) & AQ_PTR_MASK; 926 927 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)), 928 (void *)inst, aq->inst->entry_sz); 929 memset(result, 0, sizeof(*result)); 930 /* sync into memory */ 931 wmb(); 932 933 /* Ring the doorbell and wait for result */ 934 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1); 935 while (result->compcode == NIX_AQ_COMP_NOTDONE) { 936 cpu_relax(); 937 udelay(1); 938 timeout--; 939 if (!timeout) 940 return -EBUSY; 941 } 942 943 if (result->compcode != NIX_AQ_COMP_GOOD) { 944 /* TODO: Replace this with some error code */ 945 if (result->compcode == NIX_AQ_COMP_CTX_FAULT || 946 result->compcode == NIX_AQ_COMP_LOCKERR || 947 result->compcode == NIX_AQ_COMP_CTX_POISON) { 948 ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX); 949 ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX); 950 ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX); 951 ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX); 952 if (ret) 953 dev_err(rvu->dev, 954 "%s: Not able to unlock cachelines\n", __func__); 955 } 956 957 return -EBUSY; 958 } 959 960 return 0; 961 } 962 963 static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req, 964 u16 *smq, u16 *smq_mask) 965 { 966 struct nix_cn10k_aq_enq_req *aq_req; 967 968 if (!is_rvu_otx2(rvu)) { 969 aq_req = (struct nix_cn10k_aq_enq_req *)req; 970 *smq = aq_req->sq.smq; 971 *smq_mask = aq_req->sq_mask.smq; 972 } else { 973 *smq = req->sq.smq; 974 *smq_mask = req->sq_mask.smq; 975 } 976 } 977 978 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, 979 struct nix_aq_enq_req *req, 980 struct nix_aq_enq_rsp *rsp) 981 { 982 struct rvu_hwinfo *hw = rvu->hw; 983 u16 pcifunc = req->hdr.pcifunc; 984 int nixlf, blkaddr, rc = 0; 985 struct nix_aq_inst_s inst; 986 struct rvu_block *block; 987 struct admin_queue *aq; 988 struct rvu_pfvf *pfvf; 989 u16 smq, smq_mask; 990 void *ctx, *mask; 991 bool ena; 992 u64 cfg; 993 994 blkaddr = nix_hw->blkaddr; 
995 block = &hw->block[blkaddr]; 996 aq = block->aq; 997 if (!aq) { 998 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__); 999 return NIX_AF_ERR_AQ_ENQUEUE; 1000 } 1001 1002 pfvf = rvu_get_pfvf(rvu, pcifunc); 1003 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1004 1005 /* Skip NIXLF check for broadcast MCE entry and bandwidth profile 1006 * operations done by AF itself. 1007 */ 1008 if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) || 1009 (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) { 1010 if (!pfvf->nixlf || nixlf < 0) 1011 return NIX_AF_ERR_AF_LF_INVALID; 1012 } 1013 1014 switch (req->ctype) { 1015 case NIX_AQ_CTYPE_RQ: 1016 /* Check if index exceeds max no of queues */ 1017 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize) 1018 rc = NIX_AF_ERR_AQ_ENQUEUE; 1019 break; 1020 case NIX_AQ_CTYPE_SQ: 1021 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize) 1022 rc = NIX_AF_ERR_AQ_ENQUEUE; 1023 break; 1024 case NIX_AQ_CTYPE_CQ: 1025 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize) 1026 rc = NIX_AF_ERR_AQ_ENQUEUE; 1027 break; 1028 case NIX_AQ_CTYPE_RSS: 1029 /* Check if RSS is enabled and qidx is within range */ 1030 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf)); 1031 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx || 1032 (req->qidx >= (256UL << (cfg & 0xF)))) 1033 rc = NIX_AF_ERR_AQ_ENQUEUE; 1034 break; 1035 case NIX_AQ_CTYPE_MCE: 1036 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG); 1037 1038 /* Check if index exceeds MCE list length */ 1039 if (!nix_hw->mcast.mce_ctx || 1040 (req->qidx >= (256UL << (cfg & 0xF)))) 1041 rc = NIX_AF_ERR_AQ_ENQUEUE; 1042 1043 /* Adding multicast lists for requests from PF/VFs is not 1044 * yet supported, so ignore this. 1045 */ 1046 if (rsp) 1047 rc = NIX_AF_ERR_AQ_ENQUEUE; 1048 break; 1049 case NIX_AQ_CTYPE_BANDPROF: 1050 if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req, 1051 nix_hw, pcifunc)) 1052 rc = NIX_AF_ERR_INVALID_BANDPROF; 1053 break; 1054 default: 1055 rc = NIX_AF_ERR_AQ_ENQUEUE; 1056 } 1057 1058 if (rc) 1059 return rc; 1060 1061 nix_get_aq_req_smq(rvu, req, &smq, &smq_mask); 1062 /* Check if SQ pointed SMQ belongs to this PF/VF or not */ 1063 if (req->ctype == NIX_AQ_CTYPE_SQ && 1064 ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) || 1065 (req->op == NIX_AQ_INSTOP_WRITE && 1066 req->sq_mask.ena && req->sq.ena && smq_mask))) { 1067 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ, 1068 pcifunc, smq)) 1069 return NIX_AF_ERR_AQ_ENQUEUE; 1070 } 1071 1072 memset(&inst, 0, sizeof(struct nix_aq_inst_s)); 1073 inst.lf = nixlf; 1074 inst.cindex = req->qidx; 1075 inst.ctype = req->ctype; 1076 inst.op = req->op; 1077 /* Currently we are not supporting enqueuing multiple instructions, 1078 * so always choose first entry in result memory. 1079 */ 1080 inst.res_addr = (u64)aq->res->iova; 1081 1082 /* Hardware uses same aq->res->base for updating result of 1083 * previous instruction hence wait here till it is done. 
1084 */ 1085 spin_lock(&aq->lock); 1086 1087 /* Clean result + context memory */ 1088 memset(aq->res->base, 0, aq->res->entry_sz); 1089 /* Context needs to be written at RES_ADDR + 128 */ 1090 ctx = aq->res->base + 128; 1091 /* Mask needs to be written at RES_ADDR + 256 */ 1092 mask = aq->res->base + 256; 1093 1094 switch (req->op) { 1095 case NIX_AQ_INSTOP_WRITE: 1096 if (req->ctype == NIX_AQ_CTYPE_RQ) 1097 memcpy(mask, &req->rq_mask, 1098 sizeof(struct nix_rq_ctx_s)); 1099 else if (req->ctype == NIX_AQ_CTYPE_SQ) 1100 memcpy(mask, &req->sq_mask, 1101 sizeof(struct nix_sq_ctx_s)); 1102 else if (req->ctype == NIX_AQ_CTYPE_CQ) 1103 memcpy(mask, &req->cq_mask, 1104 sizeof(struct nix_cq_ctx_s)); 1105 else if (req->ctype == NIX_AQ_CTYPE_RSS) 1106 memcpy(mask, &req->rss_mask, 1107 sizeof(struct nix_rsse_s)); 1108 else if (req->ctype == NIX_AQ_CTYPE_MCE) 1109 memcpy(mask, &req->mce_mask, 1110 sizeof(struct nix_rx_mce_s)); 1111 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 1112 memcpy(mask, &req->prof_mask, 1113 sizeof(struct nix_bandprof_s)); 1114 fallthrough; 1115 case NIX_AQ_INSTOP_INIT: 1116 if (req->ctype == NIX_AQ_CTYPE_RQ) 1117 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); 1118 else if (req->ctype == NIX_AQ_CTYPE_SQ) 1119 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s)); 1120 else if (req->ctype == NIX_AQ_CTYPE_CQ) 1121 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s)); 1122 else if (req->ctype == NIX_AQ_CTYPE_RSS) 1123 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s)); 1124 else if (req->ctype == NIX_AQ_CTYPE_MCE) 1125 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s)); 1126 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 1127 memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s)); 1128 break; 1129 case NIX_AQ_INSTOP_NOP: 1130 case NIX_AQ_INSTOP_READ: 1131 case NIX_AQ_INSTOP_LOCK: 1132 case NIX_AQ_INSTOP_UNLOCK: 1133 break; 1134 default: 1135 rc = NIX_AF_ERR_AQ_ENQUEUE; 1136 spin_unlock(&aq->lock); 1137 return rc; 1138 } 1139 1140 /* Submit the instruction to AQ */ 1141 rc = nix_aq_enqueue_wait(rvu, block, &inst); 1142 if (rc) { 1143 spin_unlock(&aq->lock); 1144 return rc; 1145 } 1146 1147 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */ 1148 if (req->op == NIX_AQ_INSTOP_INIT) { 1149 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena) 1150 __set_bit(req->qidx, pfvf->rq_bmap); 1151 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena) 1152 __set_bit(req->qidx, pfvf->sq_bmap); 1153 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena) 1154 __set_bit(req->qidx, pfvf->cq_bmap); 1155 } 1156 1157 if (req->op == NIX_AQ_INSTOP_WRITE) { 1158 if (req->ctype == NIX_AQ_CTYPE_RQ) { 1159 ena = (req->rq.ena & req->rq_mask.ena) | 1160 (test_bit(req->qidx, pfvf->rq_bmap) & 1161 ~req->rq_mask.ena); 1162 if (ena) 1163 __set_bit(req->qidx, pfvf->rq_bmap); 1164 else 1165 __clear_bit(req->qidx, pfvf->rq_bmap); 1166 } 1167 if (req->ctype == NIX_AQ_CTYPE_SQ) { 1168 ena = (req->rq.ena & req->sq_mask.ena) | 1169 (test_bit(req->qidx, pfvf->sq_bmap) & 1170 ~req->sq_mask.ena); 1171 if (ena) 1172 __set_bit(req->qidx, pfvf->sq_bmap); 1173 else 1174 __clear_bit(req->qidx, pfvf->sq_bmap); 1175 } 1176 if (req->ctype == NIX_AQ_CTYPE_CQ) { 1177 ena = (req->rq.ena & req->cq_mask.ena) | 1178 (test_bit(req->qidx, pfvf->cq_bmap) & 1179 ~req->cq_mask.ena); 1180 if (ena) 1181 __set_bit(req->qidx, pfvf->cq_bmap); 1182 else 1183 __clear_bit(req->qidx, pfvf->cq_bmap); 1184 } 1185 } 1186 1187 if (rsp) { 1188 /* Copy read context into mailbox */ 1189 if (req->op == NIX_AQ_INSTOP_READ) { 1190 if (req->ctype == 
NIX_AQ_CTYPE_RQ) 1191 memcpy(&rsp->rq, ctx, 1192 sizeof(struct nix_rq_ctx_s)); 1193 else if (req->ctype == NIX_AQ_CTYPE_SQ) 1194 memcpy(&rsp->sq, ctx, 1195 sizeof(struct nix_sq_ctx_s)); 1196 else if (req->ctype == NIX_AQ_CTYPE_CQ) 1197 memcpy(&rsp->cq, ctx, 1198 sizeof(struct nix_cq_ctx_s)); 1199 else if (req->ctype == NIX_AQ_CTYPE_RSS) 1200 memcpy(&rsp->rss, ctx, 1201 sizeof(struct nix_rsse_s)); 1202 else if (req->ctype == NIX_AQ_CTYPE_MCE) 1203 memcpy(&rsp->mce, ctx, 1204 sizeof(struct nix_rx_mce_s)); 1205 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 1206 memcpy(&rsp->prof, ctx, 1207 sizeof(struct nix_bandprof_s)); 1208 } 1209 } 1210 1211 spin_unlock(&aq->lock); 1212 return 0; 1213 } 1214 1215 static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw, 1216 struct nix_aq_enq_req *req, u8 ctype) 1217 { 1218 struct nix_cn10k_aq_enq_req aq_req; 1219 struct nix_cn10k_aq_enq_rsp aq_rsp; 1220 int rc, word; 1221 1222 if (req->ctype != NIX_AQ_CTYPE_CQ) 1223 return 0; 1224 1225 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 1226 req->hdr.pcifunc, ctype, req->qidx); 1227 if (rc) { 1228 dev_err(rvu->dev, 1229 "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n", 1230 __func__, nix_get_ctx_name(ctype), req->qidx, 1231 req->hdr.pcifunc); 1232 return rc; 1233 } 1234 1235 /* Make copy of original context & mask which are required 1236 * for resubmission 1237 */ 1238 memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s)); 1239 memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s)); 1240 1241 /* exclude fields which HW can update */ 1242 aq_req.cq_mask.cq_err = 0; 1243 aq_req.cq_mask.wrptr = 0; 1244 aq_req.cq_mask.tail = 0; 1245 aq_req.cq_mask.head = 0; 1246 aq_req.cq_mask.avg_level = 0; 1247 aq_req.cq_mask.update_time = 0; 1248 aq_req.cq_mask.substream = 0; 1249 1250 /* Context mask (cq_mask) holds mask value of fields which 1251 * are changed in AQ WRITE operation. 1252 * for example cq.drop = 0xa; 1253 * cq_mask.drop = 0xff; 1254 * Below logic performs '&' between cq and cq_mask so that non 1255 * updated fields are masked out for request and response 1256 * comparison 1257 */ 1258 for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64); 1259 word++) { 1260 *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &= 1261 (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); 1262 *(u64 *)((u8 *)&aq_req.cq + word * 8) &= 1263 (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); 1264 } 1265 1266 if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s))) 1267 return NIX_AF_ERR_AQ_CTX_RETRY_WRITE; 1268 1269 return 0; 1270 } 1271 1272 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, 1273 struct nix_aq_enq_rsp *rsp) 1274 { 1275 struct nix_hw *nix_hw; 1276 int err, retries = 5; 1277 int blkaddr; 1278 1279 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); 1280 if (blkaddr < 0) 1281 return NIX_AF_ERR_AF_LF_INVALID; 1282 1283 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1284 if (!nix_hw) 1285 return NIX_AF_ERR_INVALID_NIXBLK; 1286 1287 retry: 1288 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); 1289 1290 /* HW errata 'AQ Modification to CQ could be discarded on heavy traffic' 1291 * As a work around perfrom CQ context read after each AQ write. If AQ 1292 * read shows AQ write is not updated perform AQ write again. 
1293 */ 1294 if (!err && req->op == NIX_AQ_INSTOP_WRITE) { 1295 err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ); 1296 if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) { 1297 if (retries--) 1298 goto retry; 1299 else 1300 return NIX_AF_ERR_CQ_CTX_WRITE_ERR; 1301 } 1302 } 1303 1304 return err; 1305 } 1306 1307 static const char *nix_get_ctx_name(int ctype) 1308 { 1309 switch (ctype) { 1310 case NIX_AQ_CTYPE_CQ: 1311 return "CQ"; 1312 case NIX_AQ_CTYPE_SQ: 1313 return "SQ"; 1314 case NIX_AQ_CTYPE_RQ: 1315 return "RQ"; 1316 case NIX_AQ_CTYPE_RSS: 1317 return "RSS"; 1318 } 1319 return ""; 1320 } 1321 1322 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) 1323 { 1324 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 1325 struct nix_aq_enq_req aq_req; 1326 unsigned long *bmap; 1327 int qidx, q_cnt = 0; 1328 int err = 0, rc; 1329 1330 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx) 1331 return NIX_AF_ERR_AQ_ENQUEUE; 1332 1333 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); 1334 aq_req.hdr.pcifunc = req->hdr.pcifunc; 1335 1336 if (req->ctype == NIX_AQ_CTYPE_CQ) { 1337 aq_req.cq.ena = 0; 1338 aq_req.cq_mask.ena = 1; 1339 aq_req.cq.bp_ena = 0; 1340 aq_req.cq_mask.bp_ena = 1; 1341 q_cnt = pfvf->cq_ctx->qsize; 1342 bmap = pfvf->cq_bmap; 1343 } 1344 if (req->ctype == NIX_AQ_CTYPE_SQ) { 1345 aq_req.sq.ena = 0; 1346 aq_req.sq_mask.ena = 1; 1347 q_cnt = pfvf->sq_ctx->qsize; 1348 bmap = pfvf->sq_bmap; 1349 } 1350 if (req->ctype == NIX_AQ_CTYPE_RQ) { 1351 aq_req.rq.ena = 0; 1352 aq_req.rq_mask.ena = 1; 1353 q_cnt = pfvf->rq_ctx->qsize; 1354 bmap = pfvf->rq_bmap; 1355 } 1356 1357 aq_req.ctype = req->ctype; 1358 aq_req.op = NIX_AQ_INSTOP_WRITE; 1359 1360 for (qidx = 0; qidx < q_cnt; qidx++) { 1361 if (!test_bit(qidx, bmap)) 1362 continue; 1363 aq_req.qidx = qidx; 1364 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL); 1365 if (rc) { 1366 err = rc; 1367 dev_err(rvu->dev, "Failed to disable %s:%d context\n", 1368 nix_get_ctx_name(req->ctype), qidx); 1369 } 1370 } 1371 1372 return err; 1373 } 1374 1375 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 1376 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req) 1377 { 1378 struct nix_aq_enq_req lock_ctx_req; 1379 int err; 1380 1381 if (req->op != NIX_AQ_INSTOP_INIT) 1382 return 0; 1383 1384 if (req->ctype == NIX_AQ_CTYPE_MCE || 1385 req->ctype == NIX_AQ_CTYPE_DYNO) 1386 return 0; 1387 1388 memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req)); 1389 lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc; 1390 lock_ctx_req.ctype = req->ctype; 1391 lock_ctx_req.op = NIX_AQ_INSTOP_LOCK; 1392 lock_ctx_req.qidx = req->qidx; 1393 err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL); 1394 if (err) 1395 dev_err(rvu->dev, 1396 "PFUNC 0x%x: Failed to lock NIX %s:%d context\n", 1397 req->hdr.pcifunc, 1398 nix_get_ctx_name(req->ctype), req->qidx); 1399 return err; 1400 } 1401 1402 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 1403 struct nix_aq_enq_req *req, 1404 struct nix_aq_enq_rsp *rsp) 1405 { 1406 int err; 1407 1408 err = rvu_nix_aq_enq_inst(rvu, req, rsp); 1409 if (!err) 1410 err = nix_lf_hwctx_lockdown(rvu, req); 1411 return err; 1412 } 1413 #else 1414 1415 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 1416 struct nix_aq_enq_req *req, 1417 struct nix_aq_enq_rsp *rsp) 1418 { 1419 return rvu_nix_aq_enq_inst(rvu, req, rsp); 1420 } 1421 #endif 1422 /* CN10K mbox handler */ 1423 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu, 1424 struct nix_cn10k_aq_enq_req *req, 1425 struct nix_cn10k_aq_enq_rsp *rsp) 1426 { 
1427 return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req, 1428 (struct nix_aq_enq_rsp *)rsp); 1429 } 1430 1431 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, 1432 struct hwctx_disable_req *req, 1433 struct msg_rsp *rsp) 1434 { 1435 return nix_lf_hwctx_disable(rvu, req); 1436 } 1437 1438 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, 1439 struct nix_lf_alloc_req *req, 1440 struct nix_lf_alloc_rsp *rsp) 1441 { 1442 int nixlf, qints, hwctx_size, intf, err, rc = 0; 1443 struct rvu_hwinfo *hw = rvu->hw; 1444 u16 pcifunc = req->hdr.pcifunc; 1445 struct rvu_block *block; 1446 struct rvu_pfvf *pfvf; 1447 u64 cfg, ctx_cfg; 1448 int blkaddr; 1449 1450 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt) 1451 return NIX_AF_ERR_PARAM; 1452 1453 if (req->way_mask) 1454 req->way_mask &= 0xFFFF; 1455 1456 pfvf = rvu_get_pfvf(rvu, pcifunc); 1457 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1458 if (!pfvf->nixlf || blkaddr < 0) 1459 return NIX_AF_ERR_AF_LF_INVALID; 1460 1461 block = &hw->block[blkaddr]; 1462 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1463 if (nixlf < 0) 1464 return NIX_AF_ERR_AF_LF_INVALID; 1465 1466 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */ 1467 if (req->npa_func) { 1468 /* If default, use 'this' NIXLF's PFFUNC */ 1469 if (req->npa_func == RVU_DEFAULT_PF_FUNC) 1470 req->npa_func = pcifunc; 1471 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA)) 1472 return NIX_AF_INVAL_NPA_PF_FUNC; 1473 } 1474 1475 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */ 1476 if (req->sso_func) { 1477 /* If default, use 'this' NIXLF's PFFUNC */ 1478 if (req->sso_func == RVU_DEFAULT_PF_FUNC) 1479 req->sso_func = pcifunc; 1480 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO)) 1481 return NIX_AF_INVAL_SSO_PF_FUNC; 1482 } 1483 1484 /* If RSS is being enabled, check if requested config is valid. 1485 * RSS table size should be power of two, otherwise 1486 * RSS_GRP::OFFSET + adder might go beyond that group or 1487 * won't be able to use entire table. 
1488 */ 1489 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE || 1490 !is_power_of_2(req->rss_sz))) 1491 return NIX_AF_ERR_RSS_SIZE_INVALID; 1492 1493 if (req->rss_sz && 1494 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS)) 1495 return NIX_AF_ERR_RSS_GRPS_INVALID; 1496 1497 /* Reset this NIX LF */ 1498 err = rvu_lf_reset(rvu, block, nixlf); 1499 if (err) { 1500 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", 1501 block->addr - BLKADDR_NIX0, nixlf); 1502 return NIX_AF_ERR_LF_RESET; 1503 } 1504 1505 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3); 1506 1507 /* Alloc NIX RQ HW context memory and config the base */ 1508 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF); 1509 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size); 1510 if (err) 1511 goto free_mem; 1512 1513 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL); 1514 if (!pfvf->rq_bmap) 1515 goto free_mem; 1516 1517 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf), 1518 (u64)pfvf->rq_ctx->iova); 1519 1520 /* Set caching and queue count in HW */ 1521 cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20; 1522 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg); 1523 1524 /* Alloc NIX SQ HW context memory and config the base */ 1525 hwctx_size = 1UL << (ctx_cfg & 0xF); 1526 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size); 1527 if (err) 1528 goto free_mem; 1529 1530 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL); 1531 if (!pfvf->sq_bmap) 1532 goto free_mem; 1533 1534 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf), 1535 (u64)pfvf->sq_ctx->iova); 1536 1537 cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20; 1538 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg); 1539 1540 /* Alloc NIX CQ HW context memory and config the base */ 1541 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF); 1542 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size); 1543 if (err) 1544 goto free_mem; 1545 1546 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL); 1547 if (!pfvf->cq_bmap) 1548 goto free_mem; 1549 1550 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf), 1551 (u64)pfvf->cq_ctx->iova); 1552 1553 cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20; 1554 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg); 1555 1556 /* Initialize receive side scaling (RSS) */ 1557 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF); 1558 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz, 1559 req->rss_grps, hwctx_size, req->way_mask, 1560 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER)); 1561 if (err) 1562 goto free_mem; 1563 1564 /* Alloc memory for CQINT's HW contexts */ 1565 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1566 qints = (cfg >> 24) & 0xFFF; 1567 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF); 1568 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size); 1569 if (err) 1570 goto free_mem; 1571 1572 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf), 1573 (u64)pfvf->cq_ints_ctx->iova); 1574 1575 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), 1576 BIT_ULL(36) | req->way_mask << 20); 1577 1578 /* Alloc memory for QINT's HW contexts */ 1579 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1580 qints = (cfg >> 12) & 0xFFF; 1581 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF); 1582 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size); 1583 if (err) 1584 goto free_mem; 1585 1586 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf), 1587 (u64)pfvf->nix_qints_ctx->iova); 1588 rvu_write64(rvu, 
blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), 1589 BIT_ULL(36) | req->way_mask << 20); 1590 1591 /* Setup VLANX TPID's. 1592 * Use VLAN1 for 802.1Q 1593 * and VLAN0 for 802.1AD. 1594 */ 1595 cfg = (0x8100ULL << 16) | 0x88A8ULL; 1596 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 1597 1598 /* Enable LMTST for this NIX LF */ 1599 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0)); 1600 1601 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */ 1602 if (req->npa_func) 1603 cfg = req->npa_func; 1604 if (req->sso_func) 1605 cfg |= (u64)req->sso_func << 16; 1606 1607 cfg |= (u64)req->xqe_sz << 33; 1608 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg); 1609 1610 /* Config Rx pkt length, csum checks and apad enable / disable */ 1611 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg); 1612 1613 /* Configure pkind for TX parse config */ 1614 cfg = NPC_TX_DEF_PKIND; 1615 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg); 1616 1617 intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 1618 if (is_sdp_pfvf(pcifunc)) 1619 intf = NIX_INTF_TYPE_SDP; 1620 1621 err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp, 1622 !!(req->flags & NIX_LF_LBK_BLK_SEL)); 1623 if (err) 1624 goto free_mem; 1625 1626 /* Disable NPC entries as NIXLF's contexts are not initialized yet */ 1627 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); 1628 1629 /* Configure RX VTAG Type 7 (strip) for vf vlan */ 1630 rvu_write64(rvu, blkaddr, 1631 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7), 1632 VTAGSIZE_T4 | VTAG_STRIP); 1633 1634 goto exit; 1635 1636 free_mem: 1637 nix_ctx_free(rvu, pfvf); 1638 rc = -ENOMEM; 1639 1640 exit: 1641 /* Set macaddr of this PF/VF */ 1642 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 1643 1644 /* set SQB size info */ 1645 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST); 1646 rsp->sqb_size = (cfg >> 34) & 0xFFFF; 1647 rsp->rx_chan_base = pfvf->rx_chan_base; 1648 rsp->tx_chan_base = pfvf->tx_chan_base; 1649 rsp->rx_chan_cnt = pfvf->rx_chan_cnt; 1650 rsp->tx_chan_cnt = pfvf->tx_chan_cnt; 1651 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4; 1652 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6; 1653 /* Get HW supported stat count */ 1654 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 1655 rsp->lf_rx_stats = ((cfg >> 32) & 0xFF); 1656 rsp->lf_tx_stats = ((cfg >> 24) & 0xFF); 1657 /* Get count of CQ IRQs and error IRQs supported per LF */ 1658 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1659 rsp->qints = ((cfg >> 12) & 0xFFF); 1660 rsp->cints = ((cfg >> 24) & 0xFFF); 1661 rsp->cgx_links = hw->cgx_links; 1662 rsp->lbk_links = hw->lbk_links; 1663 rsp->sdp_links = hw->sdp_links; 1664 1665 return rc; 1666 } 1667 1668 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req, 1669 struct msg_rsp *rsp) 1670 { 1671 struct rvu_hwinfo *hw = rvu->hw; 1672 u16 pcifunc = req->hdr.pcifunc; 1673 struct rvu_block *block; 1674 int blkaddr, nixlf, err; 1675 struct rvu_pfvf *pfvf; 1676 1677 pfvf = rvu_get_pfvf(rvu, pcifunc); 1678 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1679 if (!pfvf->nixlf || blkaddr < 0) 1680 return NIX_AF_ERR_AF_LF_INVALID; 1681 1682 block = &hw->block[blkaddr]; 1683 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1684 if (nixlf < 0) 1685 return NIX_AF_ERR_AF_LF_INVALID; 1686 1687 if (req->flags & NIX_LF_DISABLE_FLOWS) 1688 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 1689 else 1690 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); 1691 1692 /* Free any tx vtag def entries used 
by this NIX LF */ 1693 if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG)) 1694 nix_free_tx_vtag_entries(rvu, pcifunc); 1695 1696 nix_interface_deinit(rvu, pcifunc, nixlf); 1697 1698 /* Reset this NIX LF */ 1699 err = rvu_lf_reset(rvu, block, nixlf); 1700 if (err) { 1701 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", 1702 block->addr - BLKADDR_NIX0, nixlf); 1703 return NIX_AF_ERR_LF_RESET; 1704 } 1705 1706 nix_ctx_free(rvu, pfvf); 1707 1708 return 0; 1709 } 1710 1711 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, 1712 struct nix_mark_format_cfg *req, 1713 struct nix_mark_format_cfg_rsp *rsp) 1714 { 1715 u16 pcifunc = req->hdr.pcifunc; 1716 struct nix_hw *nix_hw; 1717 struct rvu_pfvf *pfvf; 1718 int blkaddr, rc; 1719 u32 cfg; 1720 1721 pfvf = rvu_get_pfvf(rvu, pcifunc); 1722 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1723 if (!pfvf->nixlf || blkaddr < 0) 1724 return NIX_AF_ERR_AF_LF_INVALID; 1725 1726 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1727 if (!nix_hw) 1728 return NIX_AF_ERR_INVALID_NIXBLK; 1729 1730 cfg = (((u32)req->offset & 0x7) << 16) | 1731 (((u32)req->y_mask & 0xF) << 12) | 1732 (((u32)req->y_val & 0xF) << 8) | 1733 (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF); 1734 1735 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg); 1736 if (rc < 0) { 1737 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)", 1738 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 1739 return NIX_AF_ERR_MARK_CFG_FAIL; 1740 } 1741 1742 rsp->mark_format_idx = rc; 1743 return 0; 1744 } 1745 1746 /* Handle shaper update specially for few revisions */ 1747 static bool 1748 handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf, 1749 int lvl, u64 reg, u64 regval) 1750 { 1751 u64 regbase, oldval, sw_xoff = 0; 1752 u64 dbgval, md_debug0 = 0; 1753 unsigned long poll_tmo; 1754 bool rate_reg = 0; 1755 u32 schq; 1756 1757 regbase = reg & 0xFFFF; 1758 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 1759 1760 /* Check for rate register */ 1761 switch (lvl) { 1762 case NIX_TXSCH_LVL_TL1: 1763 md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq); 1764 sw_xoff = NIX_AF_TL1X_SW_XOFF(schq); 1765 1766 rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0)); 1767 break; 1768 case NIX_TXSCH_LVL_TL2: 1769 md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq); 1770 sw_xoff = NIX_AF_TL2X_SW_XOFF(schq); 1771 1772 rate_reg = (regbase == NIX_AF_TL2X_CIR(0) || 1773 regbase == NIX_AF_TL2X_PIR(0)); 1774 break; 1775 case NIX_TXSCH_LVL_TL3: 1776 md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq); 1777 sw_xoff = NIX_AF_TL3X_SW_XOFF(schq); 1778 1779 rate_reg = (regbase == NIX_AF_TL3X_CIR(0) || 1780 regbase == NIX_AF_TL3X_PIR(0)); 1781 break; 1782 case NIX_TXSCH_LVL_TL4: 1783 md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq); 1784 sw_xoff = NIX_AF_TL4X_SW_XOFF(schq); 1785 1786 rate_reg = (regbase == NIX_AF_TL4X_CIR(0) || 1787 regbase == NIX_AF_TL4X_PIR(0)); 1788 break; 1789 case NIX_TXSCH_LVL_MDQ: 1790 sw_xoff = NIX_AF_MDQX_SW_XOFF(schq); 1791 rate_reg = (regbase == NIX_AF_MDQX_CIR(0) || 1792 regbase == NIX_AF_MDQX_PIR(0)); 1793 break; 1794 } 1795 1796 if (!rate_reg) 1797 return false; 1798 1799 /* Nothing special to do when state is not toggled */ 1800 oldval = rvu_read64(rvu, blkaddr, reg); 1801 if ((oldval & 0x1) == (regval & 0x1)) { 1802 rvu_write64(rvu, blkaddr, reg, regval); 1803 return true; 1804 } 1805 1806 /* PIR/CIR disable */ 1807 if (!(regval & 0x1)) { 1808 rvu_write64(rvu, blkaddr, sw_xoff, 1); 1809 rvu_write64(rvu, blkaddr, reg, 0); 1810 udelay(4); 1811 rvu_write64(rvu, blkaddr, sw_xoff, 0); 1812 return true; 1813 } 1814 1815 /* 
PIR/CIR enable */ 1816 rvu_write64(rvu, blkaddr, sw_xoff, 1); 1817 if (md_debug0) { 1818 poll_tmo = jiffies + usecs_to_jiffies(10000); 1819 /* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */ 1820 do { 1821 if (time_after(jiffies, poll_tmo)) { 1822 dev_err(rvu->dev, 1823 "NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n", 1824 nixlf, schq, lvl); 1825 goto exit; 1826 } 1827 usleep_range(1, 5); 1828 dbgval = rvu_read64(rvu, blkaddr, md_debug0); 1829 } while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48))); 1830 } 1831 rvu_write64(rvu, blkaddr, reg, regval); 1832 exit: 1833 rvu_write64(rvu, blkaddr, sw_xoff, 0); 1834 return true; 1835 } 1836 1837 static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr, 1838 int lvl, int schq) 1839 { 1840 u64 tlx_parent = 0, tlx_schedule = 0; 1841 1842 switch (lvl) { 1843 case NIX_TXSCH_LVL_TL2: 1844 tlx_parent = NIX_AF_TL2X_PARENT(schq); 1845 tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq); 1846 break; 1847 case NIX_TXSCH_LVL_TL3: 1848 tlx_parent = NIX_AF_TL3X_PARENT(schq); 1849 tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq); 1850 break; 1851 case NIX_TXSCH_LVL_TL4: 1852 tlx_parent = NIX_AF_TL4X_PARENT(schq); 1853 tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq); 1854 break; 1855 case NIX_TXSCH_LVL_MDQ: 1856 /* no need to reset SMQ_CFG as HW clears this CSR 1857 * on SMQ flush 1858 */ 1859 tlx_parent = NIX_AF_MDQX_PARENT(schq); 1860 tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq); 1861 break; 1862 default: 1863 return; 1864 } 1865 1866 if (tlx_parent) 1867 rvu_write64(rvu, blkaddr, tlx_parent, 0x0); 1868 1869 if (tlx_schedule) 1870 rvu_write64(rvu, blkaddr, tlx_schedule, 0x0); 1871 } 1872 1873 /* Disable shaping of pkts by a scheduler queue 1874 * at a given scheduler level. 1875 */ 1876 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, 1877 int nixlf, int lvl, int schq) 1878 { 1879 struct rvu_hwinfo *hw = rvu->hw; 1880 u64 cir_reg = 0, pir_reg = 0; 1881 u64 cfg; 1882 1883 switch (lvl) { 1884 case NIX_TXSCH_LVL_TL1: 1885 cir_reg = NIX_AF_TL1X_CIR(schq); 1886 pir_reg = 0; /* PIR not available at TL1 */ 1887 break; 1888 case NIX_TXSCH_LVL_TL2: 1889 cir_reg = NIX_AF_TL2X_CIR(schq); 1890 pir_reg = NIX_AF_TL2X_PIR(schq); 1891 break; 1892 case NIX_TXSCH_LVL_TL3: 1893 cir_reg = NIX_AF_TL3X_CIR(schq); 1894 pir_reg = NIX_AF_TL3X_PIR(schq); 1895 break; 1896 case NIX_TXSCH_LVL_TL4: 1897 cir_reg = NIX_AF_TL4X_CIR(schq); 1898 pir_reg = NIX_AF_TL4X_PIR(schq); 1899 break; 1900 case NIX_TXSCH_LVL_MDQ: 1901 cir_reg = NIX_AF_MDQX_CIR(schq); 1902 pir_reg = NIX_AF_MDQX_PIR(schq); 1903 break; 1904 } 1905 1906 /* Shaper state toggle needs wait/poll */ 1907 if (hw->cap.nix_shaper_toggle_wait) { 1908 if (cir_reg) 1909 handle_txschq_shaper_update(rvu, blkaddr, nixlf, 1910 lvl, cir_reg, 0); 1911 if (pir_reg) 1912 handle_txschq_shaper_update(rvu, blkaddr, nixlf, 1913 lvl, pir_reg, 0); 1914 return; 1915 } 1916 1917 if (!cir_reg) 1918 return; 1919 cfg = rvu_read64(rvu, blkaddr, cir_reg); 1920 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0)); 1921 1922 if (!pir_reg) 1923 return; 1924 cfg = rvu_read64(rvu, blkaddr, pir_reg); 1925 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0)); 1926 } 1927 1928 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, 1929 int lvl, int schq) 1930 { 1931 struct rvu_hwinfo *hw = rvu->hw; 1932 int link_level; 1933 int link; 1934 1935 if (lvl >= hw->cap.nix_tx_aggr_lvl) 1936 return; 1937 1938 /* Reset TL4's SDP link config */ 1939 if (lvl == NIX_TXSCH_LVL_TL4) 1940 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00); 1941 1942 
link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 1943 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 1944 if (lvl != link_level) 1945 return; 1946 1947 /* Reset TL2's CGX or LBK link config */ 1948 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) 1949 rvu_write64(rvu, blkaddr, 1950 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); 1951 } 1952 1953 static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr, 1954 int lvl, int schq) 1955 { 1956 struct rvu_hwinfo *hw = rvu->hw; 1957 u64 reg; 1958 1959 /* Skip this if shaping is not supported */ 1960 if (!hw->cap.nix_shaping) 1961 return; 1962 1963 /* Clear level specific SW_XOFF */ 1964 switch (lvl) { 1965 case NIX_TXSCH_LVL_TL1: 1966 reg = NIX_AF_TL1X_SW_XOFF(schq); 1967 break; 1968 case NIX_TXSCH_LVL_TL2: 1969 reg = NIX_AF_TL2X_SW_XOFF(schq); 1970 break; 1971 case NIX_TXSCH_LVL_TL3: 1972 reg = NIX_AF_TL3X_SW_XOFF(schq); 1973 break; 1974 case NIX_TXSCH_LVL_TL4: 1975 reg = NIX_AF_TL4X_SW_XOFF(schq); 1976 break; 1977 case NIX_TXSCH_LVL_MDQ: 1978 reg = NIX_AF_MDQX_SW_XOFF(schq); 1979 break; 1980 default: 1981 return; 1982 } 1983 1984 rvu_write64(rvu, blkaddr, reg, 0x0); 1985 } 1986 1987 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) 1988 { 1989 struct rvu_hwinfo *hw = rvu->hw; 1990 int pf = rvu_get_pf(pcifunc); 1991 u8 cgx_id = 0, lmac_id = 0; 1992 1993 if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */ 1994 return hw->cgx_links; 1995 } else if (is_pf_cgxmapped(rvu, pf)) { 1996 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 1997 return (cgx_id * hw->lmac_per_cgx) + lmac_id; 1998 } 1999 2000 /* SDP link */ 2001 return hw->cgx_links + hw->lbk_links; 2002 } 2003 2004 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc, 2005 int link, int *start, int *end) 2006 { 2007 struct rvu_hwinfo *hw = rvu->hw; 2008 int pf = rvu_get_pf(pcifunc); 2009 2010 if (is_lbk_vf(rvu, pcifunc)) { /* LBK links */ 2011 *start = hw->cap.nix_txsch_per_cgx_lmac * link; 2012 *end = *start + hw->cap.nix_txsch_per_lbk_lmac; 2013 } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */ 2014 *start = hw->cap.nix_txsch_per_cgx_lmac * link; 2015 *end = *start + hw->cap.nix_txsch_per_cgx_lmac; 2016 } else { /* SDP link */ 2017 *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) + 2018 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links); 2019 *end = *start + hw->cap.nix_txsch_per_sdp_lmac; 2020 } 2021 } 2022 2023 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc, 2024 struct nix_hw *nix_hw, 2025 struct nix_txsch_alloc_req *req) 2026 { 2027 struct rvu_hwinfo *hw = rvu->hw; 2028 int schq, req_schq, free_cnt; 2029 struct nix_txsch *txsch; 2030 int link, start, end; 2031 2032 txsch = &nix_hw->txsch[lvl]; 2033 req_schq = req->schq_contig[lvl] + req->schq[lvl]; 2034 2035 if (!req_schq) 2036 return 0; 2037 2038 link = nix_get_tx_link(rvu, pcifunc); 2039 2040 /* For traffic aggregating scheduler level, one queue is enough */ 2041 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 2042 if (req_schq != 1) 2043 return NIX_AF_ERR_TLX_ALLOC_FAIL; 2044 return 0; 2045 } 2046 2047 /* Get free SCHQ count and check if request can be accomodated */ 2048 if (hw->cap.nix_fixed_txschq_mapping) { 2049 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 2050 schq = start + (pcifunc & RVU_PFVF_FUNC_MASK); 2051 if (end <= txsch->schq.max && schq < end && 2052 !test_bit(schq, txsch->schq.bmap)) 2053 free_cnt = 1; 2054 else 2055 free_cnt = 0; 2056 } else { 2057 free_cnt = rvu_rsrc_free_count(&txsch->schq); 2058 } 2059 2060 if (free_cnt 
< req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
	    req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	/* If contiguous queues are needed, check for availability */
	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	return 0;
}

static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
			    struct nix_txsch_alloc_rsp *rsp,
			    int lvl, int start, int end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = rsp->hdr.pcifunc;
	int idx, schq;

	/* For traffic aggregating levels, queue alloc is based
	 * on transmit link to which PF_FUNC is mapped to.
	 */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		/* A single TL queue is allocated */
		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			rsp->schq_contig_list[lvl][0] = start;
		}

		/* Both contig and non-contig reqs don't make sense here */
		if (rsp->schq_contig[lvl])
			rsp->schq[lvl] = 0;

		if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			rsp->schq_list[lvl][0] = start;
		}
		return;
	}

	/* Adjust the queue request count if HW supports
	 * only one queue per level configuration.
	 */
	if (hw->cap.nix_fixed_txschq_mapping) {
		idx = pcifunc & RVU_PFVF_FUNC_MASK;
		schq = start + idx;
		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
			rsp->schq_contig[lvl] = 0;
			rsp->schq[lvl] = 0;
			return;
		}

		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][0] = schq;
			rsp->schq[lvl] = 0;
		} else if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_list[lvl][0] = schq;
		}
		return;
	}

	/* Allocate contiguous queue indices requested first */
	if (rsp->schq_contig[lvl]) {
		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
						  txsch->schq.max, start,
						  rsp->schq_contig[lvl], 0);
		if (schq >= end)
			rsp->schq_contig[lvl] = 0;
		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][idx] = schq;
			schq++;
		}
	}

	/* Allocate non-contiguous queue indices */
	if (rsp->schq[lvl]) {
		idx = 0;
		for (schq = start; schq < end; schq++) {
			if (!test_bit(schq, txsch->schq.bmap)) {
				set_bit(schq, txsch->schq.bmap);
				rsp->schq_list[lvl][idx++] = schq;
			}
			if (idx == rsp->schq[lvl])
				break;
		}
		/* Update how many were allocated */
		rsp->schq[lvl] = idx;
	}
}

int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
				     struct nix_txsch_alloc_req *req,
				     struct nix_txsch_alloc_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int link, blkaddr, rc = 0;
	int lvl, idx, start, end;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;
	int nixlf;
	u16 schq;

	rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (rc)
		return rc;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	mutex_lock(&rvu->rsrc_lock);

	/* Check if request is valid as per HW capabilities
	 * and can be accommodated.
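	 * Per-level counts are capped at MAX_TXSCHQ_PER_FUNC, and levels at
	 * or above the aggregation level may request only a single queue.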
2182 */ 2183 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2184 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req); 2185 if (rc) 2186 goto err; 2187 } 2188 2189 /* Allocate requested Tx scheduler queues */ 2190 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2191 txsch = &nix_hw->txsch[lvl]; 2192 pfvf_map = txsch->pfvf_map; 2193 2194 if (!req->schq[lvl] && !req->schq_contig[lvl]) 2195 continue; 2196 2197 rsp->schq[lvl] = req->schq[lvl]; 2198 rsp->schq_contig[lvl] = req->schq_contig[lvl]; 2199 2200 link = nix_get_tx_link(rvu, pcifunc); 2201 2202 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 2203 start = link; 2204 end = link; 2205 } else if (hw->cap.nix_fixed_txschq_mapping) { 2206 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 2207 } else { 2208 start = 0; 2209 end = txsch->schq.max; 2210 } 2211 2212 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end); 2213 2214 /* Reset queue config */ 2215 for (idx = 0; idx < req->schq_contig[lvl]; idx++) { 2216 schq = rsp->schq_contig_list[lvl][idx]; 2217 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 2218 NIX_TXSCHQ_CFG_DONE)) 2219 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 2220 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2221 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2222 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2223 } 2224 2225 for (idx = 0; idx < req->schq[lvl]; idx++) { 2226 schq = rsp->schq_list[lvl][idx]; 2227 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 2228 NIX_TXSCHQ_CFG_DONE)) 2229 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 2230 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2231 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2232 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2233 } 2234 } 2235 2236 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl; 2237 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO; 2238 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr, 2239 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 
2240 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 2241 goto exit; 2242 err: 2243 rc = NIX_AF_ERR_TLX_ALLOC_FAIL; 2244 exit: 2245 mutex_unlock(&rvu->rsrc_lock); 2246 return rc; 2247 } 2248 2249 static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq, 2250 struct nix_smq_flush_ctx *smq_flush_ctx) 2251 { 2252 struct nix_smq_tree_ctx *smq_tree_ctx; 2253 u64 parent_off, regval; 2254 u16 schq; 2255 int lvl; 2256 2257 smq_flush_ctx->smq = smq; 2258 2259 schq = smq; 2260 for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) { 2261 smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl]; 2262 if (lvl == NIX_TXSCH_LVL_TL1) { 2263 smq_flush_ctx->tl1_schq = schq; 2264 smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq); 2265 smq_tree_ctx->pir_off = 0; 2266 smq_tree_ctx->pir_val = 0; 2267 parent_off = 0; 2268 } else if (lvl == NIX_TXSCH_LVL_TL2) { 2269 smq_flush_ctx->tl2_schq = schq; 2270 smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq); 2271 smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq); 2272 parent_off = NIX_AF_TL2X_PARENT(schq); 2273 } else if (lvl == NIX_TXSCH_LVL_TL3) { 2274 smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq); 2275 smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq); 2276 parent_off = NIX_AF_TL3X_PARENT(schq); 2277 } else if (lvl == NIX_TXSCH_LVL_TL4) { 2278 smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq); 2279 smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq); 2280 parent_off = NIX_AF_TL4X_PARENT(schq); 2281 } else if (lvl == NIX_TXSCH_LVL_MDQ) { 2282 smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq); 2283 smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq); 2284 parent_off = NIX_AF_MDQX_PARENT(schq); 2285 } 2286 /* save cir/pir register values */ 2287 smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off); 2288 if (smq_tree_ctx->pir_off) 2289 smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off); 2290 2291 /* get parent txsch node */ 2292 if (parent_off) { 2293 regval = rvu_read64(rvu, blkaddr, parent_off); 2294 schq = (regval >> 16) & 0x1FF; 2295 } 2296 } 2297 } 2298 2299 static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr, 2300 struct nix_smq_flush_ctx *smq_flush_ctx, bool enable) 2301 { 2302 struct nix_txsch *txsch; 2303 struct nix_hw *nix_hw; 2304 u64 regoff; 2305 int tl2; 2306 2307 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2308 if (!nix_hw) 2309 return; 2310 2311 /* loop through all TL2s with matching PF_FUNC */ 2312 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2]; 2313 for (tl2 = 0; tl2 < txsch->schq.max; tl2++) { 2314 /* skip the smq(flush) TL2 */ 2315 if (tl2 == smq_flush_ctx->tl2_schq) 2316 continue; 2317 /* skip unused TL2s */ 2318 if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE) 2319 continue; 2320 /* skip if PF_FUNC doesn't match */ 2321 if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) != 2322 (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] & 2323 ~RVU_PFVF_FUNC_MASK))) 2324 continue; 2325 /* enable/disable XOFF */ 2326 regoff = NIX_AF_TL2X_SW_XOFF(tl2); 2327 if (enable) 2328 rvu_write64(rvu, blkaddr, regoff, 0x1); 2329 else 2330 rvu_write64(rvu, blkaddr, regoff, 0x0); 2331 } 2332 } 2333 2334 static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr, 2335 struct nix_smq_flush_ctx *smq_flush_ctx, bool enable) 2336 { 2337 u64 cir_off, pir_off, cir_val, pir_val; 2338 struct nix_smq_tree_ctx *smq_tree_ctx; 2339 int lvl; 2340 2341 for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) { 2342 smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl]; 2343 cir_off = smq_tree_ctx->cir_off; 2344 cir_val = 
smq_tree_ctx->cir_val; 2345 pir_off = smq_tree_ctx->pir_off; 2346 pir_val = smq_tree_ctx->pir_val; 2347 2348 if (enable) { 2349 rvu_write64(rvu, blkaddr, cir_off, cir_val); 2350 if (lvl != NIX_TXSCH_LVL_TL1) 2351 rvu_write64(rvu, blkaddr, pir_off, pir_val); 2352 } else { 2353 rvu_write64(rvu, blkaddr, cir_off, 0x0); 2354 if (lvl != NIX_TXSCH_LVL_TL1) 2355 rvu_write64(rvu, blkaddr, pir_off, 0x0); 2356 } 2357 } 2358 } 2359 2360 static int nix_smq_flush(struct rvu *rvu, int blkaddr, 2361 int smq, u16 pcifunc, int nixlf) 2362 { 2363 struct nix_smq_flush_ctx *smq_flush_ctx; 2364 int pf = rvu_get_pf(pcifunc); 2365 u8 cgx_id = 0, lmac_id = 0; 2366 int err, restore_tx_en = 0; 2367 u64 cfg; 2368 2369 if (!is_rvu_otx2(rvu)) { 2370 /* Skip SMQ flush if pkt count is zero */ 2371 cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq)); 2372 if (!cfg) 2373 return 0; 2374 } 2375 2376 /* enable cgx tx if disabled */ 2377 if (is_pf_cgxmapped(rvu, pf)) { 2378 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 2379 restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), 2380 lmac_id, true); 2381 } 2382 2383 /* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */ 2384 smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL); 2385 if (!smq_flush_ctx) 2386 return -ENOMEM; 2387 nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx); 2388 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true); 2389 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false); 2390 2391 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); 2392 /* Do SMQ flush and set enqueue xoff */ 2393 cfg |= BIT_ULL(50) | BIT_ULL(49); 2394 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); 2395 2396 /* Disable backpressure from physical link, 2397 * otherwise SMQ flush may stall. 
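	 * RX backpressure is re-enabled once the flush poll below completes.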
2398 */ 2399 rvu_cgx_enadis_rx_bp(rvu, pf, false); 2400 2401 /* Wait for flush to complete */ 2402 err = rvu_poll_reg(rvu, blkaddr, 2403 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true); 2404 if (err) 2405 dev_info(rvu->dev, 2406 "NIXLF%d: SMQ%d flush failed, txlink might be busy\n", 2407 nixlf, smq); 2408 2409 /* clear XOFF on TL2s */ 2410 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true); 2411 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false); 2412 kfree(smq_flush_ctx); 2413 2414 rvu_cgx_enadis_rx_bp(rvu, pf, true); 2415 /* restore cgx tx state */ 2416 if (restore_tx_en) 2417 rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); 2418 return err; 2419 } 2420 2421 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) 2422 { 2423 int blkaddr, nixlf, lvl, schq, err; 2424 struct rvu_hwinfo *hw = rvu->hw; 2425 struct nix_txsch *txsch; 2426 struct nix_hw *nix_hw; 2427 u16 map_func; 2428 2429 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2430 if (blkaddr < 0) 2431 return NIX_AF_ERR_AF_LF_INVALID; 2432 2433 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2434 if (!nix_hw) 2435 return NIX_AF_ERR_INVALID_NIXBLK; 2436 2437 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 2438 if (nixlf < 0) 2439 return NIX_AF_ERR_AF_LF_INVALID; 2440 2441 /* Disable TL2/3 queue links and all XOFF's before SMQ flush*/ 2442 mutex_lock(&rvu->rsrc_lock); 2443 for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2444 txsch = &nix_hw->txsch[lvl]; 2445 2446 if (lvl >= hw->cap.nix_tx_aggr_lvl) 2447 continue; 2448 2449 for (schq = 0; schq < txsch->schq.max; schq++) { 2450 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2451 continue; 2452 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2453 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); 2454 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2455 } 2456 } 2457 nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1, 2458 nix_get_tx_link(rvu, pcifunc)); 2459 2460 /* On PF cleanup, clear cfg done flag as 2461 * PF would have changed default config. 2462 */ 2463 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) { 2464 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1]; 2465 schq = nix_get_tx_link(rvu, pcifunc); 2466 /* Do not clear pcifunc in txsch->pfvf_map[schq] because 2467 * VF might be using this TL1 queue 2468 */ 2469 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); 2470 txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0); 2471 } 2472 2473 /* Flush SMQs */ 2474 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; 2475 for (schq = 0; schq < txsch->schq.max; schq++) { 2476 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2477 continue; 2478 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 2479 } 2480 2481 /* Now free scheduler queues to free pool */ 2482 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2483 /* TLs above aggregation level are shared across all PF 2484 * and it's VFs, hence skip freeing them. 
2485 */ 2486 if (lvl >= hw->cap.nix_tx_aggr_lvl) 2487 continue; 2488 2489 txsch = &nix_hw->txsch[lvl]; 2490 for (schq = 0; schq < txsch->schq.max; schq++) { 2491 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2492 continue; 2493 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2494 rvu_free_rsrc(&txsch->schq, schq); 2495 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 2496 } 2497 } 2498 mutex_unlock(&rvu->rsrc_lock); 2499 2500 err = rvu_ndc_sync(rvu, blkaddr, nixlf, NIX_AF_NDC_TX_SYNC); 2501 if (err) 2502 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf); 2503 2504 return 0; 2505 } 2506 2507 static int nix_txschq_free_one(struct rvu *rvu, 2508 struct nix_txsch_free_req *req) 2509 { 2510 struct rvu_hwinfo *hw = rvu->hw; 2511 u16 pcifunc = req->hdr.pcifunc; 2512 int lvl, schq, nixlf, blkaddr; 2513 struct nix_txsch *txsch; 2514 struct nix_hw *nix_hw; 2515 u32 *pfvf_map; 2516 int rc; 2517 2518 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2519 if (blkaddr < 0) 2520 return NIX_AF_ERR_AF_LF_INVALID; 2521 2522 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2523 if (!nix_hw) 2524 return NIX_AF_ERR_INVALID_NIXBLK; 2525 2526 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 2527 if (nixlf < 0) 2528 return NIX_AF_ERR_AF_LF_INVALID; 2529 2530 lvl = req->schq_lvl; 2531 schq = req->schq; 2532 txsch = &nix_hw->txsch[lvl]; 2533 2534 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max) 2535 return 0; 2536 2537 pfvf_map = txsch->pfvf_map; 2538 mutex_lock(&rvu->rsrc_lock); 2539 2540 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) { 2541 rc = NIX_AF_ERR_TLX_INVALID; 2542 goto err; 2543 } 2544 2545 /* Clear SW_XOFF of this resource only. 2546 * For SMQ level, all path XOFF's 2547 * need to be made clear by user 2548 */ 2549 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); 2550 2551 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2552 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2553 2554 /* Flush if it is a SMQ. 
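	 * The flush drains any packets still pending in the queue.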
Onus of disabling 2555 * TL2/3 queue links before SMQ flush is on user 2556 */ 2557 if (lvl == NIX_TXSCH_LVL_SMQ && 2558 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) { 2559 rc = NIX_AF_SMQ_FLUSH_FAILED; 2560 goto err; 2561 } 2562 2563 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2564 2565 /* Free the resource */ 2566 rvu_free_rsrc(&txsch->schq, schq); 2567 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 2568 mutex_unlock(&rvu->rsrc_lock); 2569 return 0; 2570 err: 2571 mutex_unlock(&rvu->rsrc_lock); 2572 return rc; 2573 } 2574 2575 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, 2576 struct nix_txsch_free_req *req, 2577 struct msg_rsp *rsp) 2578 { 2579 if (req->flags & TXSCHQ_FREE_ALL) 2580 return nix_txschq_free(rvu, req->hdr.pcifunc); 2581 else 2582 return nix_txschq_free_one(rvu, req); 2583 } 2584 2585 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, 2586 int lvl, u64 reg, u64 regval) 2587 { 2588 u64 regbase = reg & 0xFFFF; 2589 u16 schq, parent; 2590 2591 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg)) 2592 return false; 2593 2594 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2595 /* Check if this schq belongs to this PF/VF or not */ 2596 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq)) 2597 return false; 2598 2599 parent = (regval >> 16) & 0x1FF; 2600 /* Validate MDQ's TL4 parent */ 2601 if (regbase == NIX_AF_MDQX_PARENT(0) && 2602 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent)) 2603 return false; 2604 2605 /* Validate TL4's TL3 parent */ 2606 if (regbase == NIX_AF_TL4X_PARENT(0) && 2607 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent)) 2608 return false; 2609 2610 /* Validate TL3's TL2 parent */ 2611 if (regbase == NIX_AF_TL3X_PARENT(0) && 2612 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent)) 2613 return false; 2614 2615 /* Validate TL2's TL1 parent */ 2616 if (regbase == NIX_AF_TL2X_PARENT(0) && 2617 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent)) 2618 return false; 2619 2620 return true; 2621 } 2622 2623 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg) 2624 { 2625 u64 regbase; 2626 2627 if (hw->cap.nix_shaping) 2628 return true; 2629 2630 /* If shaping and coloring is not supported, then 2631 * *_CIR and *_PIR registers should not be configured. 
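	 * The caller silently skips such writes instead of failing the request.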
	 */
	regbase = reg & 0xFFFF;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		if (regbase == NIX_AF_TL1X_CIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL2:
		if (regbase == NIX_AF_TL2X_CIR(0) ||
		    regbase == NIX_AF_TL2X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL3:
		if (regbase == NIX_AF_TL3X_CIR(0) ||
		    regbase == NIX_AF_TL3X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL4:
		if (regbase == NIX_AF_TL4X_CIR(0) ||
		    regbase == NIX_AF_TL4X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_MDQ:
		if (regbase == NIX_AF_MDQX_CIR(0) ||
		    regbase == NIX_AF_MDQX_PIR(0))
			return false;
		break;
	}
	return true;
}

static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
				u16 pcifunc, int blkaddr)
{
	u32 *pfvf_map;
	int schq;

	schq = nix_get_tx_link(rvu, pcifunc);
	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
	/* Skip if PF has already done the config */
	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
		return;
	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
		    (TXSCH_TL1_DFLT_RR_PRIO << 1));

	/* On OcteonTx2 the config was in bytes and on newer silicons
	 * it's changed to weight.
	 */
	if (!rvu->hw->cap.nix_common_dwrr_mtu)
		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
			    TXSCH_TL1_DFLT_RR_QTM);
	else
		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
			    CN10K_MAX_DWRR_WEIGHT);

	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}

/* Register offset - [15:0]
 * Scheduler Queue number - [25:16]
 */
#define NIX_TX_SCHQ_MASK	GENMASK_ULL(25, 0)

static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
			       int blkaddr, struct nix_txschq_config *req,
			       struct nix_txschq_config *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int idx, schq;
	u64 reg;

	for (idx = 0; idx < req->num_regs; idx++) {
		reg = req->reg[idx];
		reg &= NIX_TX_SCHQ_MASK;
		schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
		if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
		    !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
			return NIX_AF_INVAL_TXSCHQ_CFG;
		rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
	}
	rsp->lvl = req->lvl;
	rsp->num_regs = req->num_regs;
	return 0;
}

void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
			struct nix_txsch *txsch, bool enable)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int lbk_link_start, lbk_links;
	u8 pf = rvu_get_pf(pcifunc);
	int schq;
	u64 cfg;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	cfg = enable ?
(BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0; 2732 lbk_link_start = hw->cgx_links; 2733 2734 for (schq = 0; schq < txsch->schq.max; schq++) { 2735 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2736 continue; 2737 /* Enable all LBK links with channel 63 by default so that 2738 * packets can be sent to LBK with a NPC TX MCAM rule 2739 */ 2740 lbk_links = hw->lbk_links; 2741 while (lbk_links--) 2742 rvu_write64(rvu, blkaddr, 2743 NIX_AF_TL3_TL2X_LINKX_CFG(schq, 2744 lbk_link_start + 2745 lbk_links), cfg); 2746 } 2747 } 2748 2749 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, 2750 struct nix_txschq_config *req, 2751 struct nix_txschq_config *rsp) 2752 { 2753 u64 reg, val, regval, schq_regbase, val_mask; 2754 struct rvu_hwinfo *hw = rvu->hw; 2755 u16 pcifunc = req->hdr.pcifunc; 2756 struct nix_txsch *txsch; 2757 struct nix_hw *nix_hw; 2758 int blkaddr, idx, err; 2759 int nixlf, schq; 2760 u32 *pfvf_map; 2761 2762 if (req->lvl >= NIX_TXSCH_LVL_CNT || 2763 req->num_regs > MAX_REGS_PER_MBOX_MSG) 2764 return NIX_AF_INVAL_TXSCHQ_CFG; 2765 2766 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2767 if (err) 2768 return err; 2769 2770 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2771 if (!nix_hw) 2772 return NIX_AF_ERR_INVALID_NIXBLK; 2773 2774 if (req->read) 2775 return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp); 2776 2777 txsch = &nix_hw->txsch[req->lvl]; 2778 pfvf_map = txsch->pfvf_map; 2779 2780 if (req->lvl >= hw->cap.nix_tx_aggr_lvl && 2781 pcifunc & RVU_PFVF_FUNC_MASK) { 2782 mutex_lock(&rvu->rsrc_lock); 2783 if (req->lvl == NIX_TXSCH_LVL_TL1) 2784 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr); 2785 mutex_unlock(&rvu->rsrc_lock); 2786 return 0; 2787 } 2788 2789 for (idx = 0; idx < req->num_regs; idx++) { 2790 reg = req->reg[idx]; 2791 reg &= NIX_TX_SCHQ_MASK; 2792 regval = req->regval[idx]; 2793 schq_regbase = reg & 0xFFFF; 2794 val_mask = req->regval_mask[idx]; 2795 2796 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr, 2797 txsch->lvl, reg, regval)) 2798 return NIX_AF_INVAL_TXSCHQ_CFG; 2799 2800 /* Check if shaping and coloring is supported */ 2801 if (!is_txschq_shaping_valid(hw, req->lvl, reg)) 2802 continue; 2803 2804 val = rvu_read64(rvu, blkaddr, reg); 2805 regval = (val & val_mask) | (regval & ~val_mask); 2806 2807 /* Handle shaping state toggle specially */ 2808 if (hw->cap.nix_shaper_toggle_wait && 2809 handle_txschq_shaper_update(rvu, blkaddr, nixlf, 2810 req->lvl, reg, regval)) 2811 continue; 2812 2813 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ 2814 if (schq_regbase == NIX_AF_SMQX_CFG(0)) { 2815 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], 2816 pcifunc, 0); 2817 regval &= ~(0x7FULL << 24); 2818 regval |= ((u64)nixlf << 24); 2819 } 2820 2821 /* Clear 'BP_ENA' config, if it's not allowed */ 2822 if (!hw->cap.nix_tx_link_bp) { 2823 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) || 2824 (schq_regbase & 0xFF00) == 2825 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0)) 2826 regval &= ~BIT_ULL(13); 2827 } 2828 2829 /* Mark config as done for TL1 by PF */ 2830 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) && 2831 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) { 2832 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2833 mutex_lock(&rvu->rsrc_lock); 2834 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], 2835 NIX_TXSCHQ_CFG_DONE); 2836 mutex_unlock(&rvu->rsrc_lock); 2837 } 2838 2839 /* SMQ flush is special hence split register writes such 2840 * that flush first and write rest of the bits later. 
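	 * nix_smq_flush() below performs the flush; bit 49 is then cleared
	 * from the value before the final register write.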
2841 */ 2842 if (schq_regbase == NIX_AF_SMQX_CFG(0) && 2843 (regval & BIT_ULL(49))) { 2844 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2845 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 2846 regval &= ~BIT_ULL(49); 2847 } 2848 rvu_write64(rvu, blkaddr, reg, regval); 2849 } 2850 2851 return 0; 2852 } 2853 2854 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, 2855 struct nix_vtag_config *req) 2856 { 2857 u64 regval = req->vtag_size; 2858 2859 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 || 2860 req->vtag_size > VTAGSIZE_T8) 2861 return -EINVAL; 2862 2863 /* RX VTAG Type 7 reserved for vf vlan */ 2864 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7) 2865 return NIX_AF_ERR_RX_VTAG_INUSE; 2866 2867 if (req->rx.capture_vtag) 2868 regval |= BIT_ULL(5); 2869 if (req->rx.strip_vtag) 2870 regval |= BIT_ULL(4); 2871 2872 rvu_write64(rvu, blkaddr, 2873 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval); 2874 return 0; 2875 } 2876 2877 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr, 2878 u16 pcifunc, int index) 2879 { 2880 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2881 struct nix_txvlan *vlan; 2882 2883 if (!nix_hw) 2884 return NIX_AF_ERR_INVALID_NIXBLK; 2885 2886 vlan = &nix_hw->txvlan; 2887 if (vlan->entry2pfvf_map[index] != pcifunc) 2888 return NIX_AF_ERR_PARAM; 2889 2890 rvu_write64(rvu, blkaddr, 2891 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull); 2892 rvu_write64(rvu, blkaddr, 2893 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull); 2894 2895 vlan->entry2pfvf_map[index] = 0; 2896 rvu_free_rsrc(&vlan->rsrc, index); 2897 2898 return 0; 2899 } 2900 2901 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc) 2902 { 2903 struct nix_txvlan *vlan; 2904 struct nix_hw *nix_hw; 2905 int index, blkaddr; 2906 2907 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2908 if (blkaddr < 0) 2909 return; 2910 2911 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2912 if (!nix_hw) 2913 return; 2914 2915 vlan = &nix_hw->txvlan; 2916 2917 mutex_lock(&vlan->rsrc_lock); 2918 /* Scan all the entries and free the ones mapped to 'pcifunc' */ 2919 for (index = 0; index < vlan->rsrc.max; index++) { 2920 if (vlan->entry2pfvf_map[index] == pcifunc) 2921 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index); 2922 } 2923 mutex_unlock(&vlan->rsrc_lock); 2924 } 2925 2926 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr, 2927 u64 vtag, u8 size) 2928 { 2929 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2930 struct nix_txvlan *vlan; 2931 u64 regval; 2932 int index; 2933 2934 if (!nix_hw) 2935 return NIX_AF_ERR_INVALID_NIXBLK; 2936 2937 vlan = &nix_hw->txvlan; 2938 2939 mutex_lock(&vlan->rsrc_lock); 2940 2941 index = rvu_alloc_rsrc(&vlan->rsrc); 2942 if (index < 0) { 2943 mutex_unlock(&vlan->rsrc_lock); 2944 return index; 2945 } 2946 2947 mutex_unlock(&vlan->rsrc_lock); 2948 2949 regval = size ? 
vtag : vtag << 32; 2950 2951 rvu_write64(rvu, blkaddr, 2952 NIX_AF_TX_VTAG_DEFX_DATA(index), regval); 2953 rvu_write64(rvu, blkaddr, 2954 NIX_AF_TX_VTAG_DEFX_CTL(index), size); 2955 2956 return index; 2957 } 2958 2959 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr, 2960 struct nix_vtag_config *req) 2961 { 2962 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2963 u16 pcifunc = req->hdr.pcifunc; 2964 int idx0 = req->tx.vtag0_idx; 2965 int idx1 = req->tx.vtag1_idx; 2966 struct nix_txvlan *vlan; 2967 int err = 0; 2968 2969 if (!nix_hw) 2970 return NIX_AF_ERR_INVALID_NIXBLK; 2971 2972 vlan = &nix_hw->txvlan; 2973 if (req->tx.free_vtag0 && req->tx.free_vtag1) 2974 if (vlan->entry2pfvf_map[idx0] != pcifunc || 2975 vlan->entry2pfvf_map[idx1] != pcifunc) 2976 return NIX_AF_ERR_PARAM; 2977 2978 mutex_lock(&vlan->rsrc_lock); 2979 2980 if (req->tx.free_vtag0) { 2981 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0); 2982 if (err) 2983 goto exit; 2984 } 2985 2986 if (req->tx.free_vtag1) 2987 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1); 2988 2989 exit: 2990 mutex_unlock(&vlan->rsrc_lock); 2991 return err; 2992 } 2993 2994 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr, 2995 struct nix_vtag_config *req, 2996 struct nix_vtag_config_rsp *rsp) 2997 { 2998 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2999 struct nix_txvlan *vlan; 3000 u16 pcifunc = req->hdr.pcifunc; 3001 3002 if (!nix_hw) 3003 return NIX_AF_ERR_INVALID_NIXBLK; 3004 3005 vlan = &nix_hw->txvlan; 3006 if (req->tx.cfg_vtag0) { 3007 rsp->vtag0_idx = 3008 nix_tx_vtag_alloc(rvu, blkaddr, 3009 req->tx.vtag0, req->vtag_size); 3010 3011 if (rsp->vtag0_idx < 0) 3012 return NIX_AF_ERR_TX_VTAG_NOSPC; 3013 3014 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc; 3015 } 3016 3017 if (req->tx.cfg_vtag1) { 3018 rsp->vtag1_idx = 3019 nix_tx_vtag_alloc(rvu, blkaddr, 3020 req->tx.vtag1, req->vtag_size); 3021 3022 if (rsp->vtag1_idx < 0) 3023 goto err_free; 3024 3025 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc; 3026 } 3027 3028 return 0; 3029 3030 err_free: 3031 if (req->tx.cfg_vtag0) 3032 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx); 3033 3034 return NIX_AF_ERR_TX_VTAG_NOSPC; 3035 } 3036 3037 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, 3038 struct nix_vtag_config *req, 3039 struct nix_vtag_config_rsp *rsp) 3040 { 3041 u16 pcifunc = req->hdr.pcifunc; 3042 int blkaddr, nixlf, err; 3043 3044 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3045 if (err) 3046 return err; 3047 3048 if (req->cfg_type) { 3049 /* rx vtag configuration */ 3050 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req); 3051 if (err) 3052 return NIX_AF_ERR_PARAM; 3053 } else { 3054 /* tx vtag configuration */ 3055 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) && 3056 (req->tx.free_vtag0 || req->tx.free_vtag1)) 3057 return NIX_AF_ERR_PARAM; 3058 3059 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1) 3060 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp); 3061 3062 if (req->tx.free_vtag0 || req->tx.free_vtag1) 3063 return nix_tx_vtag_decfg(rvu, blkaddr, req); 3064 } 3065 3066 return 0; 3067 } 3068 3069 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, 3070 int mce, u8 op, u16 pcifunc, int next, 3071 int index, u8 mce_op, bool eol) 3072 { 3073 struct nix_aq_enq_req aq_req; 3074 int err; 3075 3076 aq_req.hdr.pcifunc = 0; 3077 aq_req.ctype = NIX_AQ_CTYPE_MCE; 3078 aq_req.op = op; 3079 aq_req.qidx = mce; 3080 3081 /* Use RSS with RSS index 0 */ 3082 aq_req.mce.op = mce_op; 3083 aq_req.mce.index = index; 3084 aq_req.mce.eol = eol; 
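	/* Destination PF_FUNC and the index of the next MCE in the replication list */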
3085 aq_req.mce.pf_func = pcifunc; 3086 aq_req.mce.next = next; 3087 3088 /* All fields valid */ 3089 *(u64 *)(&aq_req.mce_mask) = ~0ULL; 3090 3091 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL); 3092 if (err) { 3093 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", 3094 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 3095 return err; 3096 } 3097 return 0; 3098 } 3099 3100 static void nix_delete_mcast_mce_list(struct nix_mce_list *mce_list) 3101 { 3102 struct hlist_node *tmp; 3103 struct mce *mce; 3104 3105 /* Scan through the current list */ 3106 hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) { 3107 hlist_del(&mce->node); 3108 kfree(mce); 3109 } 3110 3111 mce_list->count = 0; 3112 mce_list->max = 0; 3113 } 3114 3115 static int nix_get_last_mce_list_index(struct nix_mcast_grp_elem *elem) 3116 { 3117 return elem->mce_start_index + elem->mcast_mce_list.count - 1; 3118 } 3119 3120 static int nix_update_ingress_mce_list_hw(struct rvu *rvu, 3121 struct nix_hw *nix_hw, 3122 struct nix_mcast_grp_elem *elem) 3123 { 3124 int idx, last_idx, next_idx, err; 3125 struct nix_mce_list *mce_list; 3126 struct mce *mce, *prev_mce; 3127 3128 mce_list = &elem->mcast_mce_list; 3129 idx = elem->mce_start_index; 3130 last_idx = nix_get_last_mce_list_index(elem); 3131 hlist_for_each_entry(mce, &mce_list->head, node) { 3132 if (idx > last_idx) 3133 break; 3134 3135 if (!mce->is_active) { 3136 if (idx == elem->mce_start_index) { 3137 idx++; 3138 prev_mce = mce; 3139 elem->mce_start_index = idx; 3140 continue; 3141 } else if (idx == last_idx) { 3142 err = nix_blk_setup_mce(rvu, nix_hw, idx - 1, NIX_AQ_INSTOP_WRITE, 3143 prev_mce->pcifunc, next_idx, 3144 prev_mce->rq_rss_index, 3145 prev_mce->dest_type, 3146 false); 3147 if (err) 3148 return err; 3149 3150 break; 3151 } 3152 } 3153 3154 next_idx = idx + 1; 3155 /* EOL should be set in last MCE */ 3156 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, 3157 mce->pcifunc, next_idx, 3158 mce->rq_rss_index, mce->dest_type, 3159 (next_idx > last_idx) ? 
true : false); 3160 if (err) 3161 return err; 3162 3163 idx++; 3164 prev_mce = mce; 3165 } 3166 3167 return 0; 3168 } 3169 3170 static void nix_update_egress_mce_list_hw(struct rvu *rvu, 3171 struct nix_hw *nix_hw, 3172 struct nix_mcast_grp_elem *elem) 3173 { 3174 struct nix_mce_list *mce_list; 3175 int idx, last_idx, next_idx; 3176 struct mce *mce, *prev_mce; 3177 u64 regval; 3178 u8 eol; 3179 3180 mce_list = &elem->mcast_mce_list; 3181 idx = elem->mce_start_index; 3182 last_idx = nix_get_last_mce_list_index(elem); 3183 hlist_for_each_entry(mce, &mce_list->head, node) { 3184 if (idx > last_idx) 3185 break; 3186 3187 if (!mce->is_active) { 3188 if (idx == elem->mce_start_index) { 3189 idx++; 3190 prev_mce = mce; 3191 elem->mce_start_index = idx; 3192 continue; 3193 } else if (idx == last_idx) { 3194 regval = (next_idx << 16) | (1 << 12) | prev_mce->channel; 3195 rvu_write64(rvu, nix_hw->blkaddr, 3196 NIX_AF_TX_MCASTX(idx - 1), 3197 regval); 3198 break; 3199 } 3200 } 3201 3202 eol = 0; 3203 next_idx = idx + 1; 3204 /* EOL should be set in last MCE */ 3205 if (next_idx > last_idx) 3206 eol = 1; 3207 3208 regval = (next_idx << 16) | (eol << 12) | mce->channel; 3209 rvu_write64(rvu, nix_hw->blkaddr, 3210 NIX_AF_TX_MCASTX(idx), 3211 regval); 3212 idx++; 3213 prev_mce = mce; 3214 } 3215 } 3216 3217 static int nix_del_mce_list_entry(struct rvu *rvu, 3218 struct nix_hw *nix_hw, 3219 struct nix_mcast_grp_elem *elem, 3220 struct nix_mcast_grp_update_req *req) 3221 { 3222 u32 num_entry = req->num_mce_entry; 3223 struct nix_mce_list *mce_list; 3224 struct mce *mce; 3225 bool is_found; 3226 int i; 3227 3228 mce_list = &elem->mcast_mce_list; 3229 for (i = 0; i < num_entry; i++) { 3230 is_found = false; 3231 hlist_for_each_entry(mce, &mce_list->head, node) { 3232 /* If already exists, then delete */ 3233 if (mce->pcifunc == req->pcifunc[i]) { 3234 hlist_del(&mce->node); 3235 kfree(mce); 3236 mce_list->count--; 3237 is_found = true; 3238 break; 3239 } 3240 } 3241 3242 if (!is_found) 3243 return NIX_AF_ERR_INVALID_MCAST_DEL_REQ; 3244 } 3245 3246 mce_list->max = mce_list->count; 3247 /* Dump the updated list to HW */ 3248 if (elem->dir == NIX_MCAST_INGRESS) 3249 return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem); 3250 3251 nix_update_egress_mce_list_hw(rvu, nix_hw, elem); 3252 return 0; 3253 } 3254 3255 static int nix_add_mce_list_entry(struct rvu *rvu, 3256 struct nix_hw *nix_hw, 3257 struct nix_mcast_grp_elem *elem, 3258 struct nix_mcast_grp_update_req *req) 3259 { 3260 u32 num_entry = req->num_mce_entry; 3261 struct nix_mce_list *mce_list; 3262 struct hlist_node *tmp; 3263 struct mce *mce; 3264 int i; 3265 3266 mce_list = &elem->mcast_mce_list; 3267 for (i = 0; i < num_entry; i++) { 3268 mce = kzalloc(sizeof(*mce), GFP_KERNEL); 3269 if (!mce) 3270 goto free_mce; 3271 3272 mce->pcifunc = req->pcifunc[i]; 3273 mce->channel = req->channel[i]; 3274 mce->rq_rss_index = req->rq_rss_index[i]; 3275 mce->dest_type = req->dest_type[i]; 3276 mce->is_active = 1; 3277 hlist_add_head(&mce->node, &mce_list->head); 3278 mce_list->count++; 3279 } 3280 3281 mce_list->max += num_entry; 3282 3283 /* Dump the updated list to HW */ 3284 if (elem->dir == NIX_MCAST_INGRESS) 3285 return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem); 3286 3287 nix_update_egress_mce_list_hw(rvu, nix_hw, elem); 3288 return 0; 3289 3290 free_mce: 3291 hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) { 3292 hlist_del(&mce->node); 3293 kfree(mce); 3294 mce_list->count--; 3295 } 3296 3297 return -ENOMEM; 3298 } 3299 3300 static int 
nix_update_mce_list_entry(struct nix_mce_list *mce_list, 3301 u16 pcifunc, bool add) 3302 { 3303 struct mce *mce, *tail = NULL; 3304 bool delete = false; 3305 3306 /* Scan through the current list */ 3307 hlist_for_each_entry(mce, &mce_list->head, node) { 3308 /* If already exists, then delete */ 3309 if (mce->pcifunc == pcifunc && !add) { 3310 delete = true; 3311 break; 3312 } else if (mce->pcifunc == pcifunc && add) { 3313 /* entry already exists */ 3314 return 0; 3315 } 3316 tail = mce; 3317 } 3318 3319 if (delete) { 3320 hlist_del(&mce->node); 3321 kfree(mce); 3322 mce_list->count--; 3323 return 0; 3324 } 3325 3326 if (!add) 3327 return 0; 3328 3329 /* Add a new one to the list, at the tail */ 3330 mce = kzalloc(sizeof(*mce), GFP_KERNEL); 3331 if (!mce) 3332 return -ENOMEM; 3333 mce->pcifunc = pcifunc; 3334 if (!tail) 3335 hlist_add_head(&mce->node, &mce_list->head); 3336 else 3337 hlist_add_behind(&mce->node, &tail->node); 3338 mce_list->count++; 3339 return 0; 3340 } 3341 3342 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc, 3343 struct nix_mce_list *mce_list, 3344 int mce_idx, int mcam_index, bool add) 3345 { 3346 int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr; 3347 struct npc_mcam *mcam = &rvu->hw->mcam; 3348 struct nix_mcast *mcast; 3349 struct nix_hw *nix_hw; 3350 struct mce *mce; 3351 3352 if (!mce_list) 3353 return -EINVAL; 3354 3355 /* Get this PF/VF func's MCE index */ 3356 idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); 3357 3358 if (idx > (mce_idx + mce_list->max)) { 3359 dev_err(rvu->dev, 3360 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", 3361 __func__, idx, mce_list->max, 3362 pcifunc >> RVU_PFVF_PF_SHIFT); 3363 return -EINVAL; 3364 } 3365 3366 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 3367 if (err) 3368 return err; 3369 3370 mcast = &nix_hw->mcast; 3371 mutex_lock(&mcast->mce_lock); 3372 3373 err = nix_update_mce_list_entry(mce_list, pcifunc, add); 3374 if (err) 3375 goto end; 3376 3377 /* Disable MCAM entry in NPC */ 3378 if (!mce_list->count) { 3379 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3380 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false); 3381 goto end; 3382 } 3383 3384 /* Dump the updated list to HW */ 3385 idx = mce_idx; 3386 last_idx = idx + mce_list->count - 1; 3387 hlist_for_each_entry(mce, &mce_list->head, node) { 3388 if (idx > last_idx) 3389 break; 3390 3391 next_idx = idx + 1; 3392 /* EOL should be set in last MCE */ 3393 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, 3394 mce->pcifunc, next_idx, 3395 0, 1, 3396 (next_idx > last_idx) ? 
true : false); 3397 if (err) 3398 goto end; 3399 idx++; 3400 } 3401 3402 end: 3403 mutex_unlock(&mcast->mce_lock); 3404 return err; 3405 } 3406 3407 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type, 3408 struct nix_mce_list **mce_list, int *mce_idx) 3409 { 3410 struct rvu_hwinfo *hw = rvu->hw; 3411 struct rvu_pfvf *pfvf; 3412 3413 if (!hw->cap.nix_rx_multicast || 3414 !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) { 3415 *mce_list = NULL; 3416 *mce_idx = 0; 3417 return; 3418 } 3419 3420 /* Get this PF/VF func's MCE index */ 3421 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); 3422 3423 if (type == NIXLF_BCAST_ENTRY) { 3424 *mce_list = &pfvf->bcast_mce_list; 3425 *mce_idx = pfvf->bcast_mce_idx; 3426 } else if (type == NIXLF_ALLMULTI_ENTRY) { 3427 *mce_list = &pfvf->mcast_mce_list; 3428 *mce_idx = pfvf->mcast_mce_idx; 3429 } else if (type == NIXLF_PROMISC_ENTRY) { 3430 *mce_list = &pfvf->promisc_mce_list; 3431 *mce_idx = pfvf->promisc_mce_idx; 3432 } else { 3433 *mce_list = NULL; 3434 *mce_idx = 0; 3435 } 3436 } 3437 3438 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, 3439 int type, bool add) 3440 { 3441 int err = 0, nixlf, blkaddr, mcam_index, mce_idx; 3442 struct npc_mcam *mcam = &rvu->hw->mcam; 3443 struct rvu_hwinfo *hw = rvu->hw; 3444 struct nix_mce_list *mce_list; 3445 int pf; 3446 3447 /* skip multicast pkt replication for AF's VFs & SDP links */ 3448 if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(pcifunc)) 3449 return 0; 3450 3451 if (!hw->cap.nix_rx_multicast) 3452 return 0; 3453 3454 pf = rvu_get_pf(pcifunc); 3455 if (!is_pf_cgxmapped(rvu, pf)) 3456 return 0; 3457 3458 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3459 if (blkaddr < 0) 3460 return -EINVAL; 3461 3462 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 3463 if (nixlf < 0) 3464 return -EINVAL; 3465 3466 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx); 3467 3468 mcam_index = npc_get_nixlf_mcam_index(mcam, 3469 pcifunc & ~RVU_PFVF_FUNC_MASK, 3470 nixlf, type); 3471 err = nix_update_mce_list(rvu, pcifunc, mce_list, 3472 mce_idx, mcam_index, add); 3473 return err; 3474 } 3475 3476 static void nix_setup_mcast_grp(struct nix_hw *nix_hw) 3477 { 3478 struct nix_mcast_grp *mcast_grp = &nix_hw->mcast_grp; 3479 3480 INIT_LIST_HEAD(&mcast_grp->mcast_grp_head); 3481 mutex_init(&mcast_grp->mcast_grp_lock); 3482 mcast_grp->next_grp_index = 1; 3483 mcast_grp->count = 0; 3484 } 3485 3486 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) 3487 { 3488 struct nix_mcast *mcast = &nix_hw->mcast; 3489 int err, pf, numvfs, idx; 3490 struct rvu_pfvf *pfvf; 3491 u16 pcifunc; 3492 u64 cfg; 3493 3494 /* Skip PF0 (i.e AF) */ 3495 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) { 3496 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 3497 /* If PF is not enabled, nothing to do */ 3498 if (!((cfg >> 20) & 0x01)) 3499 continue; 3500 /* Get numVFs attached to this PF */ 3501 numvfs = (cfg >> 12) & 0xFF; 3502 3503 pfvf = &rvu->pf[pf]; 3504 3505 /* This NIX0/1 block mapped to PF ? 
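	 * Each PF is served by exactly one NIX block, so skip PFs mapped
	 * to the other block.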
*/ 3506 if (pfvf->nix_blkaddr != nix_hw->blkaddr) 3507 continue; 3508 3509 /* save start idx of broadcast mce list */ 3510 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS); 3511 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1); 3512 3513 /* save start idx of multicast mce list */ 3514 pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS); 3515 nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1); 3516 3517 /* save the start idx of promisc mce list */ 3518 pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS); 3519 nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1); 3520 3521 for (idx = 0; idx < (numvfs + 1); idx++) { 3522 /* idx-0 is for PF, followed by VFs */ 3523 pcifunc = (pf << RVU_PFVF_PF_SHIFT); 3524 pcifunc |= idx; 3525 /* Add dummy entries now, so that we don't have to check 3526 * for whether AQ_OP should be INIT/WRITE later on. 3527 * Will be updated when a NIXLF is attached/detached to 3528 * these PF/VFs. 3529 */ 3530 err = nix_blk_setup_mce(rvu, nix_hw, 3531 pfvf->bcast_mce_idx + idx, 3532 NIX_AQ_INSTOP_INIT, 3533 pcifunc, 0, 0, 1, true); 3534 if (err) 3535 return err; 3536 3537 /* add dummy entries to multicast mce list */ 3538 err = nix_blk_setup_mce(rvu, nix_hw, 3539 pfvf->mcast_mce_idx + idx, 3540 NIX_AQ_INSTOP_INIT, 3541 pcifunc, 0, 0, 1, true); 3542 if (err) 3543 return err; 3544 3545 /* add dummy entries to promisc mce list */ 3546 err = nix_blk_setup_mce(rvu, nix_hw, 3547 pfvf->promisc_mce_idx + idx, 3548 NIX_AQ_INSTOP_INIT, 3549 pcifunc, 0, 0, 1, true); 3550 if (err) 3551 return err; 3552 } 3553 } 3554 return 0; 3555 } 3556 3557 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 3558 { 3559 struct nix_mcast *mcast = &nix_hw->mcast; 3560 struct rvu_hwinfo *hw = rvu->hw; 3561 int err, size; 3562 3563 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F; 3564 size = BIT_ULL(size); 3565 3566 /* Allocate bitmap for rx mce entries */ 3567 mcast->mce_counter[NIX_MCAST_INGRESS].max = 256UL << MC_TBL_SIZE; 3568 err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]); 3569 if (err) 3570 return -ENOMEM; 3571 3572 /* Allocate bitmap for tx mce entries */ 3573 mcast->mce_counter[NIX_MCAST_EGRESS].max = MC_TX_MAX; 3574 err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]); 3575 if (err) { 3576 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]); 3577 return -ENOMEM; 3578 } 3579 3580 /* Alloc memory for multicast/mirror replication entries */ 3581 err = qmem_alloc(rvu->dev, &mcast->mce_ctx, 3582 mcast->mce_counter[NIX_MCAST_INGRESS].max, size); 3583 if (err) { 3584 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]); 3585 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]); 3586 return -ENOMEM; 3587 } 3588 3589 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE, 3590 (u64)mcast->mce_ctx->iova); 3591 3592 /* Set max list length equal to max no of VFs per PF + PF itself */ 3593 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG, 3594 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE); 3595 3596 /* Alloc memory for multicast replication buffers */ 3597 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF; 3598 err = qmem_alloc(rvu->dev, &mcast->mcast_buf, 3599 (8UL << MC_BUF_CNT), size); 3600 if (err) { 3601 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]); 3602 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]); 3603 return -ENOMEM; 3604 } 3605 3606 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE, 3607 
(u64)mcast->mcast_buf->iova); 3608 3609 /* Alloc pkind for NIX internal RX multicast/mirror replay */ 3610 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc); 3611 3612 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG, 3613 BIT_ULL(63) | (mcast->replay_pkind << 24) | 3614 BIT_ULL(20) | MC_BUF_CNT); 3615 3616 mutex_init(&mcast->mce_lock); 3617 3618 nix_setup_mcast_grp(nix_hw); 3619 3620 return nix_setup_mce_tables(rvu, nix_hw); 3621 } 3622 3623 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw) 3624 { 3625 struct nix_txvlan *vlan = &nix_hw->txvlan; 3626 int err; 3627 3628 /* Allocate resource bimap for tx vtag def registers*/ 3629 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX; 3630 err = rvu_alloc_bitmap(&vlan->rsrc); 3631 if (err) 3632 return -ENOMEM; 3633 3634 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ 3635 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max, 3636 sizeof(u16), GFP_KERNEL); 3637 if (!vlan->entry2pfvf_map) 3638 goto free_mem; 3639 3640 mutex_init(&vlan->rsrc_lock); 3641 return 0; 3642 3643 free_mem: 3644 kfree(vlan->rsrc.bmap); 3645 return -ENOMEM; 3646 } 3647 3648 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 3649 { 3650 struct nix_txsch *txsch; 3651 int err, lvl, schq; 3652 u64 cfg, reg; 3653 3654 /* Get scheduler queue count of each type and alloc 3655 * bitmap for each for alloc/free/attach operations. 3656 */ 3657 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 3658 txsch = &nix_hw->txsch[lvl]; 3659 txsch->lvl = lvl; 3660 switch (lvl) { 3661 case NIX_TXSCH_LVL_SMQ: 3662 reg = NIX_AF_MDQ_CONST; 3663 break; 3664 case NIX_TXSCH_LVL_TL4: 3665 reg = NIX_AF_TL4_CONST; 3666 break; 3667 case NIX_TXSCH_LVL_TL3: 3668 reg = NIX_AF_TL3_CONST; 3669 break; 3670 case NIX_TXSCH_LVL_TL2: 3671 reg = NIX_AF_TL2_CONST; 3672 break; 3673 case NIX_TXSCH_LVL_TL1: 3674 reg = NIX_AF_TL1_CONST; 3675 break; 3676 } 3677 cfg = rvu_read64(rvu, blkaddr, reg); 3678 txsch->schq.max = cfg & 0xFFFF; 3679 err = rvu_alloc_bitmap(&txsch->schq); 3680 if (err) 3681 return err; 3682 3683 /* Allocate memory for scheduler queues to 3684 * PF/VF pcifunc mapping info. 
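		 * Each pfvf_map entry tracks the owning PF_FUNC plus state
		 * flags, packed via TXSCH_MAP(), e.g. TXSCH_MAP(pcifunc, 0)
		 * marks a queue as allocated but not yet configured
		 * (NIX_TXSCHQ_CFG_DONE not set).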
3685 */ 3686 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max, 3687 sizeof(u32), GFP_KERNEL); 3688 if (!txsch->pfvf_map) 3689 return -ENOMEM; 3690 for (schq = 0; schq < txsch->schq.max; schq++) 3691 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 3692 } 3693 3694 /* Setup a default value of 8192 as DWRR MTU */ 3695 if (rvu->hw->cap.nix_common_dwrr_mtu || 3696 rvu->hw->cap.nix_multiple_dwrr_mtu) { 3697 rvu_write64(rvu, blkaddr, 3698 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM), 3699 convert_bytes_to_dwrr_mtu(8192)); 3700 rvu_write64(rvu, blkaddr, 3701 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK), 3702 convert_bytes_to_dwrr_mtu(8192)); 3703 rvu_write64(rvu, blkaddr, 3704 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP), 3705 convert_bytes_to_dwrr_mtu(8192)); 3706 } 3707 3708 return 0; 3709 } 3710 3711 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw, 3712 int blkaddr, u32 cfg) 3713 { 3714 int fmt_idx; 3715 3716 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) { 3717 if (nix_hw->mark_format.cfg[fmt_idx] == cfg) 3718 return fmt_idx; 3719 } 3720 if (fmt_idx >= nix_hw->mark_format.total) 3721 return -ERANGE; 3722 3723 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg); 3724 nix_hw->mark_format.cfg[fmt_idx] = cfg; 3725 nix_hw->mark_format.in_use++; 3726 return fmt_idx; 3727 } 3728 3729 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw, 3730 int blkaddr) 3731 { 3732 u64 cfgs[] = { 3733 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003, 3734 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200, 3735 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203, 3736 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c, 3737 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00, 3738 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c, 3739 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008, 3740 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800, 3741 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808, 3742 }; 3743 int i, rc; 3744 u64 total; 3745 3746 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8; 3747 nix_hw->mark_format.total = (u8)total; 3748 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32), 3749 GFP_KERNEL); 3750 if (!nix_hw->mark_format.cfg) 3751 return -ENOMEM; 3752 for (i = 0; i < NIX_MARK_CFG_MAX; i++) { 3753 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]); 3754 if (rc < 0) 3755 dev_err(rvu->dev, "Err %d in setup mark format %d\n", 3756 i, rc); 3757 } 3758 3759 return 0; 3760 } 3761 3762 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu) 3763 { 3764 /* CN10K supports LBK FIFO size 72 KB */ 3765 if (rvu->hw->lbk_bufsize == 0x12000) 3766 *max_mtu = CN10K_LBK_LINK_MAX_FRS; 3767 else 3768 *max_mtu = NIC_HW_MAX_FRS; 3769 } 3770 3771 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu) 3772 { 3773 int fifo_size = rvu_cgx_get_fifolen(rvu); 3774 3775 /* RPM supports FIFO len 128 KB and RPM2 supports double the 3776 * FIFO len to accommodate 8 LMACS 3777 */ 3778 if (fifo_size == 0x20000 || fifo_size == 0x40000) 3779 *max_mtu = CN10K_LMAC_LINK_MAX_FRS; 3780 else 3781 *max_mtu = NIC_HW_MAX_FRS; 3782 } 3783 3784 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, 3785 struct nix_hw_info *rsp) 3786 { 3787 u16 pcifunc = req->hdr.pcifunc; 3788 u64 dwrr_mtu; 3789 int blkaddr; 3790 3791 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3792 if (blkaddr < 0) 3793 return NIX_AF_ERR_AF_LF_INVALID; 3794 3795 if (is_lbk_vf(rvu, pcifunc)) 3796 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu); 3797 else 3798 
rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu); 3799 3800 rsp->min_mtu = NIC_HW_MIN_FRS; 3801 3802 if (!rvu->hw->cap.nix_common_dwrr_mtu && 3803 !rvu->hw->cap.nix_multiple_dwrr_mtu) { 3804 /* Return '1' on OTx2 */ 3805 rsp->rpm_dwrr_mtu = 1; 3806 rsp->sdp_dwrr_mtu = 1; 3807 rsp->lbk_dwrr_mtu = 1; 3808 return 0; 3809 } 3810 3811 /* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */ 3812 dwrr_mtu = rvu_read64(rvu, blkaddr, 3813 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM)); 3814 rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); 3815 3816 dwrr_mtu = rvu_read64(rvu, blkaddr, 3817 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP)); 3818 rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); 3819 3820 dwrr_mtu = rvu_read64(rvu, blkaddr, 3821 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK)); 3822 rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); 3823 3824 return 0; 3825 } 3826 3827 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, 3828 struct msg_rsp *rsp) 3829 { 3830 u16 pcifunc = req->hdr.pcifunc; 3831 int i, nixlf, blkaddr, err; 3832 u64 stats; 3833 3834 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3835 if (err) 3836 return err; 3837 3838 /* Get stats count supported by HW */ 3839 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 3840 3841 /* Reset tx stats */ 3842 for (i = 0; i < ((stats >> 24) & 0xFF); i++) 3843 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0); 3844 3845 /* Reset rx stats */ 3846 for (i = 0; i < ((stats >> 32) & 0xFF); i++) 3847 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0); 3848 3849 return 0; 3850 } 3851 3852 /* Returns the ALG index to be set into NPC_RX_ACTION */ 3853 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg) 3854 { 3855 int i; 3856 3857 /* Scan over exiting algo entries to find a match */ 3858 for (i = 0; i < nix_hw->flowkey.in_use; i++) 3859 if (nix_hw->flowkey.flowkey[i] == flow_cfg) 3860 return i; 3861 3862 return -ERANGE; 3863 } 3864 3865 /* Mask to match ipv6(NPC_LT_LC_IP6) and ipv6 ext(NPC_LT_LC_IP6_EXT) */ 3866 #define NPC_LT_LC_IP6_MATCH_MSK ((~(NPC_LT_LC_IP6 ^ NPC_LT_LC_IP6_EXT)) & 0xf) 3867 /* Mask to match both ipv4(NPC_LT_LC_IP) and ipv4 ext(NPC_LT_LC_IP_OPT) */ 3868 #define NPC_LT_LC_IP_MATCH_MSK ((~(NPC_LT_LC_IP ^ NPC_LT_LC_IP_OPT)) & 0xf) 3869 3870 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) 3871 { 3872 int idx, nr_field, key_off, field_marker, keyoff_marker; 3873 int max_key_off, max_bit_pos, group_member; 3874 struct nix_rx_flowkey_alg *field; 3875 struct nix_rx_flowkey_alg tmp; 3876 u32 key_type, valid_key; 3877 u32 l3_l4_src_dst; 3878 int l4_key_offset = 0; 3879 3880 if (!alg) 3881 return -EINVAL; 3882 3883 #define FIELDS_PER_ALG 5 3884 #define MAX_KEY_OFF 40 3885 /* Clear all fields */ 3886 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG); 3887 3888 /* Each of the 32 possible flow key algorithm definitions should 3889 * fall into above incremental config (except ALG0). Otherwise a 3890 * single NPC MCAM entry is not sufficient for supporting RSS. 3891 * 3892 * If a different definition or combination needed then NPC MCAM 3893 * has to be programmed to filter such pkts and it's action should 3894 * point to this definition to calculate flowtag or hash. 3895 * 3896 * The `for loop` goes over _all_ protocol field and the following 3897 * variables depicts the state machine forward progress logic. 3898 * 3899 * keyoff_marker - Enabled when hash byte length needs to be accounted 3900 * in field->key_offset update. 
3901 * field_marker - Enabled when a new field needs to be selected. 3902 * group_member - Enabled when protocol is part of a group. 3903 */ 3904 3905 /* Last 4 bits (31:28) are reserved to specify SRC, DST 3906 * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST, 3907 * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST 3908 * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST 3909 */ 3910 l3_l4_src_dst = flow_cfg; 3911 /* Reset these 4 bits, so that these won't be part of key */ 3912 flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK; 3913 3914 keyoff_marker = 0; max_key_off = 0; group_member = 0; 3915 nr_field = 0; key_off = 0; field_marker = 1; 3916 field = &tmp; max_bit_pos = fls(flow_cfg); 3917 for (idx = 0; 3918 idx < max_bit_pos && nr_field < FIELDS_PER_ALG && 3919 key_off < MAX_KEY_OFF; idx++) { 3920 key_type = BIT(idx); 3921 valid_key = flow_cfg & key_type; 3922 /* Found a field marker, reset the field values */ 3923 if (field_marker) 3924 memset(&tmp, 0, sizeof(tmp)); 3925 3926 field_marker = true; 3927 keyoff_marker = true; 3928 switch (key_type) { 3929 case NIX_FLOW_KEY_TYPE_PORT: 3930 field->sel_chan = true; 3931 /* This should be set to 1, when SEL_CHAN is set */ 3932 field->bytesm1 = 1; 3933 break; 3934 case NIX_FLOW_KEY_TYPE_IPV4_PROTO: 3935 field->lid = NPC_LID_LC; 3936 field->hdr_offset = 9; /* offset */ 3937 field->bytesm1 = 0; /* 1 byte */ 3938 field->ltype_match = NPC_LT_LC_IP; 3939 field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK; 3940 break; 3941 case NIX_FLOW_KEY_TYPE_IPV4: 3942 case NIX_FLOW_KEY_TYPE_INNR_IPV4: 3943 field->lid = NPC_LID_LC; 3944 field->ltype_match = NPC_LT_LC_IP; 3945 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) { 3946 field->lid = NPC_LID_LG; 3947 field->ltype_match = NPC_LT_LG_TU_IP; 3948 } 3949 field->hdr_offset = 12; /* SIP offset */ 3950 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */ 3951 3952 /* Only SIP */ 3953 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY) 3954 field->bytesm1 = 3; /* SIP, 4 bytes */ 3955 3956 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) { 3957 /* Both SIP + DIP */ 3958 if (field->bytesm1 == 3) { 3959 field->bytesm1 = 7; /* SIP + DIP, 8B */ 3960 } else { 3961 /* Only DIP */ 3962 field->hdr_offset = 16; /* DIP off */ 3963 field->bytesm1 = 3; /* DIP, 4 bytes */ 3964 } 3965 } 3966 field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK; 3967 keyoff_marker = false; 3968 break; 3969 case NIX_FLOW_KEY_TYPE_IPV6: 3970 case NIX_FLOW_KEY_TYPE_INNR_IPV6: 3971 field->lid = NPC_LID_LC; 3972 field->ltype_match = NPC_LT_LC_IP6; 3973 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) { 3974 field->lid = NPC_LID_LG; 3975 field->ltype_match = NPC_LT_LG_TU_IP6; 3976 } 3977 field->hdr_offset = 8; /* SIP offset */ 3978 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */ 3979 3980 /* Only SIP */ 3981 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY) 3982 field->bytesm1 = 15; /* SIP, 16 bytes */ 3983 3984 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) { 3985 /* Both SIP + DIP */ 3986 if (field->bytesm1 == 15) { 3987 /* SIP + DIP, 32 bytes */ 3988 field->bytesm1 = 31; 3989 } else { 3990 /* Only DIP */ 3991 field->hdr_offset = 24; /* DIP off */ 3992 field->bytesm1 = 15; /* DIP,16 bytes */ 3993 } 3994 } 3995 field->ltype_mask = NPC_LT_LC_IP6_MATCH_MSK; 3996 break; 3997 case NIX_FLOW_KEY_TYPE_TCP: 3998 case NIX_FLOW_KEY_TYPE_UDP: 3999 case NIX_FLOW_KEY_TYPE_SCTP: 4000 case NIX_FLOW_KEY_TYPE_INNR_TCP: 4001 case NIX_FLOW_KEY_TYPE_INNR_UDP: 4002 case NIX_FLOW_KEY_TYPE_INNR_SCTP: 4003 field->lid = NPC_LID_LD; 4004 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP || 4005 key_type == 
NIX_FLOW_KEY_TYPE_INNR_UDP || 4006 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) 4007 field->lid = NPC_LID_LH; 4008 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */ 4009 4010 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY) 4011 field->bytesm1 = 1; /* SRC, 2 bytes */ 4012 4013 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) { 4014 /* Both SRC + DST */ 4015 if (field->bytesm1 == 1) { 4016 /* SRC + DST, 4 bytes */ 4017 field->bytesm1 = 3; 4018 } else { 4019 /* Only DIP */ 4020 field->hdr_offset = 2; /* DST off */ 4021 field->bytesm1 = 1; /* DST, 2 bytes */ 4022 } 4023 } 4024 4025 /* Enum values for NPC_LID_LD and NPC_LID_LG are same, 4026 * so no need to change the ltype_match, just change 4027 * the lid for inner protocols 4028 */ 4029 BUILD_BUG_ON((int)NPC_LT_LD_TCP != 4030 (int)NPC_LT_LH_TU_TCP); 4031 BUILD_BUG_ON((int)NPC_LT_LD_UDP != 4032 (int)NPC_LT_LH_TU_UDP); 4033 BUILD_BUG_ON((int)NPC_LT_LD_SCTP != 4034 (int)NPC_LT_LH_TU_SCTP); 4035 4036 if ((key_type == NIX_FLOW_KEY_TYPE_TCP || 4037 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) && 4038 valid_key) { 4039 field->ltype_match |= NPC_LT_LD_TCP; 4040 group_member = true; 4041 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP || 4042 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) && 4043 valid_key) { 4044 field->ltype_match |= NPC_LT_LD_UDP; 4045 group_member = true; 4046 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP || 4047 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) && 4048 valid_key) { 4049 field->ltype_match |= NPC_LT_LD_SCTP; 4050 group_member = true; 4051 } 4052 field->ltype_mask = ~field->ltype_match; 4053 if (key_type == NIX_FLOW_KEY_TYPE_SCTP || 4054 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) { 4055 /* Handle the case where any of the group item 4056 * is enabled in the group but not the final one 4057 */ 4058 if (group_member) { 4059 valid_key = true; 4060 group_member = false; 4061 } 4062 } else { 4063 field_marker = false; 4064 keyoff_marker = false; 4065 } 4066 4067 /* TCP/UDP/SCTP and ESP/AH falls at same offset so 4068 * remember the TCP key offset of 40 byte hash key. 
4069 */ 4070 if (key_type == NIX_FLOW_KEY_TYPE_TCP) 4071 l4_key_offset = key_off; 4072 break; 4073 case NIX_FLOW_KEY_TYPE_NVGRE: 4074 field->lid = NPC_LID_LD; 4075 field->hdr_offset = 4; /* VSID offset */ 4076 field->bytesm1 = 2; 4077 field->ltype_match = NPC_LT_LD_NVGRE; 4078 field->ltype_mask = 0xF; 4079 break; 4080 case NIX_FLOW_KEY_TYPE_VXLAN: 4081 case NIX_FLOW_KEY_TYPE_GENEVE: 4082 field->lid = NPC_LID_LE; 4083 field->bytesm1 = 2; 4084 field->hdr_offset = 4; 4085 field->ltype_mask = 0xF; 4086 field_marker = false; 4087 keyoff_marker = false; 4088 4089 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) { 4090 field->ltype_match |= NPC_LT_LE_VXLAN; 4091 group_member = true; 4092 } 4093 4094 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) { 4095 field->ltype_match |= NPC_LT_LE_GENEVE; 4096 group_member = true; 4097 } 4098 4099 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) { 4100 if (group_member) { 4101 field->ltype_mask = ~field->ltype_match; 4102 field_marker = true; 4103 keyoff_marker = true; 4104 valid_key = true; 4105 group_member = false; 4106 } 4107 } 4108 break; 4109 case NIX_FLOW_KEY_TYPE_ETH_DMAC: 4110 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC: 4111 field->lid = NPC_LID_LA; 4112 field->ltype_match = NPC_LT_LA_ETHER; 4113 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) { 4114 field->lid = NPC_LID_LF; 4115 field->ltype_match = NPC_LT_LF_TU_ETHER; 4116 } 4117 field->hdr_offset = 0; 4118 field->bytesm1 = 5; /* DMAC 6 Byte */ 4119 field->ltype_mask = 0xF; 4120 break; 4121 case NIX_FLOW_KEY_TYPE_IPV6_EXT: 4122 field->lid = NPC_LID_LC; 4123 field->hdr_offset = 40; /* IPV6 hdr */ 4124 field->bytesm1 = 0; /* 1 Byte ext hdr*/ 4125 field->ltype_match = NPC_LT_LC_IP6_EXT; 4126 field->ltype_mask = 0xF; 4127 break; 4128 case NIX_FLOW_KEY_TYPE_GTPU: 4129 field->lid = NPC_LID_LE; 4130 field->hdr_offset = 4; 4131 field->bytesm1 = 3; /* 4 bytes TID*/ 4132 field->ltype_match = NPC_LT_LE_GTPU; 4133 field->ltype_mask = 0xF; 4134 break; 4135 case NIX_FLOW_KEY_TYPE_CUSTOM0: 4136 field->lid = NPC_LID_LC; 4137 field->hdr_offset = 6; 4138 field->bytesm1 = 1; /* 2 Bytes*/ 4139 field->ltype_match = NPC_LT_LC_CUSTOM0; 4140 field->ltype_mask = 0xF; 4141 break; 4142 case NIX_FLOW_KEY_TYPE_VLAN: 4143 field->lid = NPC_LID_LB; 4144 field->hdr_offset = 2; /* Skip TPID (2-bytes) */ 4145 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */ 4146 field->ltype_match = NPC_LT_LB_CTAG; 4147 field->ltype_mask = 0xF; 4148 field->fn_mask = 1; /* Mask out the first nibble */ 4149 break; 4150 case NIX_FLOW_KEY_TYPE_AH: 4151 case NIX_FLOW_KEY_TYPE_ESP: 4152 field->hdr_offset = 0; 4153 field->bytesm1 = 7; /* SPI + sequence number */ 4154 field->ltype_mask = 0xF; 4155 field->lid = NPC_LID_LE; 4156 field->ltype_match = NPC_LT_LE_ESP; 4157 if (key_type == NIX_FLOW_KEY_TYPE_AH) { 4158 field->lid = NPC_LID_LD; 4159 field->ltype_match = NPC_LT_LD_AH; 4160 field->hdr_offset = 4; 4161 keyoff_marker = false; 4162 } 4163 break; 4164 } 4165 field->ena = 1; 4166 4167 /* Found a valid flow key type */ 4168 if (valid_key) { 4169 /* Use the key offset of TCP/UDP/SCTP fields 4170 * for ESP/AH fields. 
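 * For example, if the L4 ports were hashed at key offset 8 (an
 * illustrative value only), the ESP/AH SPI + sequence number field is
 * hashed from offset 8 as well instead of consuming a new slot in the
 * 40 byte key.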
4171 */
4172 if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
4173 key_type == NIX_FLOW_KEY_TYPE_AH)
4174 key_off = l4_key_offset;
4175 field->key_offset = key_off;
4176 memcpy(&alg[nr_field], field, sizeof(*field));
4177 max_key_off = max(max_key_off, field->bytesm1 + 1);
4178
4179 /* Found a field marker, get the next field */
4180 if (field_marker)
4181 nr_field++;
4182 }
4183
4184 /* Found a keyoff marker, update the new key_off */
4185 if (keyoff_marker) {
4186 key_off += max_key_off;
4187 max_key_off = 0;
4188 }
4189 }
4190 /* Processed all the flow key types */
4191 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
4192 return 0;
4193 else
4194 return NIX_AF_ERR_RSS_NOSPC_FIELD;
4195 }
4196
4197 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
4198 {
4199 u64 field[FIELDS_PER_ALG];
4200 struct nix_hw *hw;
4201 int fid, rc;
4202
4203 hw = get_nix_hw(rvu->hw, blkaddr);
4204 if (!hw)
4205 return NIX_AF_ERR_INVALID_NIXBLK;
4206
4207 /* No room to add a new flow hash algorithm */
4208 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
4209 return NIX_AF_ERR_RSS_NOSPC_ALGO;
4210
4211 /* Generate algo fields for the given flow_cfg */
4212 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
4213 if (rc)
4214 return rc;
4215
4216 /* Update ALGX_FIELDX register with generated fields */
4217 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
4218 rvu_write64(rvu, blkaddr,
4219 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
4220 fid), field[fid]);
4221
4222 /* Store the flow_cfg for further lookup */
4223 rc = hw->flowkey.in_use;
4224 hw->flowkey.flowkey[rc] = flow_cfg;
4225 hw->flowkey.in_use++;
4226
4227 return rc;
4228 }
4229
4230 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
4231 struct nix_rss_flowkey_cfg *req,
4232 struct nix_rss_flowkey_cfg_rsp *rsp)
4233 {
4234 u16 pcifunc = req->hdr.pcifunc;
4235 int alg_idx, nixlf, blkaddr;
4236 struct nix_hw *nix_hw;
4237 int err;
4238
4239 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
4240 if (err)
4241 return err;
4242
4243 nix_hw = get_nix_hw(rvu->hw, blkaddr);
4244 if (!nix_hw)
4245 return NIX_AF_ERR_INVALID_NIXBLK;
4246
4247 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
4248 /* Failed to get an algo index from the existing list, reserve a new one */
4249 if (alg_idx < 0) {
4250 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
4251 req->flowkey_cfg);
4252 if (alg_idx < 0)
4253 return alg_idx;
4254 }
4255 rsp->alg_idx = alg_idx;
4256 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
4257 alg_idx, req->mcam_index);
4258 return 0;
4259 }
4260
4261 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
4262 {
4263 u32 flowkey_cfg, minkey_cfg;
4264 int alg, fid, rc;
4265
4266 /* Disable all flow key algx fieldx */
4267 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
4268 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
4269 rvu_write64(rvu, blkaddr,
4270 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
4271 0);
4272 }
4273
4274 /* IPv4/IPv6 SIP/DIPs */
4275 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
4276 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4277 if (rc < 0)
4278 return rc;
4279
4280 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
4281 minkey_cfg = flowkey_cfg;
4282 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
4283 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
4284 if (rc < 0)
4285 return rc;
4286
4287 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
4288 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
4289 rc =
reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4290 if (rc < 0) 4291 return rc; 4292 4293 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 4294 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP; 4295 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4296 if (rc < 0) 4297 return rc; 4298 4299 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */ 4300 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 4301 NIX_FLOW_KEY_TYPE_UDP; 4302 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4303 if (rc < 0) 4304 return rc; 4305 4306 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 4307 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 4308 NIX_FLOW_KEY_TYPE_SCTP; 4309 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4310 if (rc < 0) 4311 return rc; 4312 4313 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 4314 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP | 4315 NIX_FLOW_KEY_TYPE_SCTP; 4316 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4317 if (rc < 0) 4318 return rc; 4319 4320 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 4321 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 4322 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP; 4323 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4324 if (rc < 0) 4325 return rc; 4326 4327 return 0; 4328 } 4329 4330 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, 4331 struct nix_set_mac_addr *req, 4332 struct msg_rsp *rsp) 4333 { 4334 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK; 4335 u16 pcifunc = req->hdr.pcifunc; 4336 int blkaddr, nixlf, err; 4337 struct rvu_pfvf *pfvf; 4338 4339 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 4340 if (err) 4341 return err; 4342 4343 pfvf = rvu_get_pfvf(rvu, pcifunc); 4344 4345 /* untrusted VF can't overwrite admin(PF) changes */ 4346 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 4347 (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) { 4348 dev_warn(rvu->dev, 4349 "MAC address set by admin(PF) cannot be overwritten by untrusted VF"); 4350 return -EPERM; 4351 } 4352 4353 ether_addr_copy(pfvf->mac_addr, req->mac_addr); 4354 4355 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 4356 pfvf->rx_chan_base, req->mac_addr); 4357 4358 if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf) 4359 ether_addr_copy(pfvf->default_mac, req->mac_addr); 4360 4361 rvu_switch_update_rules(rvu, pcifunc); 4362 4363 return 0; 4364 } 4365 4366 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu, 4367 struct msg_req *req, 4368 struct nix_get_mac_addr_rsp *rsp) 4369 { 4370 u16 pcifunc = req->hdr.pcifunc; 4371 struct rvu_pfvf *pfvf; 4372 4373 if (!is_nixlf_attached(rvu, pcifunc)) 4374 return NIX_AF_ERR_AF_LF_INVALID; 4375 4376 pfvf = rvu_get_pfvf(rvu, pcifunc); 4377 4378 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 4379 4380 return 0; 4381 } 4382 4383 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, 4384 struct msg_rsp *rsp) 4385 { 4386 bool allmulti, promisc, nix_rx_multicast; 4387 u16 pcifunc = req->hdr.pcifunc; 4388 struct rvu_pfvf *pfvf; 4389 int nixlf, err; 4390 4391 pfvf = rvu_get_pfvf(rvu, pcifunc); 4392 promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false; 4393 allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false; 4394 pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? 
true : false; 4395 4396 nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list; 4397 4398 if (is_vf(pcifunc) && !nix_rx_multicast && 4399 (promisc || allmulti)) { 4400 dev_warn_ratelimited(rvu->dev, 4401 "VF promisc/multicast not supported\n"); 4402 return 0; 4403 } 4404 4405 /* untrusted VF can't configure promisc/allmulti */ 4406 if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 4407 (promisc || allmulti)) 4408 return 0; 4409 4410 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 4411 if (err) 4412 return err; 4413 4414 if (nix_rx_multicast) { 4415 /* add/del this PF_FUNC to/from mcast pkt replication list */ 4416 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY, 4417 allmulti); 4418 if (err) { 4419 dev_err(rvu->dev, 4420 "Failed to update pcifunc 0x%x to multicast list\n", 4421 pcifunc); 4422 return err; 4423 } 4424 4425 /* add/del this PF_FUNC to/from promisc pkt replication list */ 4426 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY, 4427 promisc); 4428 if (err) { 4429 dev_err(rvu->dev, 4430 "Failed to update pcifunc 0x%x to promisc list\n", 4431 pcifunc); 4432 return err; 4433 } 4434 } 4435 4436 /* install/uninstall allmulti entry */ 4437 if (allmulti) { 4438 rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf, 4439 pfvf->rx_chan_base); 4440 } else { 4441 if (!nix_rx_multicast) 4442 rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false); 4443 } 4444 4445 /* install/uninstall promisc entry */ 4446 if (promisc) 4447 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 4448 pfvf->rx_chan_base, 4449 pfvf->rx_chan_cnt); 4450 else 4451 if (!nix_rx_multicast) 4452 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false); 4453 4454 return 0; 4455 } 4456 4457 static void nix_find_link_frs(struct rvu *rvu, 4458 struct nix_frs_cfg *req, u16 pcifunc) 4459 { 4460 int pf = rvu_get_pf(pcifunc); 4461 struct rvu_pfvf *pfvf; 4462 int maxlen, minlen; 4463 int numvfs, hwvf; 4464 int vf; 4465 4466 /* Update with requester's min/max lengths */ 4467 pfvf = rvu_get_pfvf(rvu, pcifunc); 4468 pfvf->maxlen = req->maxlen; 4469 if (req->update_minlen) 4470 pfvf->minlen = req->minlen; 4471 4472 maxlen = req->maxlen; 4473 minlen = req->update_minlen ? 
req->minlen : 0; 4474 4475 /* Get this PF's numVFs and starting hwvf */ 4476 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); 4477 4478 /* For each VF, compare requested max/minlen */ 4479 for (vf = 0; vf < numvfs; vf++) { 4480 pfvf = &rvu->hwvf[hwvf + vf]; 4481 if (pfvf->maxlen > maxlen) 4482 maxlen = pfvf->maxlen; 4483 if (req->update_minlen && 4484 pfvf->minlen && pfvf->minlen < minlen) 4485 minlen = pfvf->minlen; 4486 } 4487 4488 /* Compare requested max/minlen with PF's max/minlen */ 4489 pfvf = &rvu->pf[pf]; 4490 if (pfvf->maxlen > maxlen) 4491 maxlen = pfvf->maxlen; 4492 if (req->update_minlen && 4493 pfvf->minlen && pfvf->minlen < minlen) 4494 minlen = pfvf->minlen; 4495 4496 /* Update the request with max/min PF's and it's VF's max/min */ 4497 req->maxlen = maxlen; 4498 if (req->update_minlen) 4499 req->minlen = minlen; 4500 } 4501 4502 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, 4503 struct msg_rsp *rsp) 4504 { 4505 struct rvu_hwinfo *hw = rvu->hw; 4506 u16 pcifunc = req->hdr.pcifunc; 4507 int pf = rvu_get_pf(pcifunc); 4508 int blkaddr, link = -1; 4509 struct nix_hw *nix_hw; 4510 struct rvu_pfvf *pfvf; 4511 u8 cgx = 0, lmac = 0; 4512 u16 max_mtu; 4513 u64 cfg; 4514 4515 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 4516 if (blkaddr < 0) 4517 return NIX_AF_ERR_AF_LF_INVALID; 4518 4519 nix_hw = get_nix_hw(rvu->hw, blkaddr); 4520 if (!nix_hw) 4521 return NIX_AF_ERR_INVALID_NIXBLK; 4522 4523 if (is_lbk_vf(rvu, pcifunc)) 4524 rvu_get_lbk_link_max_frs(rvu, &max_mtu); 4525 else 4526 rvu_get_lmac_link_max_frs(rvu, &max_mtu); 4527 4528 if (!req->sdp_link && req->maxlen > max_mtu) 4529 return NIX_AF_ERR_FRS_INVALID; 4530 4531 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) 4532 return NIX_AF_ERR_FRS_INVALID; 4533 4534 /* Check if config is for SDP link */ 4535 if (req->sdp_link) { 4536 if (!hw->sdp_links) 4537 return NIX_AF_ERR_RX_LINK_INVALID; 4538 link = hw->cgx_links + hw->lbk_links; 4539 goto linkcfg; 4540 } 4541 4542 /* Check if the request is from CGX mapped RVU PF */ 4543 if (is_pf_cgxmapped(rvu, pf)) { 4544 /* Get CGX and LMAC to which this PF is mapped and find link */ 4545 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac); 4546 link = (cgx * hw->lmac_per_cgx) + lmac; 4547 } else if (pf == 0) { 4548 /* For VFs of PF0 ingress is LBK port, so config LBK link */ 4549 pfvf = rvu_get_pfvf(rvu, pcifunc); 4550 link = hw->cgx_links + pfvf->lbkid; 4551 } 4552 4553 if (link < 0) 4554 return NIX_AF_ERR_RX_LINK_INVALID; 4555 4556 linkcfg: 4557 nix_find_link_frs(rvu, req, pcifunc); 4558 4559 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); 4560 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); 4561 if (req->update_minlen) 4562 cfg = (cfg & ~0xFFFFULL) | req->minlen; 4563 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); 4564 4565 return 0; 4566 } 4567 4568 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, 4569 struct msg_rsp *rsp) 4570 { 4571 int nixlf, blkaddr, err; 4572 u64 cfg; 4573 4574 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr); 4575 if (err) 4576 return err; 4577 4578 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf)); 4579 /* Set the interface configuration */ 4580 if (req->len_verify & BIT(0)) 4581 cfg |= BIT_ULL(41); 4582 else 4583 cfg &= ~BIT_ULL(41); 4584 4585 if (req->len_verify & BIT(1)) 4586 cfg |= BIT_ULL(40); 4587 else 4588 cfg &= ~BIT_ULL(40); 4589 4590 if (req->len_verify & NIX_RX_DROP_RE) 4591 cfg |= BIT_ULL(32); 4592 else 4593 cfg &= 
~BIT_ULL(32); 4594 4595 if (req->csum_verify & BIT(0)) 4596 cfg |= BIT_ULL(37); 4597 else 4598 cfg &= ~BIT_ULL(37); 4599 4600 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg); 4601 4602 return 0; 4603 } 4604 4605 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs) 4606 { 4607 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */ 4608 } 4609 4610 static void nix_link_config(struct rvu *rvu, int blkaddr, 4611 struct nix_hw *nix_hw) 4612 { 4613 struct rvu_hwinfo *hw = rvu->hw; 4614 int cgx, lmac_cnt, slink, link; 4615 u16 lbk_max_frs, lmac_max_frs; 4616 unsigned long lmac_bmap; 4617 u64 tx_credits, cfg; 4618 u64 lmac_fifo_len; 4619 int iter; 4620 4621 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs); 4622 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs); 4623 4624 /* Set default min/max packet lengths allowed on NIX Rx links. 4625 * 4626 * With HW reset minlen value of 60byte, HW will treat ARP pkts 4627 * as undersize and report them to SW as error pkts, hence 4628 * setting it to 40 bytes. 4629 */ 4630 for (link = 0; link < hw->cgx_links; link++) { 4631 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 4632 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS); 4633 } 4634 4635 for (link = hw->cgx_links; link < hw->lbk_links; link++) { 4636 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 4637 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS); 4638 } 4639 if (hw->sdp_links) { 4640 link = hw->cgx_links + hw->lbk_links; 4641 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 4642 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); 4643 } 4644 4645 /* Get MCS external bypass status for CN10K-B */ 4646 if (mcs_get_blkcnt() == 1) { 4647 /* Adjust for 2 credits when external bypass is disabled */ 4648 nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2; 4649 } 4650 4651 /* Set credits for Tx links assuming max packet length allowed. 4652 * This will be reconfigured based on MTU set for PF/VF. 
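 *
 * Credits are computed below as (lmac_fifo_len - lmac_max_frs) / 16,
 * i.e. effectively in 16-byte units. As a purely illustrative example,
 * a 64 KB per-LMAC FIFO share with a 9212 byte max frame gives
 * (65536 - 9212) / 16 = 3520 credits.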
4653 */ 4654 for (cgx = 0; cgx < hw->cgx; cgx++) { 4655 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 4656 /* Skip when cgx is not available or lmac cnt is zero */ 4657 if (lmac_cnt <= 0) 4658 continue; 4659 slink = cgx * hw->lmac_per_cgx; 4660 4661 /* Get LMAC id's from bitmap */ 4662 lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu)); 4663 for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) { 4664 lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter); 4665 if (!lmac_fifo_len) { 4666 dev_err(rvu->dev, 4667 "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n", 4668 __func__, cgx, iter); 4669 continue; 4670 } 4671 tx_credits = (lmac_fifo_len - lmac_max_frs) / 16; 4672 /* Enable credits and set credit pkt count to max allowed */ 4673 cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 4674 cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt); 4675 4676 link = iter + slink; 4677 nix_hw->tx_credits[link] = tx_credits; 4678 rvu_write64(rvu, blkaddr, 4679 NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); 4680 } 4681 } 4682 4683 /* Set Tx credits for LBK link */ 4684 slink = hw->cgx_links; 4685 for (link = slink; link < (slink + hw->lbk_links); link++) { 4686 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs); 4687 nix_hw->tx_credits[link] = tx_credits; 4688 /* Enable credits and set credit pkt count to max allowed */ 4689 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 4690 rvu_write64(rvu, blkaddr, 4691 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); 4692 } 4693 } 4694 4695 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr) 4696 { 4697 int idx, err; 4698 u64 status; 4699 4700 /* Start X2P bus calibration */ 4701 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4702 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9)); 4703 /* Wait for calibration to complete */ 4704 err = rvu_poll_reg(rvu, blkaddr, 4705 NIX_AF_STATUS, BIT_ULL(10), false); 4706 if (err) { 4707 dev_err(rvu->dev, "NIX X2P bus calibration failed\n"); 4708 return err; 4709 } 4710 4711 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS); 4712 /* Check if CGX devices are ready */ 4713 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) { 4714 /* Skip when cgx port is not available */ 4715 if (!rvu_cgx_pdata(idx, rvu) || 4716 (status & (BIT_ULL(16 + idx)))) 4717 continue; 4718 dev_err(rvu->dev, 4719 "CGX%d didn't respond to NIX X2P calibration\n", idx); 4720 err = -EBUSY; 4721 } 4722 4723 /* Check if LBK is ready */ 4724 if (!(status & BIT_ULL(19))) { 4725 dev_err(rvu->dev, 4726 "LBK didn't respond to NIX X2P calibration\n"); 4727 err = -EBUSY; 4728 } 4729 4730 /* Clear 'calibrate_x2p' bit */ 4731 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4732 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9)); 4733 if (err || (status & 0x3FFULL)) 4734 dev_err(rvu->dev, 4735 "NIX X2P calibration failed, status 0x%llx\n", status); 4736 if (err) 4737 return err; 4738 return 0; 4739 } 4740 4741 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) 4742 { 4743 u64 cfg; 4744 int err; 4745 4746 /* Set admin queue endianness */ 4747 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG); 4748 #ifdef __BIG_ENDIAN 4749 cfg |= BIT_ULL(8); 4750 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 4751 #else 4752 cfg &= ~BIT_ULL(8); 4753 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 4754 #endif 4755 4756 /* Do not bypass NDC cache */ 4757 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG); 4758 cfg &= ~0x3FFEULL; 4759 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 4760 /* Disable caching of SQB aka SQEs */ 4761 cfg |= 0x04ULL; 4762 #endif 4763 
rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg); 4764 4765 /* Result structure can be followed by RQ/SQ/CQ context at 4766 * RES + 128bytes and a write mask at RES + 256 bytes, depending on 4767 * operation type. Alloc sufficient result memory for all operations. 4768 */ 4769 err = rvu_aq_alloc(rvu, &block->aq, 4770 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s), 4771 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256); 4772 if (err) 4773 return err; 4774 4775 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE); 4776 rvu_write64(rvu, block->addr, 4777 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova); 4778 return 0; 4779 } 4780 4781 static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr) 4782 { 4783 struct rvu_hwinfo *hw = rvu->hw; 4784 u64 hw_const; 4785 4786 hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 4787 4788 /* On OcteonTx2 DWRR quantum is directly configured into each of 4789 * the transmit scheduler queues. And PF/VF drivers were free to 4790 * config any value upto 2^24. 4791 * On CN10K, HW is modified, the quantum configuration at scheduler 4792 * queues is in terms of weight. And SW needs to setup a base DWRR MTU 4793 * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do 4794 * 'DWRR MTU * weight' to get the quantum. 4795 * 4796 * Check if HW uses a common MTU for all DWRR quantum configs. 4797 * On OcteonTx2 this register field is '0'. 4798 */ 4799 if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61))) 4800 hw->cap.nix_common_dwrr_mtu = true; 4801 4802 if (hw_const & BIT_ULL(61)) 4803 hw->cap.nix_multiple_dwrr_mtu = true; 4804 } 4805 4806 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) 4807 { 4808 const struct npc_lt_def_cfg *ltdefs; 4809 struct rvu_hwinfo *hw = rvu->hw; 4810 int blkaddr = nix_hw->blkaddr; 4811 struct rvu_block *block; 4812 int err; 4813 u64 cfg; 4814 4815 block = &hw->block[blkaddr]; 4816 4817 if (is_rvu_96xx_B0(rvu)) { 4818 /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt 4819 * internal state when conditional clocks are turned off. 4820 * Hence enable them. 4821 */ 4822 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4823 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL); 4824 } 4825 4826 /* Set chan/link to backpressure TL3 instead of TL2 */ 4827 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01); 4828 4829 /* Disable SQ manager's sticky mode operation (set TM6 = 0) 4830 * This sticky mode is known to cause SQ stalls when multiple 4831 * SQs are mapped to same SMQ and transmitting pkts at a time. 
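 * TM6 corresponds to bit 15 of NIX_AF_SQM_DBG_CTL_STATUS, which is
 * cleared just below.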
4832 */ 4833 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); 4834 cfg &= ~BIT_ULL(15); 4835 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg); 4836 4837 ltdefs = rvu->kpu.lt_def; 4838 /* Calibrate X2P bus to check if CGX/LBK links are fine */ 4839 err = nix_calibrate_x2p(rvu, blkaddr); 4840 if (err) 4841 return err; 4842 4843 /* Setup capabilities of the NIX block */ 4844 rvu_nix_setup_capabilities(rvu, blkaddr); 4845 4846 /* Initialize admin queue */ 4847 err = nix_aq_init(rvu, block); 4848 if (err) 4849 return err; 4850 4851 /* Restore CINT timer delay to HW reset values */ 4852 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL); 4853 4854 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG); 4855 4856 /* For better performance use NDC TX instead of NDC RX for SQ's SQEs" */ 4857 cfg |= 1ULL; 4858 if (!is_rvu_otx2(rvu)) 4859 cfg |= NIX_PTP_1STEP_EN; 4860 4861 rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg); 4862 4863 if (!is_rvu_otx2(rvu)) 4864 rvu_nix_block_cn10k_init(rvu, nix_hw); 4865 4866 if (is_block_implemented(hw, blkaddr)) { 4867 err = nix_setup_txschq(rvu, nix_hw, blkaddr); 4868 if (err) 4869 return err; 4870 4871 err = nix_setup_ipolicers(rvu, nix_hw, blkaddr); 4872 if (err) 4873 return err; 4874 4875 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr); 4876 if (err) 4877 return err; 4878 4879 err = nix_setup_mcast(rvu, nix_hw, blkaddr); 4880 if (err) 4881 return err; 4882 4883 err = nix_setup_txvlan(rvu, nix_hw); 4884 if (err) 4885 return err; 4886 4887 err = nix_setup_bpids(rvu, nix_hw, blkaddr); 4888 if (err) 4889 return err; 4890 4891 /* Configure segmentation offload formats */ 4892 nix_setup_lso(rvu, nix_hw, blkaddr); 4893 4894 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info. 4895 * This helps HW protocol checker to identify headers 4896 * and validate length and checksums. 
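 * Each NIX_AF_RX_DEF_* write below packs the layer definition taken
 * from the KPU profile's lt_def table (ltdefs above) as:
 *   (lid << 8) | (ltype_match << 4) | ltype_mask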
4897 */
4898 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
4899 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
4900 ltdefs->rx_ol2.ltype_mask);
4901 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
4902 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
4903 ltdefs->rx_oip4.ltype_mask);
4904 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
4905 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
4906 ltdefs->rx_iip4.ltype_mask);
4907 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
4908 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
4909 ltdefs->rx_oip6.ltype_mask);
4910 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
4911 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
4912 ltdefs->rx_iip6.ltype_mask);
4913 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
4914 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
4915 ltdefs->rx_otcp.ltype_mask);
4916 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
4917 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
4918 ltdefs->rx_itcp.ltype_mask);
4919 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
4920 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
4921 ltdefs->rx_oudp.ltype_mask);
4922 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
4923 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
4924 ltdefs->rx_iudp.ltype_mask);
4925 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
4926 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
4927 ltdefs->rx_osctp.ltype_mask);
4928 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
4929 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
4930 ltdefs->rx_isctp.ltype_mask);
4931
4932 if (!is_rvu_otx2(rvu)) {
4933 /* Enable APAD calculation for other protocols
4934 * matching APAD0 and APAD1 lt def registers.
4935 */
4936 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
4937 (ltdefs->rx_apad0.valid << 11) |
4938 (ltdefs->rx_apad0.lid << 8) |
4939 (ltdefs->rx_apad0.ltype_match << 4) |
4940 ltdefs->rx_apad0.ltype_mask);
4941 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
4942 (ltdefs->rx_apad1.valid << 11) |
4943 (ltdefs->rx_apad1.lid << 8) |
4944 (ltdefs->rx_apad1.ltype_match << 4) |
4945 ltdefs->rx_apad1.ltype_mask);
4946
4947 /* Receive ethertype definition register defines layer
4948 * information in NPC_RESULT_S to identify the Ethertype
4949 * location in the L2 header. Used for Ethertype overwriting
4950 * in inline IPsec flow.
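 * In addition to the usual lid/ltype_match/ltype_mask fields, the
 * NIX_AF_RX_DEF_ET() value written below also carries the Ethertype
 * byte offset (shifted to bit 12) and a valid bit (bit 11).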
4951 */ 4952 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0), 4953 (ltdefs->rx_et[0].offset << 12) | 4954 (ltdefs->rx_et[0].valid << 11) | 4955 (ltdefs->rx_et[0].lid << 8) | 4956 (ltdefs->rx_et[0].ltype_match << 4) | 4957 ltdefs->rx_et[0].ltype_mask); 4958 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1), 4959 (ltdefs->rx_et[1].offset << 12) | 4960 (ltdefs->rx_et[1].valid << 11) | 4961 (ltdefs->rx_et[1].lid << 8) | 4962 (ltdefs->rx_et[1].ltype_match << 4) | 4963 ltdefs->rx_et[1].ltype_mask); 4964 } 4965 4966 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr); 4967 if (err) 4968 return err; 4969 4970 nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links, 4971 sizeof(u64), GFP_KERNEL); 4972 if (!nix_hw->tx_credits) 4973 return -ENOMEM; 4974 4975 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ 4976 nix_link_config(rvu, blkaddr, nix_hw); 4977 4978 /* Enable Channel backpressure */ 4979 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0)); 4980 } 4981 return 0; 4982 } 4983 4984 int rvu_nix_init(struct rvu *rvu) 4985 { 4986 struct rvu_hwinfo *hw = rvu->hw; 4987 struct nix_hw *nix_hw; 4988 int blkaddr = 0, err; 4989 int i = 0; 4990 4991 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw), 4992 GFP_KERNEL); 4993 if (!hw->nix) 4994 return -ENOMEM; 4995 4996 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 4997 while (blkaddr) { 4998 nix_hw = &hw->nix[i]; 4999 nix_hw->rvu = rvu; 5000 nix_hw->blkaddr = blkaddr; 5001 err = rvu_nix_block_init(rvu, nix_hw); 5002 if (err) 5003 return err; 5004 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 5005 i++; 5006 } 5007 5008 return 0; 5009 } 5010 5011 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr, 5012 struct rvu_block *block) 5013 { 5014 struct nix_txsch *txsch; 5015 struct nix_mcast *mcast; 5016 struct nix_txvlan *vlan; 5017 struct nix_hw *nix_hw; 5018 int lvl; 5019 5020 rvu_aq_free(rvu, block->aq); 5021 5022 if (is_block_implemented(rvu->hw, blkaddr)) { 5023 nix_hw = get_nix_hw(rvu->hw, blkaddr); 5024 if (!nix_hw) 5025 return; 5026 5027 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 5028 txsch = &nix_hw->txsch[lvl]; 5029 kfree(txsch->schq.bmap); 5030 } 5031 5032 kfree(nix_hw->tx_credits); 5033 5034 nix_ipolicer_freemem(rvu, nix_hw); 5035 5036 vlan = &nix_hw->txvlan; 5037 kfree(vlan->rsrc.bmap); 5038 mutex_destroy(&vlan->rsrc_lock); 5039 5040 mcast = &nix_hw->mcast; 5041 qmem_free(rvu->dev, mcast->mce_ctx); 5042 qmem_free(rvu->dev, mcast->mcast_buf); 5043 mutex_destroy(&mcast->mce_lock); 5044 } 5045 } 5046 5047 void rvu_nix_freemem(struct rvu *rvu) 5048 { 5049 struct rvu_hwinfo *hw = rvu->hw; 5050 struct rvu_block *block; 5051 int blkaddr = 0; 5052 5053 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 5054 while (blkaddr) { 5055 block = &hw->block[blkaddr]; 5056 rvu_nix_block_freemem(rvu, blkaddr, block); 5057 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 5058 } 5059 } 5060 5061 static void nix_mcast_update_action(struct rvu *rvu, 5062 struct nix_mcast_grp_elem *elem) 5063 { 5064 struct npc_mcam *mcam = &rvu->hw->mcam; 5065 struct nix_rx_action rx_action = { 0 }; 5066 struct nix_tx_action tx_action = { 0 }; 5067 int npc_blkaddr; 5068 5069 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 5070 if (elem->dir == NIX_MCAST_INGRESS) { 5071 *(u64 *)&rx_action = npc_get_mcam_action(rvu, mcam, 5072 npc_blkaddr, 5073 elem->mcam_index); 5074 rx_action.index = elem->mce_start_index; 5075 npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index, 5076 *(u64 *)&rx_action); 5077 } else { 5078 *(u64 *)&tx_action = 
npc_get_mcam_action(rvu, mcam, 5079 npc_blkaddr, 5080 elem->mcam_index); 5081 tx_action.index = elem->mce_start_index; 5082 npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index, 5083 *(u64 *)&tx_action); 5084 } 5085 } 5086 5087 static void nix_mcast_update_mce_entry(struct rvu *rvu, u16 pcifunc, u8 is_active) 5088 { 5089 struct nix_mcast_grp_elem *elem; 5090 struct nix_mcast_grp *mcast_grp; 5091 struct nix_hw *nix_hw; 5092 int blkaddr; 5093 5094 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 5095 nix_hw = get_nix_hw(rvu->hw, blkaddr); 5096 if (!nix_hw) 5097 return; 5098 5099 mcast_grp = &nix_hw->mcast_grp; 5100 5101 mutex_lock(&mcast_grp->mcast_grp_lock); 5102 list_for_each_entry(elem, &mcast_grp->mcast_grp_head, list) { 5103 struct nix_mce_list *mce_list; 5104 struct mce *mce; 5105 5106 /* Iterate the group elements and disable the element which 5107 * received the disable request. 5108 */ 5109 mce_list = &elem->mcast_mce_list; 5110 hlist_for_each_entry(mce, &mce_list->head, node) { 5111 if (mce->pcifunc == pcifunc) { 5112 mce->is_active = is_active; 5113 break; 5114 } 5115 } 5116 5117 /* Dump the updated list to HW */ 5118 if (elem->dir == NIX_MCAST_INGRESS) 5119 nix_update_ingress_mce_list_hw(rvu, nix_hw, elem); 5120 else 5121 nix_update_egress_mce_list_hw(rvu, nix_hw, elem); 5122 5123 /* Update the multicast index in NPC rule */ 5124 nix_mcast_update_action(rvu, elem); 5125 } 5126 mutex_unlock(&mcast_grp->mcast_grp_lock); 5127 } 5128 5129 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, 5130 struct msg_rsp *rsp) 5131 { 5132 u16 pcifunc = req->hdr.pcifunc; 5133 struct rvu_pfvf *pfvf; 5134 int nixlf, err; 5135 5136 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 5137 if (err) 5138 return err; 5139 5140 /* Enable the interface if it is in any multicast list */ 5141 nix_mcast_update_mce_entry(rvu, pcifunc, 1); 5142 5143 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); 5144 5145 npc_mcam_enable_flows(rvu, pcifunc); 5146 5147 pfvf = rvu_get_pfvf(rvu, pcifunc); 5148 set_bit(NIXLF_INITIALIZED, &pfvf->flags); 5149 5150 rvu_switch_update_rules(rvu, pcifunc); 5151 5152 return rvu_cgx_start_stop_io(rvu, pcifunc, true); 5153 } 5154 5155 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, 5156 struct msg_rsp *rsp) 5157 { 5158 u16 pcifunc = req->hdr.pcifunc; 5159 struct rvu_pfvf *pfvf; 5160 int nixlf, err; 5161 5162 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 5163 if (err) 5164 return err; 5165 5166 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 5167 /* Disable the interface if it is in any multicast list */ 5168 nix_mcast_update_mce_entry(rvu, pcifunc, 0); 5169 5170 5171 pfvf = rvu_get_pfvf(rvu, pcifunc); 5172 clear_bit(NIXLF_INITIALIZED, &pfvf->flags); 5173 5174 err = rvu_cgx_start_stop_io(rvu, pcifunc, false); 5175 if (err) 5176 return err; 5177 5178 rvu_cgx_tx_enable(rvu, pcifunc, true); 5179 5180 return 0; 5181 } 5182 5183 #define RX_SA_BASE GENMASK_ULL(52, 7) 5184 5185 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) 5186 { 5187 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 5188 struct hwctx_disable_req ctx_req; 5189 int pf = rvu_get_pf(pcifunc); 5190 struct mac_ops *mac_ops; 5191 u8 cgx_id, lmac_id; 5192 u64 sa_base; 5193 void *cgxd; 5194 int err; 5195 5196 ctx_req.hdr.pcifunc = pcifunc; 5197 5198 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */ 5199 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 5200 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); 5201 
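/* Deinit the LF's interface config, sync in-flight RX traffic and
 * release its TX scheduler queues.
 */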
nix_interface_deinit(rvu, pcifunc, nixlf); 5202 nix_rx_sync(rvu, blkaddr); 5203 nix_txschq_free(rvu, pcifunc); 5204 5205 clear_bit(NIXLF_INITIALIZED, &pfvf->flags); 5206 5207 rvu_cgx_start_stop_io(rvu, pcifunc, false); 5208 5209 if (pfvf->sq_ctx) { 5210 ctx_req.ctype = NIX_AQ_CTYPE_SQ; 5211 err = nix_lf_hwctx_disable(rvu, &ctx_req); 5212 if (err) 5213 dev_err(rvu->dev, "SQ ctx disable failed\n"); 5214 } 5215 5216 if (pfvf->rq_ctx) { 5217 ctx_req.ctype = NIX_AQ_CTYPE_RQ; 5218 err = nix_lf_hwctx_disable(rvu, &ctx_req); 5219 if (err) 5220 dev_err(rvu->dev, "RQ ctx disable failed\n"); 5221 } 5222 5223 if (pfvf->cq_ctx) { 5224 ctx_req.ctype = NIX_AQ_CTYPE_CQ; 5225 err = nix_lf_hwctx_disable(rvu, &ctx_req); 5226 if (err) 5227 dev_err(rvu->dev, "CQ ctx disable failed\n"); 5228 } 5229 5230 /* reset HW config done for Switch headers */ 5231 rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT, 5232 (PKIND_TX | PKIND_RX), 0, 0, 0, 0); 5233 5234 /* Disabling CGX and NPC config done for PTP */ 5235 if (pfvf->hw_rx_tstamp_en) { 5236 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 5237 cgxd = rvu_cgx_pdata(cgx_id, rvu); 5238 mac_ops = get_mac_ops(cgxd); 5239 mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false); 5240 /* Undo NPC config done for PTP */ 5241 if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false)) 5242 dev_err(rvu->dev, "NPC config for PTP failed\n"); 5243 pfvf->hw_rx_tstamp_en = false; 5244 } 5245 5246 /* reset priority flow control config */ 5247 rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0); 5248 5249 /* reset 802.3x flow control config */ 5250 rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0); 5251 5252 nix_ctx_free(rvu, pfvf); 5253 5254 nix_free_all_bandprof(rvu, pcifunc); 5255 5256 sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf)); 5257 if (FIELD_GET(RX_SA_BASE, sa_base)) { 5258 err = rvu_cpt_ctx_flush(rvu, pcifunc); 5259 if (err) 5260 dev_err(rvu->dev, 5261 "CPT ctx flush failed with error: %d\n", err); 5262 } 5263 } 5264 5265 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32) 5266 5267 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) 5268 { 5269 struct rvu_hwinfo *hw = rvu->hw; 5270 struct rvu_block *block; 5271 int blkaddr, pf; 5272 int nixlf; 5273 u64 cfg; 5274 5275 pf = rvu_get_pf(pcifunc); 5276 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) 5277 return 0; 5278 5279 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 5280 if (blkaddr < 0) 5281 return NIX_AF_ERR_AF_LF_INVALID; 5282 5283 block = &hw->block[blkaddr]; 5284 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 5285 if (nixlf < 0) 5286 return NIX_AF_ERR_AF_LF_INVALID; 5287 5288 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf)); 5289 5290 if (enable) 5291 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN; 5292 else 5293 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN; 5294 5295 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 5296 5297 return 0; 5298 } 5299 5300 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req, 5301 struct msg_rsp *rsp) 5302 { 5303 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true); 5304 } 5305 5306 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req, 5307 struct msg_rsp *rsp) 5308 { 5309 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false); 5310 } 5311 5312 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, 5313 struct nix_lso_format_cfg *req, 5314 struct nix_lso_format_cfg_rsp *rsp) 5315 { 5316 u16 pcifunc = req->hdr.pcifunc; 5317 struct nix_hw *nix_hw; 5318 struct rvu_pfvf 
*pfvf; 5319 int blkaddr, idx, f; 5320 u64 reg; 5321 5322 pfvf = rvu_get_pfvf(rvu, pcifunc); 5323 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 5324 if (!pfvf->nixlf || blkaddr < 0) 5325 return NIX_AF_ERR_AF_LF_INVALID; 5326 5327 nix_hw = get_nix_hw(rvu->hw, blkaddr); 5328 if (!nix_hw) 5329 return NIX_AF_ERR_INVALID_NIXBLK; 5330 5331 /* Find existing matching LSO format, if any */ 5332 for (idx = 0; idx < nix_hw->lso.in_use; idx++) { 5333 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) { 5334 reg = rvu_read64(rvu, blkaddr, 5335 NIX_AF_LSO_FORMATX_FIELDX(idx, f)); 5336 if (req->fields[f] != (reg & req->field_mask)) 5337 break; 5338 } 5339 5340 if (f == NIX_LSO_FIELD_MAX) 5341 break; 5342 } 5343 5344 if (idx < nix_hw->lso.in_use) { 5345 /* Match found */ 5346 rsp->lso_format_idx = idx; 5347 return 0; 5348 } 5349 5350 if (nix_hw->lso.in_use == nix_hw->lso.total) 5351 return NIX_AF_ERR_LSO_CFG_FAIL; 5352 5353 rsp->lso_format_idx = nix_hw->lso.in_use++; 5354 5355 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) 5356 rvu_write64(rvu, blkaddr, 5357 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f), 5358 req->fields[f]); 5359 5360 return 0; 5361 } 5362 5363 #define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48) 5364 #define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32) 5365 #define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16) 5366 #define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0) 5367 5368 #define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24) 5369 #define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8) 5370 #define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0) 5371 5372 #define CPT_INST_CREDIT_TH GENMASK_ULL(53, 32) 5373 #define CPT_INST_CREDIT_BPID GENMASK_ULL(30, 22) 5374 #define CPT_INST_CREDIT_CNT GENMASK_ULL(21, 0) 5375 5376 static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req, 5377 int blkaddr) 5378 { 5379 u8 cpt_idx, cpt_blkaddr; 5380 u64 val; 5381 5382 cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1; 5383 if (req->enable) { 5384 val = 0; 5385 /* Enable context prefetching */ 5386 if (!is_rvu_otx2(rvu)) 5387 val |= BIT_ULL(51); 5388 5389 /* Set OPCODE and EGRP */ 5390 val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp); 5391 val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode); 5392 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1); 5393 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2); 5394 5395 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val); 5396 5397 /* Set CPT queue for inline IPSec */ 5398 val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot); 5399 val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC, 5400 req->inst_qsel.cpt_pf_func); 5401 5402 if (!is_rvu_otx2(rvu)) { 5403 cpt_blkaddr = (cpt_idx == 0) ? 
BLKADDR_CPT0 : 5404 BLKADDR_CPT1; 5405 val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr); 5406 } 5407 5408 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), 5409 val); 5410 5411 /* Set CPT credit */ 5412 val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx)); 5413 if ((val & 0x3FFFFF) != 0x3FFFFF) 5414 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), 5415 0x3FFFFF - val); 5416 5417 val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit); 5418 val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid); 5419 val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th); 5420 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val); 5421 } else { 5422 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0); 5423 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), 5424 0x0); 5425 val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx)); 5426 if ((val & 0x3FFFFF) != 0x3FFFFF) 5427 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), 5428 0x3FFFFF - val); 5429 } 5430 } 5431 5432 int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu, 5433 struct nix_inline_ipsec_cfg *req, 5434 struct msg_rsp *rsp) 5435 { 5436 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) 5437 return 0; 5438 5439 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0); 5440 if (is_block_implemented(rvu->hw, BLKADDR_CPT1)) 5441 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1); 5442 5443 return 0; 5444 } 5445 5446 int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu, 5447 struct msg_req *req, 5448 struct nix_inline_ipsec_cfg *rsp) 5449 5450 { 5451 u64 val; 5452 5453 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) 5454 return 0; 5455 5456 val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG); 5457 rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val); 5458 rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val); 5459 rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val); 5460 rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val); 5461 5462 val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0)); 5463 rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val); 5464 rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val); 5465 rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val); 5466 5467 return 0; 5468 } 5469 5470 int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu, 5471 struct nix_inline_ipsec_lf_cfg *req, 5472 struct msg_rsp *rsp) 5473 { 5474 int lf, blkaddr, err; 5475 u64 val; 5476 5477 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) 5478 return 0; 5479 5480 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr); 5481 if (err) 5482 return err; 5483 5484 if (req->enable) { 5485 /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */ 5486 val = (u64)req->ipsec_cfg0.tt << 44 | 5487 (u64)req->ipsec_cfg0.tag_const << 20 | 5488 (u64)req->ipsec_cfg0.sa_pow2_size << 16 | 5489 req->ipsec_cfg0.lenm1_max; 5490 5491 if (blkaddr == BLKADDR_NIX1) 5492 val |= BIT_ULL(46); 5493 5494 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val); 5495 5496 /* Set SA_IDX_W and SA_IDX_MAX */ 5497 val = (u64)req->ipsec_cfg1.sa_idx_w << 32 | 5498 req->ipsec_cfg1.sa_idx_max; 5499 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val); 5500 5501 /* Set SA base address */ 5502 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), 5503 req->sa_base_addr); 5504 } else { 5505 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0); 5506 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0); 5507 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), 5508 0x0); 5509 
} 5510 5511 return 0; 5512 } 5513 5514 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc) 5515 { 5516 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); 5517 5518 /* overwrite vf mac address with default_mac */ 5519 if (from_vf) 5520 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac); 5521 } 5522 5523 /* NIX ingress policers or bandwidth profiles APIs */ 5524 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr) 5525 { 5526 struct npc_lt_def_cfg defs, *ltdefs; 5527 5528 ltdefs = &defs; 5529 memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg)); 5530 5531 /* Extract PCP and DEI fields from outer VLAN from byte offset 5532 * 2 from the start of LB_PTR (ie TAG). 5533 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN 5534 * fields are considered when 'Tunnel enable' is set in profile. 5535 */ 5536 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI, 5537 (2UL << 12) | (ltdefs->ovlan.lid << 8) | 5538 (ltdefs->ovlan.ltype_match << 4) | 5539 ltdefs->ovlan.ltype_mask); 5540 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI, 5541 (2UL << 12) | (ltdefs->ivlan.lid << 8) | 5542 (ltdefs->ivlan.ltype_match << 4) | 5543 ltdefs->ivlan.ltype_mask); 5544 5545 /* DSCP field in outer and tunneled IPv4 packets */ 5546 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP, 5547 (1UL << 12) | (ltdefs->rx_oip4.lid << 8) | 5548 (ltdefs->rx_oip4.ltype_match << 4) | 5549 ltdefs->rx_oip4.ltype_mask); 5550 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP, 5551 (1UL << 12) | (ltdefs->rx_iip4.lid << 8) | 5552 (ltdefs->rx_iip4.ltype_match << 4) | 5553 ltdefs->rx_iip4.ltype_mask); 5554 5555 /* DSCP field (traffic class) in outer and tunneled IPv6 packets */ 5556 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP, 5557 (1UL << 11) | (ltdefs->rx_oip6.lid << 8) | 5558 (ltdefs->rx_oip6.ltype_match << 4) | 5559 ltdefs->rx_oip6.ltype_mask); 5560 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP, 5561 (1UL << 11) | (ltdefs->rx_iip6.lid << 8) | 5562 (ltdefs->rx_iip6.ltype_match << 4) | 5563 ltdefs->rx_iip6.ltype_mask); 5564 } 5565 5566 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw, 5567 int layer, int prof_idx) 5568 { 5569 struct nix_cn10k_aq_enq_req aq_req; 5570 int rc; 5571 5572 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 5573 5574 aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14); 5575 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; 5576 aq_req.op = NIX_AQ_INSTOP_INIT; 5577 5578 /* Context is all zeros, submit to AQ */ 5579 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 5580 (struct nix_aq_enq_req *)&aq_req, NULL); 5581 if (rc) 5582 dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n", 5583 layer, prof_idx); 5584 return rc; 5585 } 5586 5587 static int nix_setup_ipolicers(struct rvu *rvu, 5588 struct nix_hw *nix_hw, int blkaddr) 5589 { 5590 struct rvu_hwinfo *hw = rvu->hw; 5591 struct nix_ipolicer *ipolicer; 5592 int err, layer, prof_idx; 5593 u64 cfg; 5594 5595 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); 5596 if (!(cfg & BIT_ULL(61))) { 5597 hw->cap.ipolicer = false; 5598 return 0; 5599 } 5600 5601 hw->cap.ipolicer = true; 5602 nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS, 5603 sizeof(*ipolicer), GFP_KERNEL); 5604 if (!nix_hw->ipolicer) 5605 return -ENOMEM; 5606 5607 cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST); 5608 5609 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5610 ipolicer = &nix_hw->ipolicer[layer]; 5611 switch (layer) { 5612 case BAND_PROF_LEAF_LAYER: 5613 ipolicer->band_prof.max = cfg & 
0XFFFF; 5614 break; 5615 case BAND_PROF_MID_LAYER: 5616 ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF; 5617 break; 5618 case BAND_PROF_TOP_LAYER: 5619 ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF; 5620 break; 5621 } 5622 5623 if (!ipolicer->band_prof.max) 5624 continue; 5625 5626 err = rvu_alloc_bitmap(&ipolicer->band_prof); 5627 if (err) 5628 return err; 5629 5630 ipolicer->pfvf_map = devm_kcalloc(rvu->dev, 5631 ipolicer->band_prof.max, 5632 sizeof(u16), GFP_KERNEL); 5633 if (!ipolicer->pfvf_map) 5634 return -ENOMEM; 5635 5636 ipolicer->match_id = devm_kcalloc(rvu->dev, 5637 ipolicer->band_prof.max, 5638 sizeof(u16), GFP_KERNEL); 5639 if (!ipolicer->match_id) 5640 return -ENOMEM; 5641 5642 for (prof_idx = 0; 5643 prof_idx < ipolicer->band_prof.max; prof_idx++) { 5644 /* Set AF as current owner for INIT ops to succeed */ 5645 ipolicer->pfvf_map[prof_idx] = 0x00; 5646 5647 /* There is no enable bit in the profile context, 5648 * so no context disable. So let's INIT them here 5649 * so that PF/VF later on have to just do WRITE to 5650 * setup policer rates and config. 5651 */ 5652 err = nix_init_policer_context(rvu, nix_hw, 5653 layer, prof_idx); 5654 if (err) 5655 return err; 5656 } 5657 5658 /* Allocate memory for maintaining ref_counts for MID level 5659 * profiles, this will be needed for leaf layer profiles' 5660 * aggregation. 5661 */ 5662 if (layer != BAND_PROF_MID_LAYER) 5663 continue; 5664 5665 ipolicer->ref_count = devm_kcalloc(rvu->dev, 5666 ipolicer->band_prof.max, 5667 sizeof(u16), GFP_KERNEL); 5668 if (!ipolicer->ref_count) 5669 return -ENOMEM; 5670 } 5671 5672 /* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */ 5673 rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19); 5674 5675 nix_config_rx_pkt_policer_precolor(rvu, blkaddr); 5676 5677 return 0; 5678 } 5679 5680 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw) 5681 { 5682 struct nix_ipolicer *ipolicer; 5683 int layer; 5684 5685 if (!rvu->hw->cap.ipolicer) 5686 return; 5687 5688 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5689 ipolicer = &nix_hw->ipolicer[layer]; 5690 5691 if (!ipolicer->band_prof.max) 5692 continue; 5693 5694 kfree(ipolicer->band_prof.bmap); 5695 } 5696 } 5697 5698 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, 5699 struct nix_hw *nix_hw, u16 pcifunc) 5700 { 5701 struct nix_ipolicer *ipolicer; 5702 int layer, hi_layer, prof_idx; 5703 5704 /* Bits [15:14] in profile index represent layer */ 5705 layer = (req->qidx >> 14) & 0x03; 5706 prof_idx = req->qidx & 0x3FFF; 5707 5708 ipolicer = &nix_hw->ipolicer[layer]; 5709 if (prof_idx >= ipolicer->band_prof.max) 5710 return -EINVAL; 5711 5712 /* Check if the profile is allocated to the requesting PCIFUNC or not 5713 * with the exception of AF. AF is allowed to read and update contexts. 5714 */ 5715 if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc) 5716 return -EINVAL; 5717 5718 /* If this profile is linked to higher layer profile then check 5719 * if that profile is also allocated to the requesting PCIFUNC 5720 * or not. 5721 */ 5722 if (!req->prof.hl_en) 5723 return 0; 5724 5725 /* Leaf layer profile can link only to mid layer and 5726 * mid layer to top layer. 
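 * i.e. the allowed linkage is leaf -> mid -> top; a top layer profile
 * has no higher layer to link to, hence the -EINVAL below.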
static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
			       struct nix_hw *nix_hw, u16 pcifunc)
{
	struct nix_ipolicer *ipolicer;
	int layer, hi_layer, prof_idx;

	/* Bits [15:14] in profile index represent layer */
	layer = (req->qidx >> 14) & 0x03;
	prof_idx = req->qidx & 0x3FFF;

	ipolicer = &nix_hw->ipolicer[layer];
	if (prof_idx >= ipolicer->band_prof.max)
		return -EINVAL;

	/* Check if the profile is allocated to the requesting PCIFUNC or not
	 * with the exception of AF. AF is allowed to read and update contexts.
	 */
	if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
		return -EINVAL;

	/* If this profile is linked to higher layer profile then check
	 * if that profile is also allocated to the requesting PCIFUNC
	 * or not.
	 */
	if (!req->prof.hl_en)
		return 0;

	/* Leaf layer profile can link only to mid layer and
	 * mid layer to top layer.
	 */
	if (layer == BAND_PROF_LEAF_LAYER)
		hi_layer = BAND_PROF_MID_LAYER;
	else if (layer == BAND_PROF_MID_LAYER)
		hi_layer = BAND_PROF_TOP_LAYER;
	else
		return -EINVAL;

	ipolicer = &nix_hw->ipolicer[hi_layer];
	prof_idx = req->prof.band_prof_id;
	if (prof_idx >= ipolicer->band_prof.max ||
	    ipolicer->pfvf_map[prof_idx] != pcifunc)
		return -EINVAL;

	return 0;
}

int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
					struct nix_bandprof_alloc_req *req,
					struct nix_bandprof_alloc_rsp *rsp)
{
	int blkaddr, layer, prof, idx, err;
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mutex_lock(&rvu->rsrc_lock);
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		if (!req->prof_count[layer])
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		for (idx = 0; idx < req->prof_count[layer]; idx++) {
			/* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
			if (idx == MAX_BANDPROF_PER_PFFUNC)
				break;

			prof = rvu_alloc_rsrc(&ipolicer->band_prof);
			if (prof < 0)
				break;
			rsp->prof_count[layer]++;
			rsp->prof_idx[layer][idx] = prof;
			ipolicer->pfvf_map[prof] = pcifunc;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}

static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
{
	int blkaddr, layer, prof_idx, err;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mutex_lock(&rvu->rsrc_lock);
	/* Free all the profiles allocated to the PCIFUNC */
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		ipolicer = &nix_hw->ipolicer[layer];

		for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
			if (ipolicer->pfvf_map[prof_idx] != pcifunc)
				continue;

			/* Clear ratelimit aggregation, if any */
			if (layer == BAND_PROF_LEAF_LAYER &&
			    ipolicer->match_id[prof_idx])
				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);

			ipolicer->pfvf_map[prof_idx] = 0x00;
			ipolicer->match_id[prof_idx] = 0;
			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}
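
/* Mailbox handler for freeing bandwidth profiles: 'free_all' releases
 * every profile owned by the requester, otherwise only the per-layer
 * profile indices listed in the request (capped at
 * MAX_BANDPROF_PER_PFFUNC per layer) are released, clearing any
 * leaf-level ratelimit aggregation first.
 */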
int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
				       struct nix_bandprof_free_req *req,
				       struct msg_rsp *rsp)
{
	int blkaddr, layer, prof_idx, idx, err;
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;

	if (req->free_all)
		return nix_free_all_bandprof(rvu, pcifunc);

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mutex_lock(&rvu->rsrc_lock);
	/* Free the requested profile indices */
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		if (!req->prof_count[layer])
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		for (idx = 0; idx < req->prof_count[layer]; idx++) {
			if (idx == MAX_BANDPROF_PER_PFFUNC)
				break;
			prof_idx = req->prof_idx[layer][idx];
			if (prof_idx >= ipolicer->band_prof.max ||
			    ipolicer->pfvf_map[prof_idx] != pcifunc)
				continue;

			/* Clear ratelimit aggregation, if any */
			if (layer == BAND_PROF_LEAF_LAYER &&
			    ipolicer->match_id[prof_idx])
				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);

			ipolicer->pfvf_map[prof_idx] = 0x00;
			ipolicer->match_id[prof_idx] = 0;
			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}

int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
			struct nix_cn10k_aq_enq_req *aq_req,
			struct nix_cn10k_aq_enq_rsp *aq_rsp,
			u16 pcifunc, u8 ctype, u32 qidx)
{
	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req->hdr.pcifunc = pcifunc;
	aq_req->ctype = ctype;
	aq_req->op = NIX_AQ_INSTOP_READ;
	aq_req->qidx = qidx;

	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				       (struct nix_aq_enq_req *)aq_req,
				       (struct nix_aq_enq_rsp *)aq_rsp);
}

static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
					  struct nix_hw *nix_hw,
					  struct nix_cn10k_aq_enq_req *aq_req,
					  struct nix_cn10k_aq_enq_rsp *aq_rsp,
					  u32 leaf_prof, u16 mid_prof)
{
	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req->hdr.pcifunc = 0x00;
	aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req->op = NIX_AQ_INSTOP_WRITE;
	aq_req->qidx = leaf_prof;

	aq_req->prof.band_prof_id = mid_prof;
	aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
	aq_req->prof.hl_en = 1;
	aq_req->prof_mask.hl_en = 1;

	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				       (struct nix_aq_enq_req *)aq_req,
				       (struct nix_aq_enq_rsp *)aq_rsp);
}
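
/* Aggregate rate limiting across RQs: when a policer-enabled RQ is marked
 * with a 'match_id' that some other leaf profile already carries, link
 * both leaf profiles to a common mid layer profile (allocating and
 * initializing one from the first leaf's context if none exists yet), so
 * that flows steered to different RQs but sharing the match_id are rate
 * limited as one aggregate. ref_count on the mid profile tracks how many
 * leaf profiles reference it.
 */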
int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
				 u16 rq_idx, u16 match_id)
{
	int leaf_prof, mid_prof, leaf_match;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;
	int blkaddr, idx, rc;

	if (!rvu->hw->cap.ipolicer)
		return 0;

	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (rc)
		return rc;

	/* Fetch the RQ's context to see if policing is enabled */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
				 NIX_AQ_CTYPE_RQ, rq_idx);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
			__func__, rq_idx, pcifunc);
		return rc;
	}

	if (!aq_rsp.rq.policer_ena)
		return 0;

	/* Get the bandwidth profile ID mapped to this RQ */
	leaf_prof = aq_rsp.rq.band_prof_id;

	ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
	ipolicer->match_id[leaf_prof] = match_id;

	/* Check if any other leaf profile is marked with same match_id */
	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
		if (idx == leaf_prof)
			continue;
		if (ipolicer->match_id[idx] != match_id)
			continue;

		leaf_match = idx;
		break;
	}

	if (idx == ipolicer->band_prof.max)
		return 0;

	/* Fetch the matching profile's context to check if it's already
	 * mapped to a mid level profile.
	 */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_match);
		return rc;
	}

	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	if (aq_rsp.prof.hl_en) {
		/* Get the mid layer profile index and map leaf_prof to it
		 * as well, so that flows that are being steered to different
		 * RQs and marked with the same match_id are rate limited in
		 * an aggregate fashion.
		 */
		mid_prof = aq_rsp.prof.band_prof_id;
		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
						    &aq_req, &aq_rsp,
						    leaf_prof, mid_prof);
		if (rc) {
			dev_err(rvu->dev,
				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
				__func__, leaf_prof, mid_prof);
			goto exit;
		}

		mutex_lock(&rvu->rsrc_lock);
		ipolicer->ref_count[mid_prof]++;
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}

	/* Allocate a mid layer profile and
	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
	 */
	mutex_lock(&rvu->rsrc_lock);
	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
	if (mid_prof < 0) {
		dev_err(rvu->dev,
			"%s: Unable to allocate mid layer profile\n", __func__);
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}
	mutex_unlock(&rvu->rsrc_lock);
	ipolicer->pfvf_map[mid_prof] = 0x00;
	ipolicer->ref_count[mid_prof] = 0;

	/* Initialize mid layer profile same as 'leaf_prof' */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		goto exit;
	}

	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req.hdr.pcifunc = 0x00;
	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req.op = NIX_AQ_INSTOP_WRITE;
	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
	memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
	/* Clear higher layer enable bit in the mid profile, just in case */
	aq_req.prof.hl_en = 0;
	aq_req.prof_mask.hl_en = 1;

	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				     (struct nix_aq_enq_req *)&aq_req, NULL);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to INIT context of mid layer profile %d\n",
			__func__, mid_prof);
		goto exit;
	}

	/* Map both leaf profiles to this mid layer profile */
	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_prof, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_prof, mid_prof);
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_match, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_match, mid_prof);
		ipolicer->ref_count[mid_prof]--;
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

exit:
	return rc;
}
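
/* Undo the leaf->mid linkage set up by rvu_nix_setup_ratelimit_aggr():
 * if the leaf profile is linked to a mid layer profile, drop one
 * reference on that mid profile and free it once no leaf profile
 * references it any more.
 */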
/* Called with mutex rsrc_lock */
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u32 leaf_prof)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	u16 mid_prof;
	int rc;

	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);

	mutex_lock(&rvu->rsrc_lock);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		return;
	}

	if (!aq_rsp.prof.hl_en)
		return;

	mid_prof = aq_rsp.prof.band_prof_id;
	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	ipolicer->ref_count[mid_prof]--;
	/* If ref_count is zero, free mid layer profile */
	if (!ipolicer->ref_count[mid_prof]) {
		ipolicer->pfvf_map[mid_prof] = 0x00;
		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
	}
}

int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
					     struct nix_bandprof_get_hwinfo_rsp *rsp)
{
	struct nix_ipolicer *ipolicer;
	int blkaddr, layer, err;
	struct nix_hw *nix_hw;
	u64 tu;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	/* Return number of bandwidth profiles free at each layer */
	mutex_lock(&rvu->rsrc_lock);
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
	}
	mutex_unlock(&rvu->rsrc_lock);

	/* Set the policer timeunit in nanosec */
	tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
	rsp->policer_timeunit = (tu + 1) * 100;

	return 0;
}

static struct nix_mcast_grp_elem *rvu_nix_mcast_find_grp_elem(struct nix_mcast_grp *mcast_grp,
							      u32 mcast_grp_idx)
{
	struct nix_mcast_grp_elem *iter;
	bool is_found = false;

	list_for_each_entry(iter, &mcast_grp->mcast_grp_head, list) {
		if (iter->mcast_grp_idx == mcast_grp_idx) {
			is_found = true;
			break;
		}
	}

	if (is_found)
		return iter;

	return NULL;
}

int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx)
{
	struct nix_mcast_grp_elem *elem;
	struct nix_mcast_grp *mcast_grp;
	struct nix_hw *nix_hw;
	int blkaddr, ret;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	mcast_grp = &nix_hw->mcast_grp;
	mutex_lock(&mcast_grp->mcast_grp_lock);
	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
	if (!elem)
		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
	else
		ret = elem->mce_start_index;

	mutex_unlock(&mcast_grp->mcast_grp_lock);
	return ret;
}
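
/* FLR cleanup for multicast/mirror groups: if the FLR'd pcifunc created a
 * group, destroy the whole group (on AF's behalf); otherwise walk each
 * group's MCE list and delete only the entries belonging to that pcifunc.
 */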
void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc)
{
	struct nix_mcast_grp_destroy_req dreq = { 0 };
	struct nix_mcast_grp_update_req ureq = { 0 };
	struct nix_mcast_grp_update_rsp ursp = { 0 };
	struct nix_mcast_grp_elem *elem, *tmp;
	struct nix_mcast_grp *mcast_grp;
	struct nix_hw *nix_hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return;

	mcast_grp = &nix_hw->mcast_grp;

	mutex_lock(&mcast_grp->mcast_grp_lock);
	list_for_each_entry_safe(elem, tmp, &mcast_grp->mcast_grp_head, list) {
		struct nix_mce_list *mce_list;
		struct hlist_node *tmp;
		struct mce *mce;

		/* If the pcifunc which created the multicast/mirror
		 * group received an FLR, then delete the entire group.
		 */
		if (elem->pcifunc == pcifunc) {
			/* Delete group */
			dreq.hdr.pcifunc = elem->pcifunc;
			dreq.mcast_grp_idx = elem->mcast_grp_idx;
			dreq.is_af = 1;
			rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
			continue;
		}

		/* Iterate the group elements and delete the element which
		 * received the FLR.
		 */
		mce_list = &elem->mcast_mce_list;
		hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
			if (mce->pcifunc == pcifunc) {
				ureq.hdr.pcifunc = pcifunc;
				ureq.num_mce_entry = 1;
				ureq.mcast_grp_idx = elem->mcast_grp_idx;
				ureq.op = NIX_MCAST_OP_DEL_ENTRY;
				ureq.pcifunc[0] = pcifunc;
				ureq.is_af = 1;
				rvu_mbox_handler_nix_mcast_grp_update(rvu, &ureq, &ursp);
				break;
			}
		}
	}
	mutex_unlock(&mcast_grp->mcast_grp_lock);
}

int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
				    u32 mcast_grp_idx, u16 mcam_index)
{
	struct nix_mcast_grp_elem *elem;
	struct nix_mcast_grp *mcast_grp;
	struct nix_hw *nix_hw;
	int blkaddr, ret = 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	mcast_grp = &nix_hw->mcast_grp;
	mutex_lock(&mcast_grp->mcast_grp_lock);
	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
	if (!elem)
		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
	else
		elem->mcam_index = mcam_index;

	mutex_unlock(&mcast_grp->mcast_grp_lock);
	return ret;
}

int rvu_mbox_handler_nix_mcast_grp_create(struct rvu *rvu,
					  struct nix_mcast_grp_create_req *req,
					  struct nix_mcast_grp_create_rsp *rsp)
{
	struct nix_mcast_grp_elem *elem;
	struct nix_mcast_grp *mcast_grp;
	struct nix_hw *nix_hw;
	int blkaddr, err;

	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mcast_grp = &nix_hw->mcast_grp;
	elem = kzalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem)
		return -ENOMEM;

	INIT_HLIST_HEAD(&elem->mcast_mce_list.head);
	elem->mcam_index = -1;
	elem->mce_start_index = -1;
	elem->pcifunc = req->hdr.pcifunc;
	elem->dir = req->dir;
	elem->mcast_grp_idx = mcast_grp->next_grp_index++;

	mutex_lock(&mcast_grp->mcast_grp_lock);
	list_add_tail(&elem->list, &mcast_grp->mcast_grp_head);
	mcast_grp->count++;
	mutex_unlock(&mcast_grp->mcast_grp_lock);

	rsp->mcast_grp_idx = elem->mcast_grp_idx;
	return 0;
}
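
/* Destroy a multicast/mirror group: disable and delete its MCAM entry if
 * one was installed, free its MCE list, then drop the group from the
 * global list. When called on behalf of AF (req->is_af), the caller
 * already holds mcast_grp_lock.
 */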
int rvu_mbox_handler_nix_mcast_grp_destroy(struct rvu *rvu,
					   struct nix_mcast_grp_destroy_req *req,
					   struct msg_rsp *rsp)
{
	struct npc_delete_flow_req uninstall_req = { 0 };
	struct npc_delete_flow_rsp uninstall_rsp = { 0 };
	struct nix_mcast_grp_elem *elem;
	struct nix_mcast_grp *mcast_grp;
	int blkaddr, err, ret = 0;
	struct nix_mcast *mcast;
	struct nix_hw *nix_hw;

	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mcast_grp = &nix_hw->mcast_grp;

	/* If AF is requesting the deletion, it already holds the lock */
	if (!req->is_af)
		mutex_lock(&mcast_grp->mcast_grp_lock);

	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
	if (!elem) {
		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
		goto unlock_grp;
	}

	/* If no mce entries are associated with the group
	 * then just remove it from the global list.
	 */
	if (!elem->mcast_mce_list.count)
		goto delete_grp;

	/* Delete the associated mcam entry and
	 * remove all mce entries from the group
	 */
	mcast = &nix_hw->mcast;
	mutex_lock(&mcast->mce_lock);
	if (elem->mcam_index != -1) {
		uninstall_req.hdr.pcifunc = req->hdr.pcifunc;
		uninstall_req.entry = elem->mcam_index;
		rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
	}

	nix_free_mce_list(mcast, elem->mcast_mce_list.count,
			  elem->mce_start_index, elem->dir);
	nix_delete_mcast_mce_list(&elem->mcast_mce_list);
	mutex_unlock(&mcast->mce_lock);

delete_grp:
	list_del(&elem->list);
	kfree(elem);
	mcast_grp->count--;

unlock_grp:
	if (!req->is_af)
		mutex_unlock(&mcast_grp->mcast_grp_lock);

	return ret;
}
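
/* Add entries to or delete entries from a multicast/mirror group. The
 * group's MCE list is reallocated to the new size (contiguous MCE
 * entries are required); the installed MCAM entry, if any, is disabled
 * while the list is rebuilt and re-enabled with an updated action
 * afterwards. Deleting the group owner's own pcifunc destroys the whole
 * group. As with destroy, AF callers (req->is_af) already hold
 * mcast_grp_lock.
 */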
int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
					  struct nix_mcast_grp_update_req *req,
					  struct nix_mcast_grp_update_rsp *rsp)
{
	struct nix_mcast_grp_destroy_req dreq = { 0 };
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct nix_mcast_grp_elem *elem;
	struct nix_mcast_grp *mcast_grp;
	int blkaddr, err, npc_blkaddr;
	u16 prev_count, new_count;
	struct nix_mcast *mcast;
	struct nix_hw *nix_hw;
	int i, ret;

	if (!req->num_mce_entry)
		return 0;

	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mcast_grp = &nix_hw->mcast_grp;

	/* If AF is requesting the update, it already holds the lock */
	if (!req->is_af)
		mutex_lock(&mcast_grp->mcast_grp_lock);

	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
	if (!elem) {
		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
		goto unlock_grp;
	}

	/* If any pcifunc matches the group's pcifunc, then we can
	 * delete the entire group.
	 */
	if (req->op == NIX_MCAST_OP_DEL_ENTRY) {
		for (i = 0; i < req->num_mce_entry; i++) {
			if (elem->pcifunc == req->pcifunc[i]) {
				/* Delete group */
				dreq.hdr.pcifunc = elem->pcifunc;
				dreq.mcast_grp_idx = elem->mcast_grp_idx;
				dreq.is_af = 1;
				rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
				ret = 0;
				goto unlock_grp;
			}
		}
	}

	mcast = &nix_hw->mcast;
	mutex_lock(&mcast->mce_lock);
	npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (elem->mcam_index != -1)
		npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, false);

	prev_count = elem->mcast_mce_list.count;
	if (req->op == NIX_MCAST_OP_ADD_ENTRY) {
		new_count = prev_count + req->num_mce_entry;
		if (prev_count)
			nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);

		elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);

		/* It is possible not to get contiguous memory */
		if (elem->mce_start_index < 0) {
			if (elem->mcam_index != -1) {
				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
						      elem->mcam_index, true);
				ret = NIX_AF_ERR_NON_CONTIG_MCE_LIST;
				goto unlock_mce;
			}
		}

		ret = nix_add_mce_list_entry(rvu, nix_hw, elem, req);
		if (ret) {
			nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
			if (prev_count)
				elem->mce_start_index = nix_alloc_mce_list(mcast,
									   prev_count,
									   elem->dir);

			if (elem->mcam_index != -1)
				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
						      elem->mcam_index, true);

			goto unlock_mce;
		}
	} else {
		if (!prev_count || prev_count < req->num_mce_entry) {
			if (elem->mcam_index != -1)
				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
						      elem->mcam_index, true);
			ret = NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
			goto unlock_mce;
		}

		nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
		new_count = prev_count - req->num_mce_entry;
		elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
		ret = nix_del_mce_list_entry(rvu, nix_hw, elem, req);
		if (ret) {
			nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
			elem->mce_start_index = nix_alloc_mce_list(mcast, prev_count, elem->dir);
			if (elem->mcam_index != -1)
				npc_enable_mcam_entry(rvu, mcam,
						      npc_blkaddr,
						      elem->mcam_index,
						      true);

			goto unlock_mce;
		}
	}

	if (elem->mcam_index == -1) {
		rsp->mce_start_index = elem->mce_start_index;
		ret = 0;
		goto unlock_mce;
	}

	nix_mcast_update_action(rvu, elem);
	npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, true);
	rsp->mce_start_index = elem->mce_start_index;
	ret = 0;

unlock_mce:
	mutex_unlock(&mcast->mce_lock);

unlock_grp:
	if (!req->is_af)
		mutex_unlock(&mcast_grp->mcast_grp_lock);

	return ret;
}