// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "mcs.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_npc_hash.h"

static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id);
static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
			       int type, bool add);
static int nix_setup_ipolicers(struct rvu *rvu,
			       struct nix_hw *nix_hw, int blkaddr);
static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
			       struct nix_hw *nix_hw, u16 pcifunc);
static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u32 leaf_prof);
static const char *nix_get_ctx_name(int ctype);
static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc);

enum mc_tbl_sz {
	MC_TBL_SZ_256,
	MC_TBL_SZ_512,
	MC_TBL_SZ_1K,
	MC_TBL_SZ_2K,
	MC_TBL_SZ_4K,
	MC_TBL_SZ_8K,
	MC_TBL_SZ_16K,
	MC_TBL_SZ_32K,
	MC_TBL_SZ_64K,
};

enum mc_buf_cnt {
	MC_BUF_CNT_8,
	MC_BUF_CNT_16,
	MC_BUF_CNT_32,
	MC_BUF_CNT_64,
	MC_BUF_CNT_128,
	MC_BUF_CNT_256,
	MC_BUF_CNT_512,
	MC_BUF_CNT_1024,
	MC_BUF_CNT_2048,
};

enum nix_makr_fmt_indexes {
	NIX_MARK_CFG_IP_DSCP_RED,
	NIX_MARK_CFG_IP_DSCP_YELLOW,
	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
	NIX_MARK_CFG_IP_ECN_RED,
	NIX_MARK_CFG_IP_ECN_YELLOW,
	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
	NIX_MARK_CFG_VLAN_DEI_RED,
	NIX_MARK_CFG_VLAN_DEI_YELLOW,
	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
	NIX_MARK_CFG_MAX,
};

/* For now considering MC resources needed for broadcast
 * pkt replication only. i.e 256 HWVFs + 12 PFs.
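 *
 * A rough sizing check (assuming MC_TBL_SZ_2K below means a 2K entry
 * table): 2048 MCE entries cover one bcast replication entry for each
 * of the 268 (256 + 12) PF/VF functions, with headroom to spare.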
75 */ 76 #define MC_TBL_SIZE MC_TBL_SZ_2K 77 #define MC_BUF_CNT MC_BUF_CNT_1024 78 79 #define MC_TX_MAX 2048 80 81 struct mce { 82 struct hlist_node node; 83 u32 rq_rss_index; 84 u16 pcifunc; 85 u16 channel; 86 u8 dest_type; 87 u8 is_active; 88 u8 reserved[2]; 89 }; 90 91 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr) 92 { 93 int i = 0; 94 95 /*If blkaddr is 0, return the first nix block address*/ 96 if (blkaddr == 0) 97 return rvu->nix_blkaddr[blkaddr]; 98 99 while (i + 1 < MAX_NIX_BLKS) { 100 if (rvu->nix_blkaddr[i] == blkaddr) 101 return rvu->nix_blkaddr[i + 1]; 102 i++; 103 } 104 105 return 0; 106 } 107 108 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc) 109 { 110 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 111 int blkaddr; 112 113 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 114 if (!pfvf->nixlf || blkaddr < 0) 115 return false; 116 return true; 117 } 118 119 int rvu_get_nixlf_count(struct rvu *rvu) 120 { 121 int blkaddr = 0, max = 0; 122 struct rvu_block *block; 123 124 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 125 while (blkaddr) { 126 block = &rvu->hw->block[blkaddr]; 127 max += block->lf.max; 128 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 129 } 130 return max; 131 } 132 133 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr) 134 { 135 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 136 struct rvu_hwinfo *hw = rvu->hw; 137 int blkaddr; 138 139 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 140 if (!pfvf->nixlf || blkaddr < 0) 141 return NIX_AF_ERR_AF_LF_INVALID; 142 143 *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 144 if (*nixlf < 0) 145 return NIX_AF_ERR_AF_LF_INVALID; 146 147 if (nix_blkaddr) 148 *nix_blkaddr = blkaddr; 149 150 return 0; 151 } 152 153 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc, 154 struct nix_hw **nix_hw, int *blkaddr) 155 { 156 struct rvu_pfvf *pfvf; 157 158 pfvf = rvu_get_pfvf(rvu, pcifunc); 159 *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 160 if (!pfvf->nixlf || *blkaddr < 0) 161 return NIX_AF_ERR_AF_LF_INVALID; 162 163 *nix_hw = get_nix_hw(rvu->hw, *blkaddr); 164 if (!*nix_hw) 165 return NIX_AF_ERR_INVALID_NIXBLK; 166 return 0; 167 } 168 169 static void nix_mce_list_init(struct nix_mce_list *list, int max) 170 { 171 INIT_HLIST_HEAD(&list->head); 172 list->count = 0; 173 list->max = max; 174 } 175 176 static int nix_alloc_mce_list(struct nix_mcast *mcast, int count, u8 dir) 177 { 178 struct rsrc_bmap *mce_counter; 179 int idx; 180 181 if (!mcast) 182 return -EINVAL; 183 184 mce_counter = &mcast->mce_counter[dir]; 185 if (!rvu_rsrc_check_contig(mce_counter, count)) 186 return -ENOSPC; 187 188 idx = rvu_alloc_rsrc_contig(mce_counter, count); 189 return idx; 190 } 191 192 static void nix_free_mce_list(struct nix_mcast *mcast, int count, int start, u8 dir) 193 { 194 struct rsrc_bmap *mce_counter; 195 196 if (!mcast) 197 return; 198 199 mce_counter = &mcast->mce_counter[dir]; 200 rvu_free_rsrc_contig(mce_counter, count, start); 201 } 202 203 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) 204 { 205 int nix_blkaddr = 0, i = 0; 206 struct rvu *rvu = hw->rvu; 207 208 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); 209 while (nix_blkaddr) { 210 if (blkaddr == nix_blkaddr && hw->nix) 211 return &hw->nix[i]; 212 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); 213 i++; 214 } 215 return NULL; 216 } 217 218 int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type) 219 { 220 if (hw->cap.nix_multiple_dwrr_mtu) 221 
		return NIX_AF_DWRR_MTUX(smq_link_type);

	if (smq_link_type == SMQ_LINK_TYPE_SDP)
		return NIX_AF_DWRR_SDP_MTU;

	/* Here it's same reg for RPM and LBK */
	return NIX_AF_DWRR_RPM_MTU;
}

u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
{
	dwrr_mtu &= 0x1FULL;

	/* MTU used for DWRR calculation is a power of 2 up to 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	switch (dwrr_mtu) {
	case 4:
		return 9728;
	case 5:
		return 10240;
	default:
		return BIT_ULL(dwrr_mtu);
	}

	return 0;
}

u32 convert_bytes_to_dwrr_mtu(u32 bytes)
{
	/* MTU used for DWRR calculation is a power of 2 up to 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	if (bytes > BIT_ULL(16))
		return 0;

	switch (bytes) {
	case 9728:
		return 4;
	case 10240:
		return 5;
	default:
		return ilog2(bytes);
	}

	return 0;
}

static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
	int err;

	/* Sync all in flight RX packets to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");

	/* SW_SYNC ensures all existing transactions are finished and pkts
	 * are written to LLC/DRAM, queues should be torn down after
	 * successful SW_SYNC. Due to a HW errata, in some rare scenarios
	 * an existing transaction might end after SW_SYNC operation. To
	 * ensure operation is fully done, do the SW_SYNC twice.
286 */ 287 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); 288 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); 289 if (err) 290 dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n"); 291 } 292 293 static bool is_valid_txschq(struct rvu *rvu, int blkaddr, 294 int lvl, u16 pcifunc, u16 schq) 295 { 296 struct rvu_hwinfo *hw = rvu->hw; 297 struct nix_txsch *txsch; 298 struct nix_hw *nix_hw; 299 u16 map_func; 300 301 nix_hw = get_nix_hw(rvu->hw, blkaddr); 302 if (!nix_hw) 303 return false; 304 305 txsch = &nix_hw->txsch[lvl]; 306 /* Check out of bounds */ 307 if (schq >= txsch->schq.max) 308 return false; 309 310 mutex_lock(&rvu->rsrc_lock); 311 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); 312 mutex_unlock(&rvu->rsrc_lock); 313 314 /* TLs aggegating traffic are shared across PF and VFs */ 315 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 316 if ((nix_get_tx_link(rvu, map_func) != 317 nix_get_tx_link(rvu, pcifunc)) && 318 (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))) 319 return false; 320 else 321 return true; 322 } 323 324 if (map_func != pcifunc) 325 return false; 326 327 return true; 328 } 329 330 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf, 331 struct nix_lf_alloc_rsp *rsp, bool loop) 332 { 333 struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc); 334 u16 req_chan_base, req_chan_end, req_chan_cnt; 335 struct rvu_hwinfo *hw = rvu->hw; 336 struct sdp_node_info *sdp_info; 337 int pkind, pf, vf, lbkid, vfid; 338 u8 cgx_id, lmac_id; 339 bool from_vf; 340 int err; 341 342 pf = rvu_get_pf(pcifunc); 343 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && 344 type != NIX_INTF_TYPE_SDP) 345 return 0; 346 347 switch (type) { 348 case NIX_INTF_TYPE_CGX: 349 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf]; 350 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); 351 352 pkind = rvu_npc_get_pkind(rvu, pf); 353 if (pkind < 0) { 354 dev_err(rvu->dev, 355 "PF_Func 0x%x: Invalid pkind\n", pcifunc); 356 return -EINVAL; 357 } 358 pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0); 359 pfvf->tx_chan_base = pfvf->rx_chan_base; 360 pfvf->rx_chan_cnt = 1; 361 pfvf->tx_chan_cnt = 1; 362 rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id; 363 364 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind); 365 rvu_npc_set_pkind(rvu, pkind, pfvf); 366 break; 367 case NIX_INTF_TYPE_LBK: 368 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; 369 370 /* If NIX1 block is present on the silicon then NIXes are 371 * assigned alternatively for lbk interfaces. NIX0 should 372 * send packets on lbk link 1 channels and NIX1 should send 373 * on lbk link 0 channels for the communication between 374 * NIX0 and NIX1. 375 */ 376 lbkid = 0; 377 if (rvu->hw->lbk_links > 1) 378 lbkid = vf & 0x1 ? 0 : 1; 379 380 /* By default NIX0 is configured to send packet on lbk link 1 381 * (which corresponds to LBK1), same packet will receive on 382 * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0 383 * (which corresponds to LBK2) packet will receive on NIX0 lbk 384 * link 1. 
		 * But if lbk links for NIX0 and NIX1 are negated, i.e NIX0
		 * transmits and receives on lbk link 0, which corresponds
		 * to LBK1 block, back to back connectivity between NIX and
		 * LBK can be achieved (which is similar to 96xx)
		 *
		 *			RX		TX
		 * NIX0 lbk link	1 (LBK2)	1 (LBK1)
		 * NIX0 lbk link	0 (LBK0)	0 (LBK0)
		 * NIX1 lbk link	0 (LBK1)	0 (LBK2)
		 * NIX1 lbk link	1 (LBK3)	1 (LBK3)
		 */
		if (loop)
			lbkid = !lbkid;

		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels. Therefore if an odd number of AF VFs is
		 * enabled then the last VF remains with no pair.
		 */
		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
		pfvf->tx_chan_base = vf & 0x1 ?
					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = hw->cgx_links + lbkid;
		pfvf->lbkid = lbkid;
		rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);

		break;
	case NIX_INTF_TYPE_SDP:
		from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
		parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
		sdp_info = parent_pf->sdp_info;
		if (!sdp_info) {
			dev_err(rvu->dev, "Invalid sdp_info pointer\n");
			return -EINVAL;
		}
		if (from_vf) {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
					sdp_info->num_pf_rings;
			vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
			for (vfid = 0; vfid < vf; vfid++)
				req_chan_base += sdp_info->vf_rings[vfid];
			req_chan_cnt = sdp_info->vf_rings[vf];
			req_chan_end = req_chan_base + req_chan_cnt - 1;
			if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
			    req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
				dev_err(rvu->dev,
					"PF_Func 0x%x: Invalid channel base and count\n",
					pcifunc);
				return -EINVAL;
			}
		} else {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
			req_chan_cnt = sdp_info->num_pf_rings;
		}

		pfvf->rx_chan_base = req_chan_base;
		pfvf->rx_chan_cnt = req_chan_cnt;
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;

		rsp->tx_link = hw->cgx_links + hw->lbk_links;
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);
		break;
	}

	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
	 * RVU PF/VF's MAC address.
459 */ 460 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 461 pfvf->rx_chan_base, pfvf->mac_addr); 462 463 /* Add this PF_FUNC to bcast pkt replication list */ 464 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true); 465 if (err) { 466 dev_err(rvu->dev, 467 "Bcast list, failed to enable PF_FUNC 0x%x\n", 468 pcifunc); 469 return err; 470 } 471 /* Install MCAM rule matching Ethernet broadcast mac address */ 472 rvu_npc_install_bcast_match_entry(rvu, pcifunc, 473 nixlf, pfvf->rx_chan_base); 474 475 pfvf->maxlen = NIC_HW_MIN_FRS; 476 pfvf->minlen = NIC_HW_MIN_FRS; 477 478 return 0; 479 } 480 481 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) 482 { 483 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 484 int err; 485 486 pfvf->maxlen = 0; 487 pfvf->minlen = 0; 488 489 /* Remove this PF_FUNC from bcast pkt replication list */ 490 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false); 491 if (err) { 492 dev_err(rvu->dev, 493 "Bcast list, failed to disable PF_FUNC 0x%x\n", 494 pcifunc); 495 } 496 497 /* Free and disable any MCAM entries used by this NIX LF */ 498 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 499 500 /* Disable DMAC filters used */ 501 rvu_cgx_disable_dmac_entries(rvu, pcifunc); 502 } 503 504 #define NIX_BPIDS_PER_LMAC 8 505 #define NIX_BPIDS_PER_CPT 1 506 static int nix_setup_bpids(struct rvu *rvu, struct nix_hw *hw, int blkaddr) 507 { 508 struct nix_bp *bp = &hw->bp; 509 int err, max_bpids; 510 u64 cfg; 511 512 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 513 max_bpids = FIELD_GET(NIX_CONST_MAX_BPIDS, cfg); 514 515 /* Reserve the BPIds for CGX and SDP */ 516 bp->cgx_bpid_cnt = rvu->hw->cgx_links * NIX_BPIDS_PER_LMAC; 517 bp->sdp_bpid_cnt = rvu->hw->sdp_links * FIELD_GET(NIX_CONST_SDP_CHANS, cfg); 518 bp->free_pool_base = bp->cgx_bpid_cnt + bp->sdp_bpid_cnt + 519 NIX_BPIDS_PER_CPT; 520 bp->bpids.max = max_bpids - bp->free_pool_base; 521 522 err = rvu_alloc_bitmap(&bp->bpids); 523 if (err) 524 return err; 525 526 bp->fn_map = devm_kcalloc(rvu->dev, bp->bpids.max, 527 sizeof(u16), GFP_KERNEL); 528 if (!bp->fn_map) 529 return -ENOMEM; 530 531 bp->intf_map = devm_kcalloc(rvu->dev, bp->bpids.max, 532 sizeof(u8), GFP_KERNEL); 533 if (!bp->intf_map) 534 return -ENOMEM; 535 536 bp->ref_cnt = devm_kcalloc(rvu->dev, bp->bpids.max, 537 sizeof(u8), GFP_KERNEL); 538 if (!bp->ref_cnt) 539 return -ENOMEM; 540 541 return 0; 542 } 543 544 void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc) 545 { 546 int blkaddr, bpid, err; 547 struct nix_hw *nix_hw; 548 struct nix_bp *bp; 549 550 if (!is_lbk_vf(rvu, pcifunc)) 551 return; 552 553 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 554 if (err) 555 return; 556 557 bp = &nix_hw->bp; 558 559 mutex_lock(&rvu->rsrc_lock); 560 for (bpid = 0; bpid < bp->bpids.max; bpid++) { 561 if (bp->fn_map[bpid] == pcifunc) { 562 bp->ref_cnt[bpid]--; 563 if (bp->ref_cnt[bpid]) 564 continue; 565 rvu_free_rsrc(&bp->bpids, bpid); 566 bp->fn_map[bpid] = 0; 567 } 568 } 569 mutex_unlock(&rvu->rsrc_lock); 570 } 571 572 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, 573 struct nix_bp_cfg_req *req, 574 struct msg_rsp *rsp) 575 { 576 u16 pcifunc = req->hdr.pcifunc; 577 int blkaddr, pf, type, err; 578 u16 chan_base, chan, bpid; 579 struct rvu_pfvf *pfvf; 580 struct nix_hw *nix_hw; 581 struct nix_bp *bp; 582 u64 cfg; 583 584 pf = rvu_get_pf(pcifunc); 585 type = is_lbk_vf(rvu, pcifunc) ? 
				NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	if (is_sdp_pfvf(pcifunc))
		type = NIX_INTF_TYPE_SDP;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	bp = &nix_hw->bp;
	chan_base = pfvf->rx_chan_base + req->chan_base;
	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg & ~BIT_ULL(16));

		if (type == NIX_INTF_TYPE_LBK) {
			bpid = cfg & GENMASK(8, 0);
			mutex_lock(&rvu->rsrc_lock);
			rvu_free_rsrc(&bp->bpids, bpid - bp->free_pool_base);
			for (bpid = 0; bpid < bp->bpids.max; bpid++) {
				if (bp->fn_map[bpid] == pcifunc) {
					bp->fn_map[bpid] = 0;
					bp->ref_cnt[bpid] = 0;
				}
			}
			mutex_unlock(&rvu->rsrc_lock);
		}
	}
	return 0;
}

static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id)
{
	int bpid, blkaddr, sdp_chan_base, err;
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_pfvf *pfvf;
	struct nix_hw *nix_hw;
	u8 cgx_id, lmac_id;
	struct nix_bp *bp;

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	bp = &nix_hw->bp;

	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * Lmac channels and bpids mapped as follows
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
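	 *
	 * Worked example for the CGX case computed below (hypothetical
	 * values; assumes hw->lmac_per_cgx == 4 and NIX_BPIDS_PER_LMAC == 8):
	 *   cgx_id 1, lmac_id 2, chan_base 3
	 *   bpid = (1 * 4 * 8) + (2 * 8) + 3 = 51, plus chan_id when
	 *   bpid_per_chan is set.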
647 */ 648 switch (type) { 649 case NIX_INTF_TYPE_CGX: 650 if ((req->chan_base + req->chan_cnt) > NIX_BPIDS_PER_LMAC) 651 return NIX_AF_ERR_INVALID_BPID_REQ; 652 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); 653 /* Assign bpid based on cgx, lmac and chan id */ 654 bpid = (cgx_id * hw->lmac_per_cgx * NIX_BPIDS_PER_LMAC) + 655 (lmac_id * NIX_BPIDS_PER_LMAC) + req->chan_base; 656 657 if (req->bpid_per_chan) 658 bpid += chan_id; 659 if (bpid > bp->cgx_bpid_cnt) 660 return NIX_AF_ERR_INVALID_BPID; 661 break; 662 663 case NIX_INTF_TYPE_LBK: 664 /* Alloc bpid from the free pool */ 665 mutex_lock(&rvu->rsrc_lock); 666 bpid = rvu_alloc_rsrc(&bp->bpids); 667 if (bpid < 0) { 668 mutex_unlock(&rvu->rsrc_lock); 669 return NIX_AF_ERR_INVALID_BPID; 670 } 671 bp->fn_map[bpid] = req->hdr.pcifunc; 672 bp->ref_cnt[bpid]++; 673 bpid += bp->free_pool_base; 674 mutex_unlock(&rvu->rsrc_lock); 675 break; 676 case NIX_INTF_TYPE_SDP: 677 if ((req->chan_base + req->chan_cnt) > bp->sdp_bpid_cnt) 678 return NIX_AF_ERR_INVALID_BPID_REQ; 679 680 /* Handle usecase of 2 SDP blocks */ 681 if (!hw->cap.programmable_chans) 682 sdp_chan_base = pfvf->rx_chan_base - NIX_CHAN_SDP_CH_START; 683 else 684 sdp_chan_base = pfvf->rx_chan_base - hw->sdp_chan_base; 685 686 bpid = bp->cgx_bpid_cnt + req->chan_base + sdp_chan_base; 687 if (req->bpid_per_chan) 688 bpid += chan_id; 689 690 if (bpid > (bp->cgx_bpid_cnt + bp->sdp_bpid_cnt)) 691 return NIX_AF_ERR_INVALID_BPID; 692 break; 693 default: 694 return -EINVAL; 695 } 696 return bpid; 697 } 698 699 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, 700 struct nix_bp_cfg_req *req, 701 struct nix_bp_cfg_rsp *rsp) 702 { 703 int blkaddr, pf, type, chan_id = 0; 704 u16 pcifunc = req->hdr.pcifunc; 705 struct rvu_pfvf *pfvf; 706 u16 chan_base, chan; 707 s16 bpid, bpid_base; 708 u64 cfg; 709 710 pf = rvu_get_pf(pcifunc); 711 type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 712 if (is_sdp_pfvf(pcifunc)) 713 type = NIX_INTF_TYPE_SDP; 714 715 /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */ 716 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && 717 type != NIX_INTF_TYPE_SDP) 718 return 0; 719 720 pfvf = rvu_get_pfvf(rvu, pcifunc); 721 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 722 723 bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id); 724 chan_base = pfvf->rx_chan_base + req->chan_base; 725 bpid = bpid_base; 726 727 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { 728 if (bpid < 0) { 729 dev_warn(rvu->dev, "Fail to enable backpressure\n"); 730 return -EINVAL; 731 } 732 733 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); 734 cfg &= ~GENMASK_ULL(8, 0); 735 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), 736 cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16)); 737 chan_id++; 738 bpid = rvu_nix_get_bpid(rvu, req, type, chan_id); 739 } 740 741 for (chan = 0; chan < req->chan_cnt; chan++) { 742 /* Map channel and bpid assign to it */ 743 rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 | 744 (bpid_base & 0x3FF); 745 if (req->bpid_per_chan) 746 bpid_base++; 747 } 748 rsp->chan_cnt = req->chan_cnt; 749 750 return 0; 751 } 752 753 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr, 754 u64 format, bool v4, u64 *fidx) 755 { 756 struct nix_lso_format field = {0}; 757 758 /* IP's Length field */ 759 field.layer = NIX_TXLAYER_OL3; 760 /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */ 761 field.offset = v4 ? 
2 : 4; 762 field.sizem1 = 1; /* i.e 2 bytes */ 763 field.alg = NIX_LSOALG_ADD_PAYLEN; 764 rvu_write64(rvu, blkaddr, 765 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 766 *(u64 *)&field); 767 768 /* No ID field in IPv6 header */ 769 if (!v4) 770 return; 771 772 /* IP's ID field */ 773 field.layer = NIX_TXLAYER_OL3; 774 field.offset = 4; 775 field.sizem1 = 1; /* i.e 2 bytes */ 776 field.alg = NIX_LSOALG_ADD_SEGNUM; 777 rvu_write64(rvu, blkaddr, 778 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 779 *(u64 *)&field); 780 } 781 782 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr, 783 u64 format, u64 *fidx) 784 { 785 struct nix_lso_format field = {0}; 786 787 /* TCP's sequence number field */ 788 field.layer = NIX_TXLAYER_OL4; 789 field.offset = 4; 790 field.sizem1 = 3; /* i.e 4 bytes */ 791 field.alg = NIX_LSOALG_ADD_OFFSET; 792 rvu_write64(rvu, blkaddr, 793 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 794 *(u64 *)&field); 795 796 /* TCP's flags field */ 797 field.layer = NIX_TXLAYER_OL4; 798 field.offset = 12; 799 field.sizem1 = 1; /* 2 bytes */ 800 field.alg = NIX_LSOALG_TCP_FLAGS; 801 rvu_write64(rvu, blkaddr, 802 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 803 *(u64 *)&field); 804 } 805 806 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 807 { 808 u64 cfg, idx, fidx = 0; 809 810 /* Get max HW supported format indices */ 811 cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF; 812 nix_hw->lso.total = cfg; 813 814 /* Enable LSO */ 815 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG); 816 /* For TSO, set first and middle segment flags to 817 * mask out PSH, RST & FIN flags in TCP packet 818 */ 819 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16)); 820 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16); 821 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63)); 822 823 /* Setup default static LSO formats 824 * 825 * Configure format fields for TCPv4 segmentation offload 826 */ 827 idx = NIX_LSO_FORMAT_IDX_TSOV4; 828 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx); 829 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); 830 831 /* Set rest of the fields to NOP */ 832 for (; fidx < 8; fidx++) { 833 rvu_write64(rvu, blkaddr, 834 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); 835 } 836 nix_hw->lso.in_use++; 837 838 /* Configure format fields for TCPv6 segmentation offload */ 839 idx = NIX_LSO_FORMAT_IDX_TSOV6; 840 fidx = 0; 841 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx); 842 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); 843 844 /* Set rest of the fields to NOP */ 845 for (; fidx < 8; fidx++) { 846 rvu_write64(rvu, blkaddr, 847 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); 848 } 849 nix_hw->lso.in_use++; 850 } 851 852 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) 853 { 854 kfree(pfvf->rq_bmap); 855 kfree(pfvf->sq_bmap); 856 kfree(pfvf->cq_bmap); 857 if (pfvf->rq_ctx) 858 qmem_free(rvu->dev, pfvf->rq_ctx); 859 if (pfvf->sq_ctx) 860 qmem_free(rvu->dev, pfvf->sq_ctx); 861 if (pfvf->cq_ctx) 862 qmem_free(rvu->dev, pfvf->cq_ctx); 863 if (pfvf->rss_ctx) 864 qmem_free(rvu->dev, pfvf->rss_ctx); 865 if (pfvf->nix_qints_ctx) 866 qmem_free(rvu->dev, pfvf->nix_qints_ctx); 867 if (pfvf->cq_ints_ctx) 868 qmem_free(rvu->dev, pfvf->cq_ints_ctx); 869 870 pfvf->rq_bmap = NULL; 871 pfvf->cq_bmap = NULL; 872 pfvf->sq_bmap = NULL; 873 pfvf->rq_ctx = NULL; 874 pfvf->sq_ctx = NULL; 875 pfvf->cq_ctx = NULL; 876 pfvf->rss_ctx = NULL; 877 pfvf->nix_qints_ctx = NULL; 878 pfvf->cq_ints_ctx = NULL; 879 } 880 881 static int 
nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, 882 struct rvu_pfvf *pfvf, int nixlf, 883 int rss_sz, int rss_grps, int hwctx_size, 884 u64 way_mask, bool tag_lsb_as_adder) 885 { 886 int err, grp, num_indices; 887 u64 val; 888 889 /* RSS is not requested for this NIXLF */ 890 if (!rss_sz) 891 return 0; 892 num_indices = rss_sz * rss_grps; 893 894 /* Alloc NIX RSS HW context memory and config the base */ 895 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size); 896 if (err) 897 return err; 898 899 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf), 900 (u64)pfvf->rss_ctx->iova); 901 902 /* Config full RSS table size, enable RSS and caching */ 903 val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 | 904 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE); 905 906 if (tag_lsb_as_adder) 907 val |= BIT_ULL(5); 908 909 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val); 910 /* Config RSS group offset and sizes */ 911 for (grp = 0; grp < rss_grps; grp++) 912 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp), 913 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp)); 914 return 0; 915 } 916 917 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, 918 struct nix_aq_inst_s *inst) 919 { 920 struct admin_queue *aq = block->aq; 921 struct nix_aq_res_s *result; 922 int timeout = 1000; 923 u64 reg, head; 924 int ret; 925 926 result = (struct nix_aq_res_s *)aq->res->base; 927 928 /* Get current head pointer where to append this instruction */ 929 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS); 930 head = (reg >> 4) & AQ_PTR_MASK; 931 932 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)), 933 (void *)inst, aq->inst->entry_sz); 934 memset(result, 0, sizeof(*result)); 935 /* sync into memory */ 936 wmb(); 937 938 /* Ring the doorbell and wait for result */ 939 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1); 940 while (result->compcode == NIX_AQ_COMP_NOTDONE) { 941 cpu_relax(); 942 udelay(1); 943 timeout--; 944 if (!timeout) 945 return -EBUSY; 946 } 947 948 if (result->compcode != NIX_AQ_COMP_GOOD) { 949 /* TODO: Replace this with some error code */ 950 if (result->compcode == NIX_AQ_COMP_CTX_FAULT || 951 result->compcode == NIX_AQ_COMP_LOCKERR || 952 result->compcode == NIX_AQ_COMP_CTX_POISON) { 953 ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX); 954 ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX); 955 ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX); 956 ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX); 957 if (ret) 958 dev_err(rvu->dev, 959 "%s: Not able to unlock cachelines\n", __func__); 960 } 961 962 return -EBUSY; 963 } 964 965 return 0; 966 } 967 968 static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req, 969 u16 *smq, u16 *smq_mask) 970 { 971 struct nix_cn10k_aq_enq_req *aq_req; 972 973 if (!is_rvu_otx2(rvu)) { 974 aq_req = (struct nix_cn10k_aq_enq_req *)req; 975 *smq = aq_req->sq.smq; 976 *smq_mask = aq_req->sq_mask.smq; 977 } else { 978 *smq = req->sq.smq; 979 *smq_mask = req->sq_mask.smq; 980 } 981 } 982 983 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, 984 struct nix_aq_enq_req *req, 985 struct nix_aq_enq_rsp *rsp) 986 { 987 struct rvu_hwinfo *hw = rvu->hw; 988 u16 pcifunc = req->hdr.pcifunc; 989 int nixlf, blkaddr, rc = 0; 990 struct nix_aq_inst_s inst; 991 struct rvu_block *block; 992 struct admin_queue *aq; 993 struct rvu_pfvf *pfvf; 994 u16 smq, smq_mask; 995 void *ctx, *mask; 996 bool ena; 997 u64 cfg; 998 999 blkaddr = nix_hw->blkaddr; 
1000 block = &hw->block[blkaddr]; 1001 aq = block->aq; 1002 if (!aq) { 1003 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__); 1004 return NIX_AF_ERR_AQ_ENQUEUE; 1005 } 1006 1007 pfvf = rvu_get_pfvf(rvu, pcifunc); 1008 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1009 1010 /* Skip NIXLF check for broadcast MCE entry and bandwidth profile 1011 * operations done by AF itself. 1012 */ 1013 if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) || 1014 (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) { 1015 if (!pfvf->nixlf || nixlf < 0) 1016 return NIX_AF_ERR_AF_LF_INVALID; 1017 } 1018 1019 switch (req->ctype) { 1020 case NIX_AQ_CTYPE_RQ: 1021 /* Check if index exceeds max no of queues */ 1022 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize) 1023 rc = NIX_AF_ERR_AQ_ENQUEUE; 1024 break; 1025 case NIX_AQ_CTYPE_SQ: 1026 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize) 1027 rc = NIX_AF_ERR_AQ_ENQUEUE; 1028 break; 1029 case NIX_AQ_CTYPE_CQ: 1030 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize) 1031 rc = NIX_AF_ERR_AQ_ENQUEUE; 1032 break; 1033 case NIX_AQ_CTYPE_RSS: 1034 /* Check if RSS is enabled and qidx is within range */ 1035 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf)); 1036 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx || 1037 (req->qidx >= (256UL << (cfg & 0xF)))) 1038 rc = NIX_AF_ERR_AQ_ENQUEUE; 1039 break; 1040 case NIX_AQ_CTYPE_MCE: 1041 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG); 1042 1043 /* Check if index exceeds MCE list length */ 1044 if (!nix_hw->mcast.mce_ctx || 1045 (req->qidx >= (256UL << (cfg & 0xF)))) 1046 rc = NIX_AF_ERR_AQ_ENQUEUE; 1047 1048 /* Adding multicast lists for requests from PF/VFs is not 1049 * yet supported, so ignore this. 1050 */ 1051 if (rsp) 1052 rc = NIX_AF_ERR_AQ_ENQUEUE; 1053 break; 1054 case NIX_AQ_CTYPE_BANDPROF: 1055 if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req, 1056 nix_hw, pcifunc)) 1057 rc = NIX_AF_ERR_INVALID_BANDPROF; 1058 break; 1059 default: 1060 rc = NIX_AF_ERR_AQ_ENQUEUE; 1061 } 1062 1063 if (rc) 1064 return rc; 1065 1066 nix_get_aq_req_smq(rvu, req, &smq, &smq_mask); 1067 /* Check if SQ pointed SMQ belongs to this PF/VF or not */ 1068 if (req->ctype == NIX_AQ_CTYPE_SQ && 1069 ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) || 1070 (req->op == NIX_AQ_INSTOP_WRITE && 1071 req->sq_mask.ena && req->sq.ena && smq_mask))) { 1072 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ, 1073 pcifunc, smq)) 1074 return NIX_AF_ERR_AQ_ENQUEUE; 1075 } 1076 1077 memset(&inst, 0, sizeof(struct nix_aq_inst_s)); 1078 inst.lf = nixlf; 1079 inst.cindex = req->qidx; 1080 inst.ctype = req->ctype; 1081 inst.op = req->op; 1082 /* Currently we are not supporting enqueuing multiple instructions, 1083 * so always choose first entry in result memory. 1084 */ 1085 inst.res_addr = (u64)aq->res->iova; 1086 1087 /* Hardware uses same aq->res->base for updating result of 1088 * previous instruction hence wait here till it is done. 
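	 * The aq->lock taken below also serializes use of this single shared
	 * result/context area across concurrent callers.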
1089 */ 1090 spin_lock(&aq->lock); 1091 1092 /* Clean result + context memory */ 1093 memset(aq->res->base, 0, aq->res->entry_sz); 1094 /* Context needs to be written at RES_ADDR + 128 */ 1095 ctx = aq->res->base + 128; 1096 /* Mask needs to be written at RES_ADDR + 256 */ 1097 mask = aq->res->base + 256; 1098 1099 switch (req->op) { 1100 case NIX_AQ_INSTOP_WRITE: 1101 if (req->ctype == NIX_AQ_CTYPE_RQ) 1102 memcpy(mask, &req->rq_mask, 1103 sizeof(struct nix_rq_ctx_s)); 1104 else if (req->ctype == NIX_AQ_CTYPE_SQ) 1105 memcpy(mask, &req->sq_mask, 1106 sizeof(struct nix_sq_ctx_s)); 1107 else if (req->ctype == NIX_AQ_CTYPE_CQ) 1108 memcpy(mask, &req->cq_mask, 1109 sizeof(struct nix_cq_ctx_s)); 1110 else if (req->ctype == NIX_AQ_CTYPE_RSS) 1111 memcpy(mask, &req->rss_mask, 1112 sizeof(struct nix_rsse_s)); 1113 else if (req->ctype == NIX_AQ_CTYPE_MCE) 1114 memcpy(mask, &req->mce_mask, 1115 sizeof(struct nix_rx_mce_s)); 1116 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 1117 memcpy(mask, &req->prof_mask, 1118 sizeof(struct nix_bandprof_s)); 1119 fallthrough; 1120 case NIX_AQ_INSTOP_INIT: 1121 if (req->ctype == NIX_AQ_CTYPE_RQ) 1122 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); 1123 else if (req->ctype == NIX_AQ_CTYPE_SQ) 1124 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s)); 1125 else if (req->ctype == NIX_AQ_CTYPE_CQ) 1126 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s)); 1127 else if (req->ctype == NIX_AQ_CTYPE_RSS) 1128 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s)); 1129 else if (req->ctype == NIX_AQ_CTYPE_MCE) 1130 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s)); 1131 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 1132 memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s)); 1133 break; 1134 case NIX_AQ_INSTOP_NOP: 1135 case NIX_AQ_INSTOP_READ: 1136 case NIX_AQ_INSTOP_LOCK: 1137 case NIX_AQ_INSTOP_UNLOCK: 1138 break; 1139 default: 1140 rc = NIX_AF_ERR_AQ_ENQUEUE; 1141 spin_unlock(&aq->lock); 1142 return rc; 1143 } 1144 1145 /* Submit the instruction to AQ */ 1146 rc = nix_aq_enqueue_wait(rvu, block, &inst); 1147 if (rc) { 1148 spin_unlock(&aq->lock); 1149 return rc; 1150 } 1151 1152 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */ 1153 if (req->op == NIX_AQ_INSTOP_INIT) { 1154 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena) 1155 __set_bit(req->qidx, pfvf->rq_bmap); 1156 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena) 1157 __set_bit(req->qidx, pfvf->sq_bmap); 1158 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena) 1159 __set_bit(req->qidx, pfvf->cq_bmap); 1160 } 1161 1162 if (req->op == NIX_AQ_INSTOP_WRITE) { 1163 if (req->ctype == NIX_AQ_CTYPE_RQ) { 1164 ena = (req->rq.ena & req->rq_mask.ena) | 1165 (test_bit(req->qidx, pfvf->rq_bmap) & 1166 ~req->rq_mask.ena); 1167 if (ena) 1168 __set_bit(req->qidx, pfvf->rq_bmap); 1169 else 1170 __clear_bit(req->qidx, pfvf->rq_bmap); 1171 } 1172 if (req->ctype == NIX_AQ_CTYPE_SQ) { 1173 ena = (req->rq.ena & req->sq_mask.ena) | 1174 (test_bit(req->qidx, pfvf->sq_bmap) & 1175 ~req->sq_mask.ena); 1176 if (ena) 1177 __set_bit(req->qidx, pfvf->sq_bmap); 1178 else 1179 __clear_bit(req->qidx, pfvf->sq_bmap); 1180 } 1181 if (req->ctype == NIX_AQ_CTYPE_CQ) { 1182 ena = (req->rq.ena & req->cq_mask.ena) | 1183 (test_bit(req->qidx, pfvf->cq_bmap) & 1184 ~req->cq_mask.ena); 1185 if (ena) 1186 __set_bit(req->qidx, pfvf->cq_bmap); 1187 else 1188 __clear_bit(req->qidx, pfvf->cq_bmap); 1189 } 1190 } 1191 1192 if (rsp) { 1193 /* Copy read context into mailbox */ 1194 if (req->op == NIX_AQ_INSTOP_READ) { 1195 if (req->ctype == 
NIX_AQ_CTYPE_RQ) 1196 memcpy(&rsp->rq, ctx, 1197 sizeof(struct nix_rq_ctx_s)); 1198 else if (req->ctype == NIX_AQ_CTYPE_SQ) 1199 memcpy(&rsp->sq, ctx, 1200 sizeof(struct nix_sq_ctx_s)); 1201 else if (req->ctype == NIX_AQ_CTYPE_CQ) 1202 memcpy(&rsp->cq, ctx, 1203 sizeof(struct nix_cq_ctx_s)); 1204 else if (req->ctype == NIX_AQ_CTYPE_RSS) 1205 memcpy(&rsp->rss, ctx, 1206 sizeof(struct nix_rsse_s)); 1207 else if (req->ctype == NIX_AQ_CTYPE_MCE) 1208 memcpy(&rsp->mce, ctx, 1209 sizeof(struct nix_rx_mce_s)); 1210 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 1211 memcpy(&rsp->prof, ctx, 1212 sizeof(struct nix_bandprof_s)); 1213 } 1214 } 1215 1216 spin_unlock(&aq->lock); 1217 return 0; 1218 } 1219 1220 static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw, 1221 struct nix_aq_enq_req *req, u8 ctype) 1222 { 1223 struct nix_cn10k_aq_enq_req aq_req; 1224 struct nix_cn10k_aq_enq_rsp aq_rsp; 1225 int rc, word; 1226 1227 if (req->ctype != NIX_AQ_CTYPE_CQ) 1228 return 0; 1229 1230 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 1231 req->hdr.pcifunc, ctype, req->qidx); 1232 if (rc) { 1233 dev_err(rvu->dev, 1234 "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n", 1235 __func__, nix_get_ctx_name(ctype), req->qidx, 1236 req->hdr.pcifunc); 1237 return rc; 1238 } 1239 1240 /* Make copy of original context & mask which are required 1241 * for resubmission 1242 */ 1243 memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s)); 1244 memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s)); 1245 1246 /* exclude fields which HW can update */ 1247 aq_req.cq_mask.cq_err = 0; 1248 aq_req.cq_mask.wrptr = 0; 1249 aq_req.cq_mask.tail = 0; 1250 aq_req.cq_mask.head = 0; 1251 aq_req.cq_mask.avg_level = 0; 1252 aq_req.cq_mask.update_time = 0; 1253 aq_req.cq_mask.substream = 0; 1254 1255 /* Context mask (cq_mask) holds mask value of fields which 1256 * are changed in AQ WRITE operation. 1257 * for example cq.drop = 0xa; 1258 * cq_mask.drop = 0xff; 1259 * Below logic performs '&' between cq and cq_mask so that non 1260 * updated fields are masked out for request and response 1261 * comparison 1262 */ 1263 for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64); 1264 word++) { 1265 *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &= 1266 (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); 1267 *(u64 *)((u8 *)&aq_req.cq + word * 8) &= 1268 (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); 1269 } 1270 1271 if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s))) 1272 return NIX_AF_ERR_AQ_CTX_RETRY_WRITE; 1273 1274 return 0; 1275 } 1276 1277 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, 1278 struct nix_aq_enq_rsp *rsp) 1279 { 1280 struct nix_hw *nix_hw; 1281 int err, retries = 5; 1282 int blkaddr; 1283 1284 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); 1285 if (blkaddr < 0) 1286 return NIX_AF_ERR_AF_LF_INVALID; 1287 1288 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1289 if (!nix_hw) 1290 return NIX_AF_ERR_INVALID_NIXBLK; 1291 1292 retry: 1293 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); 1294 1295 /* HW errata 'AQ Modification to CQ could be discarded on heavy traffic' 1296 * As a work around perfrom CQ context read after each AQ write. If AQ 1297 * read shows AQ write is not updated perform AQ write again. 
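	 * rvu_nix_verify_aq_ctx() above performs that read-back, masking out
	 * the fields which hardware itself updates before comparing.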
1298 */ 1299 if (!err && req->op == NIX_AQ_INSTOP_WRITE) { 1300 err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ); 1301 if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) { 1302 if (retries--) 1303 goto retry; 1304 else 1305 return NIX_AF_ERR_CQ_CTX_WRITE_ERR; 1306 } 1307 } 1308 1309 return err; 1310 } 1311 1312 static const char *nix_get_ctx_name(int ctype) 1313 { 1314 switch (ctype) { 1315 case NIX_AQ_CTYPE_CQ: 1316 return "CQ"; 1317 case NIX_AQ_CTYPE_SQ: 1318 return "SQ"; 1319 case NIX_AQ_CTYPE_RQ: 1320 return "RQ"; 1321 case NIX_AQ_CTYPE_RSS: 1322 return "RSS"; 1323 } 1324 return ""; 1325 } 1326 1327 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) 1328 { 1329 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 1330 struct nix_aq_enq_req aq_req; 1331 unsigned long *bmap; 1332 int qidx, q_cnt = 0; 1333 int err = 0, rc; 1334 1335 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx) 1336 return NIX_AF_ERR_AQ_ENQUEUE; 1337 1338 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); 1339 aq_req.hdr.pcifunc = req->hdr.pcifunc; 1340 1341 if (req->ctype == NIX_AQ_CTYPE_CQ) { 1342 aq_req.cq.ena = 0; 1343 aq_req.cq_mask.ena = 1; 1344 aq_req.cq.bp_ena = 0; 1345 aq_req.cq_mask.bp_ena = 1; 1346 q_cnt = pfvf->cq_ctx->qsize; 1347 bmap = pfvf->cq_bmap; 1348 } 1349 if (req->ctype == NIX_AQ_CTYPE_SQ) { 1350 aq_req.sq.ena = 0; 1351 aq_req.sq_mask.ena = 1; 1352 q_cnt = pfvf->sq_ctx->qsize; 1353 bmap = pfvf->sq_bmap; 1354 } 1355 if (req->ctype == NIX_AQ_CTYPE_RQ) { 1356 aq_req.rq.ena = 0; 1357 aq_req.rq_mask.ena = 1; 1358 q_cnt = pfvf->rq_ctx->qsize; 1359 bmap = pfvf->rq_bmap; 1360 } 1361 1362 aq_req.ctype = req->ctype; 1363 aq_req.op = NIX_AQ_INSTOP_WRITE; 1364 1365 for (qidx = 0; qidx < q_cnt; qidx++) { 1366 if (!test_bit(qidx, bmap)) 1367 continue; 1368 aq_req.qidx = qidx; 1369 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL); 1370 if (rc) { 1371 err = rc; 1372 dev_err(rvu->dev, "Failed to disable %s:%d context\n", 1373 nix_get_ctx_name(req->ctype), qidx); 1374 } 1375 } 1376 1377 return err; 1378 } 1379 1380 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 1381 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req) 1382 { 1383 struct nix_aq_enq_req lock_ctx_req; 1384 int err; 1385 1386 if (req->op != NIX_AQ_INSTOP_INIT) 1387 return 0; 1388 1389 if (req->ctype == NIX_AQ_CTYPE_MCE || 1390 req->ctype == NIX_AQ_CTYPE_DYNO) 1391 return 0; 1392 1393 memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req)); 1394 lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc; 1395 lock_ctx_req.ctype = req->ctype; 1396 lock_ctx_req.op = NIX_AQ_INSTOP_LOCK; 1397 lock_ctx_req.qidx = req->qidx; 1398 err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL); 1399 if (err) 1400 dev_err(rvu->dev, 1401 "PFUNC 0x%x: Failed to lock NIX %s:%d context\n", 1402 req->hdr.pcifunc, 1403 nix_get_ctx_name(req->ctype), req->qidx); 1404 return err; 1405 } 1406 1407 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 1408 struct nix_aq_enq_req *req, 1409 struct nix_aq_enq_rsp *rsp) 1410 { 1411 int err; 1412 1413 err = rvu_nix_aq_enq_inst(rvu, req, rsp); 1414 if (!err) 1415 err = nix_lf_hwctx_lockdown(rvu, req); 1416 return err; 1417 } 1418 #else 1419 1420 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 1421 struct nix_aq_enq_req *req, 1422 struct nix_aq_enq_rsp *rsp) 1423 { 1424 return rvu_nix_aq_enq_inst(rvu, req, rsp); 1425 } 1426 #endif 1427 /* CN10K mbox handler */ 1428 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu, 1429 struct nix_cn10k_aq_enq_req *req, 1430 struct nix_cn10k_aq_enq_rsp *rsp) 1431 { 
1432 return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req, 1433 (struct nix_aq_enq_rsp *)rsp); 1434 } 1435 1436 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, 1437 struct hwctx_disable_req *req, 1438 struct msg_rsp *rsp) 1439 { 1440 return nix_lf_hwctx_disable(rvu, req); 1441 } 1442 1443 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, 1444 struct nix_lf_alloc_req *req, 1445 struct nix_lf_alloc_rsp *rsp) 1446 { 1447 int nixlf, qints, hwctx_size, intf, err, rc = 0; 1448 struct rvu_hwinfo *hw = rvu->hw; 1449 u16 pcifunc = req->hdr.pcifunc; 1450 struct rvu_block *block; 1451 struct rvu_pfvf *pfvf; 1452 u64 cfg, ctx_cfg; 1453 int blkaddr; 1454 1455 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt) 1456 return NIX_AF_ERR_PARAM; 1457 1458 if (req->way_mask) 1459 req->way_mask &= 0xFFFF; 1460 1461 pfvf = rvu_get_pfvf(rvu, pcifunc); 1462 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1463 if (!pfvf->nixlf || blkaddr < 0) 1464 return NIX_AF_ERR_AF_LF_INVALID; 1465 1466 block = &hw->block[blkaddr]; 1467 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1468 if (nixlf < 0) 1469 return NIX_AF_ERR_AF_LF_INVALID; 1470 1471 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */ 1472 if (req->npa_func) { 1473 /* If default, use 'this' NIXLF's PFFUNC */ 1474 if (req->npa_func == RVU_DEFAULT_PF_FUNC) 1475 req->npa_func = pcifunc; 1476 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA)) 1477 return NIX_AF_INVAL_NPA_PF_FUNC; 1478 } 1479 1480 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */ 1481 if (req->sso_func) { 1482 /* If default, use 'this' NIXLF's PFFUNC */ 1483 if (req->sso_func == RVU_DEFAULT_PF_FUNC) 1484 req->sso_func = pcifunc; 1485 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO)) 1486 return NIX_AF_INVAL_SSO_PF_FUNC; 1487 } 1488 1489 /* If RSS is being enabled, check if requested config is valid. 1490 * RSS table size should be power of two, otherwise 1491 * RSS_GRP::OFFSET + adder might go beyond that group or 1492 * won't be able to use entire table. 
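	 *
	 * For example (illustrative numbers only): rss_sz = 256 with
	 * rss_grps = 8 gives a 2048 entry indirection table, group N
	 * starting at offset 256 * N (see nixlf_rss_ctx_init()).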
1493 */ 1494 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE || 1495 !is_power_of_2(req->rss_sz))) 1496 return NIX_AF_ERR_RSS_SIZE_INVALID; 1497 1498 if (req->rss_sz && 1499 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS)) 1500 return NIX_AF_ERR_RSS_GRPS_INVALID; 1501 1502 /* Reset this NIX LF */ 1503 err = rvu_lf_reset(rvu, block, nixlf); 1504 if (err) { 1505 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", 1506 block->addr - BLKADDR_NIX0, nixlf); 1507 return NIX_AF_ERR_LF_RESET; 1508 } 1509 1510 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3); 1511 1512 /* Alloc NIX RQ HW context memory and config the base */ 1513 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF); 1514 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size); 1515 if (err) 1516 goto free_mem; 1517 1518 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL); 1519 if (!pfvf->rq_bmap) 1520 goto free_mem; 1521 1522 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf), 1523 (u64)pfvf->rq_ctx->iova); 1524 1525 /* Set caching and queue count in HW */ 1526 cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20; 1527 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg); 1528 1529 /* Alloc NIX SQ HW context memory and config the base */ 1530 hwctx_size = 1UL << (ctx_cfg & 0xF); 1531 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size); 1532 if (err) 1533 goto free_mem; 1534 1535 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL); 1536 if (!pfvf->sq_bmap) 1537 goto free_mem; 1538 1539 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf), 1540 (u64)pfvf->sq_ctx->iova); 1541 1542 cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20; 1543 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg); 1544 1545 /* Alloc NIX CQ HW context memory and config the base */ 1546 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF); 1547 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size); 1548 if (err) 1549 goto free_mem; 1550 1551 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL); 1552 if (!pfvf->cq_bmap) 1553 goto free_mem; 1554 1555 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf), 1556 (u64)pfvf->cq_ctx->iova); 1557 1558 cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20; 1559 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg); 1560 1561 /* Initialize receive side scaling (RSS) */ 1562 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF); 1563 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz, 1564 req->rss_grps, hwctx_size, req->way_mask, 1565 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER)); 1566 if (err) 1567 goto free_mem; 1568 1569 /* Alloc memory for CQINT's HW contexts */ 1570 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1571 qints = (cfg >> 24) & 0xFFF; 1572 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF); 1573 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size); 1574 if (err) 1575 goto free_mem; 1576 1577 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf), 1578 (u64)pfvf->cq_ints_ctx->iova); 1579 1580 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), 1581 BIT_ULL(36) | req->way_mask << 20); 1582 1583 /* Alloc memory for QINT's HW contexts */ 1584 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1585 qints = (cfg >> 12) & 0xFFF; 1586 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF); 1587 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size); 1588 if (err) 1589 goto free_mem; 1590 1591 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf), 1592 (u64)pfvf->nix_qints_ctx->iova); 1593 rvu_write64(rvu, 
blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), 1594 BIT_ULL(36) | req->way_mask << 20); 1595 1596 /* Setup VLANX TPID's. 1597 * Use VLAN1 for 802.1Q 1598 * and VLAN0 for 802.1AD. 1599 */ 1600 cfg = (0x8100ULL << 16) | 0x88A8ULL; 1601 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 1602 1603 /* Enable LMTST for this NIX LF */ 1604 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0)); 1605 1606 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */ 1607 if (req->npa_func) 1608 cfg = req->npa_func; 1609 if (req->sso_func) 1610 cfg |= (u64)req->sso_func << 16; 1611 1612 cfg |= (u64)req->xqe_sz << 33; 1613 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg); 1614 1615 /* Config Rx pkt length, csum checks and apad enable / disable */ 1616 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg); 1617 1618 /* Configure pkind for TX parse config */ 1619 cfg = NPC_TX_DEF_PKIND; 1620 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg); 1621 1622 if (is_rep_dev(rvu, pcifunc)) { 1623 pfvf->tx_chan_base = RVU_SWITCH_LBK_CHAN; 1624 pfvf->tx_chan_cnt = 1; 1625 goto exit; 1626 } 1627 1628 intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 1629 if (is_sdp_pfvf(pcifunc)) 1630 intf = NIX_INTF_TYPE_SDP; 1631 1632 err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp, 1633 !!(req->flags & NIX_LF_LBK_BLK_SEL)); 1634 if (err) 1635 goto free_mem; 1636 1637 /* Disable NPC entries as NIXLF's contexts are not initialized yet */ 1638 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); 1639 1640 /* Configure RX VTAG Type 7 (strip) for vf vlan */ 1641 rvu_write64(rvu, blkaddr, 1642 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7), 1643 VTAGSIZE_T4 | VTAG_STRIP); 1644 1645 goto exit; 1646 1647 free_mem: 1648 nix_ctx_free(rvu, pfvf); 1649 rc = -ENOMEM; 1650 1651 exit: 1652 /* Set macaddr of this PF/VF */ 1653 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 1654 1655 /* set SQB size info */ 1656 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST); 1657 rsp->sqb_size = (cfg >> 34) & 0xFFFF; 1658 rsp->rx_chan_base = pfvf->rx_chan_base; 1659 rsp->tx_chan_base = pfvf->tx_chan_base; 1660 rsp->rx_chan_cnt = pfvf->rx_chan_cnt; 1661 rsp->tx_chan_cnt = pfvf->tx_chan_cnt; 1662 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4; 1663 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6; 1664 /* Get HW supported stat count */ 1665 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 1666 rsp->lf_rx_stats = ((cfg >> 32) & 0xFF); 1667 rsp->lf_tx_stats = ((cfg >> 24) & 0xFF); 1668 /* Get count of CQ IRQs and error IRQs supported per LF */ 1669 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1670 rsp->qints = ((cfg >> 12) & 0xFFF); 1671 rsp->cints = ((cfg >> 24) & 0xFFF); 1672 rsp->cgx_links = hw->cgx_links; 1673 rsp->lbk_links = hw->lbk_links; 1674 rsp->sdp_links = hw->sdp_links; 1675 1676 return rc; 1677 } 1678 1679 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req, 1680 struct msg_rsp *rsp) 1681 { 1682 struct rvu_hwinfo *hw = rvu->hw; 1683 u16 pcifunc = req->hdr.pcifunc; 1684 struct rvu_block *block; 1685 int blkaddr, nixlf, err; 1686 struct rvu_pfvf *pfvf; 1687 1688 pfvf = rvu_get_pfvf(rvu, pcifunc); 1689 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1690 if (!pfvf->nixlf || blkaddr < 0) 1691 return NIX_AF_ERR_AF_LF_INVALID; 1692 1693 block = &hw->block[blkaddr]; 1694 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1695 if (nixlf < 0) 1696 return NIX_AF_ERR_AF_LF_INVALID; 1697 1698 if (is_rep_dev(rvu, pcifunc)) 1699 goto free_lf; 1700 1701 if 
(req->flags & NIX_LF_DISABLE_FLOWS) 1702 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 1703 else 1704 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); 1705 1706 /* Free any tx vtag def entries used by this NIX LF */ 1707 if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG)) 1708 nix_free_tx_vtag_entries(rvu, pcifunc); 1709 1710 nix_interface_deinit(rvu, pcifunc, nixlf); 1711 1712 free_lf: 1713 /* Reset this NIX LF */ 1714 err = rvu_lf_reset(rvu, block, nixlf); 1715 if (err) { 1716 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", 1717 block->addr - BLKADDR_NIX0, nixlf); 1718 return NIX_AF_ERR_LF_RESET; 1719 } 1720 1721 nix_ctx_free(rvu, pfvf); 1722 1723 return 0; 1724 } 1725 1726 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, 1727 struct nix_mark_format_cfg *req, 1728 struct nix_mark_format_cfg_rsp *rsp) 1729 { 1730 u16 pcifunc = req->hdr.pcifunc; 1731 struct nix_hw *nix_hw; 1732 struct rvu_pfvf *pfvf; 1733 int blkaddr, rc; 1734 u32 cfg; 1735 1736 pfvf = rvu_get_pfvf(rvu, pcifunc); 1737 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1738 if (!pfvf->nixlf || blkaddr < 0) 1739 return NIX_AF_ERR_AF_LF_INVALID; 1740 1741 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1742 if (!nix_hw) 1743 return NIX_AF_ERR_INVALID_NIXBLK; 1744 1745 cfg = (((u32)req->offset & 0x7) << 16) | 1746 (((u32)req->y_mask & 0xF) << 12) | 1747 (((u32)req->y_val & 0xF) << 8) | 1748 (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF); 1749 1750 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg); 1751 if (rc < 0) { 1752 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)", 1753 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 1754 return NIX_AF_ERR_MARK_CFG_FAIL; 1755 } 1756 1757 rsp->mark_format_idx = rc; 1758 return 0; 1759 } 1760 1761 /* Handle shaper update specially for few revisions */ 1762 static bool 1763 handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf, 1764 int lvl, u64 reg, u64 regval) 1765 { 1766 u64 regbase, oldval, sw_xoff = 0; 1767 u64 dbgval, md_debug0 = 0; 1768 unsigned long poll_tmo; 1769 bool rate_reg = 0; 1770 u32 schq; 1771 1772 regbase = reg & 0xFFFF; 1773 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 1774 1775 /* Check for rate register */ 1776 switch (lvl) { 1777 case NIX_TXSCH_LVL_TL1: 1778 md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq); 1779 sw_xoff = NIX_AF_TL1X_SW_XOFF(schq); 1780 1781 rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0)); 1782 break; 1783 case NIX_TXSCH_LVL_TL2: 1784 md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq); 1785 sw_xoff = NIX_AF_TL2X_SW_XOFF(schq); 1786 1787 rate_reg = (regbase == NIX_AF_TL2X_CIR(0) || 1788 regbase == NIX_AF_TL2X_PIR(0)); 1789 break; 1790 case NIX_TXSCH_LVL_TL3: 1791 md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq); 1792 sw_xoff = NIX_AF_TL3X_SW_XOFF(schq); 1793 1794 rate_reg = (regbase == NIX_AF_TL3X_CIR(0) || 1795 regbase == NIX_AF_TL3X_PIR(0)); 1796 break; 1797 case NIX_TXSCH_LVL_TL4: 1798 md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq); 1799 sw_xoff = NIX_AF_TL4X_SW_XOFF(schq); 1800 1801 rate_reg = (regbase == NIX_AF_TL4X_CIR(0) || 1802 regbase == NIX_AF_TL4X_PIR(0)); 1803 break; 1804 case NIX_TXSCH_LVL_MDQ: 1805 sw_xoff = NIX_AF_MDQX_SW_XOFF(schq); 1806 rate_reg = (regbase == NIX_AF_MDQX_CIR(0) || 1807 regbase == NIX_AF_MDQX_PIR(0)); 1808 break; 1809 } 1810 1811 if (!rate_reg) 1812 return false; 1813 1814 /* Nothing special to do when state is not toggled */ 1815 oldval = rvu_read64(rvu, blkaddr, reg); 1816 if ((oldval & 0x1) == (regval & 0x1)) { 1817 rvu_write64(rvu, blkaddr, reg, regval); 1818 return true; 1819 } 1820 1821 /* PIR/CIR disable 
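	 * (i.e. assert SW_XOFF, zero the rate register, brief delay, then
	 * release SW_XOFF, as the code below does)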
*/ 1822 if (!(regval & 0x1)) { 1823 rvu_write64(rvu, blkaddr, sw_xoff, 1); 1824 rvu_write64(rvu, blkaddr, reg, 0); 1825 udelay(4); 1826 rvu_write64(rvu, blkaddr, sw_xoff, 0); 1827 return true; 1828 } 1829 1830 /* PIR/CIR enable */ 1831 rvu_write64(rvu, blkaddr, sw_xoff, 1); 1832 if (md_debug0) { 1833 poll_tmo = jiffies + usecs_to_jiffies(10000); 1834 /* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */ 1835 do { 1836 if (time_after(jiffies, poll_tmo)) { 1837 dev_err(rvu->dev, 1838 "NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n", 1839 nixlf, schq, lvl); 1840 goto exit; 1841 } 1842 usleep_range(1, 5); 1843 dbgval = rvu_read64(rvu, blkaddr, md_debug0); 1844 } while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48))); 1845 } 1846 rvu_write64(rvu, blkaddr, reg, regval); 1847 exit: 1848 rvu_write64(rvu, blkaddr, sw_xoff, 0); 1849 return true; 1850 } 1851 1852 static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr, 1853 int lvl, int schq) 1854 { 1855 u64 tlx_parent = 0, tlx_schedule = 0; 1856 1857 switch (lvl) { 1858 case NIX_TXSCH_LVL_TL2: 1859 tlx_parent = NIX_AF_TL2X_PARENT(schq); 1860 tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq); 1861 break; 1862 case NIX_TXSCH_LVL_TL3: 1863 tlx_parent = NIX_AF_TL3X_PARENT(schq); 1864 tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq); 1865 break; 1866 case NIX_TXSCH_LVL_TL4: 1867 tlx_parent = NIX_AF_TL4X_PARENT(schq); 1868 tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq); 1869 break; 1870 case NIX_TXSCH_LVL_MDQ: 1871 /* no need to reset SMQ_CFG as HW clears this CSR 1872 * on SMQ flush 1873 */ 1874 tlx_parent = NIX_AF_MDQX_PARENT(schq); 1875 tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq); 1876 break; 1877 default: 1878 return; 1879 } 1880 1881 if (tlx_parent) 1882 rvu_write64(rvu, blkaddr, tlx_parent, 0x0); 1883 1884 if (tlx_schedule) 1885 rvu_write64(rvu, blkaddr, tlx_schedule, 0x0); 1886 } 1887 1888 /* Disable shaping of pkts by a scheduler queue 1889 * at a given scheduler level. 
1890 */ 1891 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, 1892 int nixlf, int lvl, int schq) 1893 { 1894 struct rvu_hwinfo *hw = rvu->hw; 1895 u64 cir_reg = 0, pir_reg = 0; 1896 u64 cfg; 1897 1898 switch (lvl) { 1899 case NIX_TXSCH_LVL_TL1: 1900 cir_reg = NIX_AF_TL1X_CIR(schq); 1901 pir_reg = 0; /* PIR not available at TL1 */ 1902 break; 1903 case NIX_TXSCH_LVL_TL2: 1904 cir_reg = NIX_AF_TL2X_CIR(schq); 1905 pir_reg = NIX_AF_TL2X_PIR(schq); 1906 break; 1907 case NIX_TXSCH_LVL_TL3: 1908 cir_reg = NIX_AF_TL3X_CIR(schq); 1909 pir_reg = NIX_AF_TL3X_PIR(schq); 1910 break; 1911 case NIX_TXSCH_LVL_TL4: 1912 cir_reg = NIX_AF_TL4X_CIR(schq); 1913 pir_reg = NIX_AF_TL4X_PIR(schq); 1914 break; 1915 case NIX_TXSCH_LVL_MDQ: 1916 cir_reg = NIX_AF_MDQX_CIR(schq); 1917 pir_reg = NIX_AF_MDQX_PIR(schq); 1918 break; 1919 } 1920 1921 /* Shaper state toggle needs wait/poll */ 1922 if (hw->cap.nix_shaper_toggle_wait) { 1923 if (cir_reg) 1924 handle_txschq_shaper_update(rvu, blkaddr, nixlf, 1925 lvl, cir_reg, 0); 1926 if (pir_reg) 1927 handle_txschq_shaper_update(rvu, blkaddr, nixlf, 1928 lvl, pir_reg, 0); 1929 return; 1930 } 1931 1932 if (!cir_reg) 1933 return; 1934 cfg = rvu_read64(rvu, blkaddr, cir_reg); 1935 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0)); 1936 1937 if (!pir_reg) 1938 return; 1939 cfg = rvu_read64(rvu, blkaddr, pir_reg); 1940 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0)); 1941 } 1942 1943 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, 1944 int lvl, int schq) 1945 { 1946 struct rvu_hwinfo *hw = rvu->hw; 1947 int link_level; 1948 int link; 1949 1950 if (lvl >= hw->cap.nix_tx_aggr_lvl) 1951 return; 1952 1953 /* Reset TL4's SDP link config */ 1954 if (lvl == NIX_TXSCH_LVL_TL4) 1955 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00); 1956 1957 link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 
1958 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 1959 if (lvl != link_level) 1960 return; 1961 1962 /* Reset TL2's CGX or LBK link config */ 1963 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) 1964 rvu_write64(rvu, blkaddr, 1965 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); 1966 } 1967 1968 static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr, 1969 int lvl, int schq) 1970 { 1971 struct rvu_hwinfo *hw = rvu->hw; 1972 u64 reg; 1973 1974 /* Skip this if shaping is not supported */ 1975 if (!hw->cap.nix_shaping) 1976 return; 1977 1978 /* Clear level specific SW_XOFF */ 1979 switch (lvl) { 1980 case NIX_TXSCH_LVL_TL1: 1981 reg = NIX_AF_TL1X_SW_XOFF(schq); 1982 break; 1983 case NIX_TXSCH_LVL_TL2: 1984 reg = NIX_AF_TL2X_SW_XOFF(schq); 1985 break; 1986 case NIX_TXSCH_LVL_TL3: 1987 reg = NIX_AF_TL3X_SW_XOFF(schq); 1988 break; 1989 case NIX_TXSCH_LVL_TL4: 1990 reg = NIX_AF_TL4X_SW_XOFF(schq); 1991 break; 1992 case NIX_TXSCH_LVL_MDQ: 1993 reg = NIX_AF_MDQX_SW_XOFF(schq); 1994 break; 1995 default: 1996 return; 1997 } 1998 1999 rvu_write64(rvu, blkaddr, reg, 0x0); 2000 } 2001 2002 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) 2003 { 2004 struct rvu_hwinfo *hw = rvu->hw; 2005 int pf = rvu_get_pf(pcifunc); 2006 u8 cgx_id = 0, lmac_id = 0; 2007 2008 if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */ 2009 return hw->cgx_links; 2010 } else if (is_pf_cgxmapped(rvu, pf)) { 2011 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 2012 return (cgx_id * hw->lmac_per_cgx) + lmac_id; 2013 } 2014 2015 /* SDP link */ 2016 return hw->cgx_links + hw->lbk_links; 2017 } 2018 2019 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc, 2020 int link, int *start, int *end) 2021 { 2022 struct rvu_hwinfo *hw = rvu->hw; 2023 int pf = rvu_get_pf(pcifunc); 2024 2025 /* LBK links */ 2026 if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) { 2027 *start = hw->cap.nix_txsch_per_cgx_lmac * link; 2028 *end = *start + hw->cap.nix_txsch_per_lbk_lmac; 2029 } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */ 2030 *start = hw->cap.nix_txsch_per_cgx_lmac * link; 2031 *end = *start + hw->cap.nix_txsch_per_cgx_lmac; 2032 } else { /* SDP link */ 2033 *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) + 2034 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links); 2035 *end = *start + hw->cap.nix_txsch_per_sdp_lmac; 2036 } 2037 } 2038 2039 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc, 2040 struct nix_hw *nix_hw, 2041 struct nix_txsch_alloc_req *req) 2042 { 2043 struct rvu_hwinfo *hw = rvu->hw; 2044 int schq, req_schq, free_cnt; 2045 struct nix_txsch *txsch; 2046 int link, start, end; 2047 2048 txsch = &nix_hw->txsch[lvl]; 2049 req_schq = req->schq_contig[lvl] + req->schq[lvl]; 2050 2051 if (!req_schq) 2052 return 0; 2053 2054 link = nix_get_tx_link(rvu, pcifunc); 2055 2056 /* For traffic aggregating scheduler level, one queue is enough */ 2057 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 2058 if (req_schq != 1) 2059 return NIX_AF_ERR_TLX_ALLOC_FAIL; 2060 return 0; 2061 } 2062 2063 /* Get free SCHQ count and check if request can be accomodated */ 2064 if (hw->cap.nix_fixed_txschq_mapping) { 2065 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 2066 schq = start + (pcifunc & RVU_PFVF_FUNC_MASK); 2067 if (end <= txsch->schq.max && schq < end && 2068 !test_bit(schq, txsch->schq.bmap)) 2069 free_cnt = 1; 2070 else 2071 free_cnt = 0; 2072 } else { 2073 free_cnt = rvu_rsrc_free_count(&txsch->schq); 2074 } 2075 2076 if (free_cnt < req_schq || req->schq[lvl] > 
MAX_TXSCHQ_PER_FUNC ||
	    req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	/* If contiguous queues are needed, check for availability */
	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	return 0;
}

static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
			    struct nix_txsch_alloc_rsp *rsp,
			    int lvl, int start, int end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = rsp->hdr.pcifunc;
	int idx, schq;

	/* For traffic aggregating levels, queue alloc is based
	 * on the transmit link to which the PF_FUNC is mapped.
	 */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		/* A single TL queue is allocated */
		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			rsp->schq_contig_list[lvl][0] = start;
		}

		/* Both contig and non-contig reqs don't make sense here */
		if (rsp->schq_contig[lvl])
			rsp->schq[lvl] = 0;

		if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			rsp->schq_list[lvl][0] = start;
		}
		return;
	}

	/* Adjust the queue request count if HW supports
	 * only one queue per level configuration.
	 */
	if (hw->cap.nix_fixed_txschq_mapping) {
		idx = pcifunc & RVU_PFVF_FUNC_MASK;
		schq = start + idx;
		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
			rsp->schq_contig[lvl] = 0;
			rsp->schq[lvl] = 0;
			return;
		}

		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][0] = schq;
			rsp->schq[lvl] = 0;
		} else if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_list[lvl][0] = schq;
		}
		return;
	}

	/* Allocate the requested contiguous queue indices first */
	if (rsp->schq_contig[lvl]) {
		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
						  txsch->schq.max, start,
						  rsp->schq_contig[lvl], 0);
		if (schq >= end)
			rsp->schq_contig[lvl] = 0;
		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][idx] = schq;
			schq++;
		}
	}

	/* Allocate non-contiguous queue indices */
	if (rsp->schq[lvl]) {
		idx = 0;
		for (schq = start; schq < end; schq++) {
			if (!test_bit(schq, txsch->schq.bmap)) {
				set_bit(schq, txsch->schq.bmap);
				rsp->schq_list[lvl][idx++] = schq;
			}
			if (idx == rsp->schq[lvl])
				break;
		}
		/* Update how many were allocated */
		rsp->schq[lvl] = idx;
	}
}

int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
				     struct nix_txsch_alloc_req *req,
				     struct nix_txsch_alloc_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int link, blkaddr, rc = 0;
	int lvl, idx, start, end;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;
	int nixlf;
	u16 schq;

	rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (rc)
		return rc;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	mutex_lock(&rvu->rsrc_lock);

	/* Check if request is valid as per HW capabilities
	 * and can be accommodated.
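	 * All levels are validated up front so that nothing is allocated
	 * if any single level of the request cannot be satisfied.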
2198 */ 2199 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2200 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req); 2201 if (rc) 2202 goto err; 2203 } 2204 2205 /* Allocate requested Tx scheduler queues */ 2206 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2207 txsch = &nix_hw->txsch[lvl]; 2208 pfvf_map = txsch->pfvf_map; 2209 2210 if (!req->schq[lvl] && !req->schq_contig[lvl]) 2211 continue; 2212 2213 rsp->schq[lvl] = req->schq[lvl]; 2214 rsp->schq_contig[lvl] = req->schq_contig[lvl]; 2215 2216 link = nix_get_tx_link(rvu, pcifunc); 2217 2218 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 2219 start = link; 2220 end = link; 2221 } else if (hw->cap.nix_fixed_txschq_mapping) { 2222 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 2223 } else { 2224 start = 0; 2225 end = txsch->schq.max; 2226 } 2227 2228 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end); 2229 2230 /* Reset queue config */ 2231 for (idx = 0; idx < req->schq_contig[lvl]; idx++) { 2232 schq = rsp->schq_contig_list[lvl][idx]; 2233 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 2234 NIX_TXSCHQ_CFG_DONE)) 2235 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 2236 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2237 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2238 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2239 } 2240 2241 for (idx = 0; idx < req->schq[lvl]; idx++) { 2242 schq = rsp->schq_list[lvl][idx]; 2243 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 2244 NIX_TXSCHQ_CFG_DONE)) 2245 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 2246 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2247 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2248 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2249 } 2250 } 2251 2252 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl; 2253 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO; 2254 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr, 2255 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 
2256 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 2257 goto exit; 2258 err: 2259 rc = NIX_AF_ERR_TLX_ALLOC_FAIL; 2260 exit: 2261 mutex_unlock(&rvu->rsrc_lock); 2262 return rc; 2263 } 2264 2265 static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq, 2266 struct nix_smq_flush_ctx *smq_flush_ctx) 2267 { 2268 struct nix_smq_tree_ctx *smq_tree_ctx; 2269 u64 parent_off, regval; 2270 u16 schq; 2271 int lvl; 2272 2273 smq_flush_ctx->smq = smq; 2274 2275 schq = smq; 2276 for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) { 2277 smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl]; 2278 smq_tree_ctx->schq = schq; 2279 if (lvl == NIX_TXSCH_LVL_TL1) { 2280 smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq); 2281 smq_tree_ctx->pir_off = 0; 2282 smq_tree_ctx->pir_val = 0; 2283 parent_off = 0; 2284 } else if (lvl == NIX_TXSCH_LVL_TL2) { 2285 smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq); 2286 smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq); 2287 parent_off = NIX_AF_TL2X_PARENT(schq); 2288 } else if (lvl == NIX_TXSCH_LVL_TL3) { 2289 smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq); 2290 smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq); 2291 parent_off = NIX_AF_TL3X_PARENT(schq); 2292 } else if (lvl == NIX_TXSCH_LVL_TL4) { 2293 smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq); 2294 smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq); 2295 parent_off = NIX_AF_TL4X_PARENT(schq); 2296 } else if (lvl == NIX_TXSCH_LVL_MDQ) { 2297 smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq); 2298 smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq); 2299 parent_off = NIX_AF_MDQX_PARENT(schq); 2300 } 2301 /* save cir/pir register values */ 2302 smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off); 2303 if (smq_tree_ctx->pir_off) 2304 smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off); 2305 2306 /* get parent txsch node */ 2307 if (parent_off) { 2308 regval = rvu_read64(rvu, blkaddr, parent_off); 2309 schq = (regval >> 16) & 0x1FF; 2310 } 2311 } 2312 } 2313 2314 static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr, 2315 struct nix_smq_flush_ctx *smq_flush_ctx, bool enable) 2316 { 2317 struct nix_txsch *txsch; 2318 struct nix_hw *nix_hw; 2319 int tl2, tl2_schq; 2320 u64 regoff; 2321 2322 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2323 if (!nix_hw) 2324 return; 2325 2326 /* loop through all TL2s with matching PF_FUNC */ 2327 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2]; 2328 tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq; 2329 for (tl2 = 0; tl2 < txsch->schq.max; tl2++) { 2330 /* skip the smq(flush) TL2 */ 2331 if (tl2 == tl2_schq) 2332 continue; 2333 /* skip unused TL2s */ 2334 if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE) 2335 continue; 2336 /* skip if PF_FUNC doesn't match */ 2337 if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) != 2338 (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq] & 2339 ~RVU_PFVF_FUNC_MASK))) 2340 continue; 2341 /* enable/disable XOFF */ 2342 regoff = NIX_AF_TL2X_SW_XOFF(tl2); 2343 if (enable) 2344 rvu_write64(rvu, blkaddr, regoff, 0x1); 2345 else 2346 rvu_write64(rvu, blkaddr, regoff, 0x0); 2347 } 2348 } 2349 2350 static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr, 2351 struct nix_smq_flush_ctx *smq_flush_ctx, bool enable) 2352 { 2353 u64 cir_off, pir_off, cir_val, pir_val; 2354 struct nix_smq_tree_ctx *smq_tree_ctx; 2355 int lvl; 2356 2357 for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) { 2358 smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl]; 2359 cir_off = smq_tree_ctx->cir_off; 2360 cir_val = 
smq_tree_ctx->cir_val; 2361 pir_off = smq_tree_ctx->pir_off; 2362 pir_val = smq_tree_ctx->pir_val; 2363 2364 if (enable) { 2365 rvu_write64(rvu, blkaddr, cir_off, cir_val); 2366 if (lvl != NIX_TXSCH_LVL_TL1) 2367 rvu_write64(rvu, blkaddr, pir_off, pir_val); 2368 } else { 2369 rvu_write64(rvu, blkaddr, cir_off, 0x0); 2370 if (lvl != NIX_TXSCH_LVL_TL1) 2371 rvu_write64(rvu, blkaddr, pir_off, 0x0); 2372 } 2373 } 2374 } 2375 2376 static int nix_smq_flush(struct rvu *rvu, int blkaddr, 2377 int smq, u16 pcifunc, int nixlf) 2378 { 2379 struct nix_smq_flush_ctx *smq_flush_ctx; 2380 int err, restore_tx_en = 0, i; 2381 int pf = rvu_get_pf(pcifunc); 2382 u8 cgx_id = 0, lmac_id = 0; 2383 u16 tl2_tl3_link_schq; 2384 u8 link, link_level; 2385 u64 cfg, bmap = 0; 2386 2387 if (!is_rvu_otx2(rvu)) { 2388 /* Skip SMQ flush if pkt count is zero */ 2389 cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq)); 2390 if (!cfg) 2391 return 0; 2392 } 2393 2394 /* enable cgx tx if disabled */ 2395 if (is_pf_cgxmapped(rvu, pf)) { 2396 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 2397 restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), 2398 lmac_id, true); 2399 } 2400 2401 /* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */ 2402 smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL); 2403 if (!smq_flush_ctx) 2404 return -ENOMEM; 2405 nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx); 2406 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true); 2407 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false); 2408 2409 /* Disable backpressure from physical link, 2410 * otherwise SMQ flush may stall. 2411 */ 2412 rvu_cgx_enadis_rx_bp(rvu, pf, false); 2413 2414 link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 
2415 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 2416 tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq; 2417 link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq; 2418 2419 /* SMQ set enqueue xoff */ 2420 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); 2421 cfg |= BIT_ULL(50); 2422 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); 2423 2424 /* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */ 2425 for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) { 2426 cfg = rvu_read64(rvu, blkaddr, 2427 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link)); 2428 if (!(cfg & BIT_ULL(12))) 2429 continue; 2430 bmap |= BIT_ULL(i); 2431 cfg &= ~BIT_ULL(12); 2432 rvu_write64(rvu, blkaddr, 2433 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg); 2434 } 2435 2436 /* Do SMQ flush and set enqueue xoff */ 2437 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); 2438 cfg |= BIT_ULL(50) | BIT_ULL(49); 2439 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); 2440 2441 /* Wait for flush to complete */ 2442 err = rvu_poll_reg(rvu, blkaddr, 2443 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true); 2444 if (err) 2445 dev_info(rvu->dev, 2446 "NIXLF%d: SMQ%d flush failed, txlink might be busy\n", 2447 nixlf, smq); 2448 2449 /* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */ 2450 for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) { 2451 if (!(bmap & BIT_ULL(i))) 2452 continue; 2453 cfg = rvu_read64(rvu, blkaddr, 2454 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link)); 2455 cfg |= BIT_ULL(12); 2456 rvu_write64(rvu, blkaddr, 2457 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg); 2458 } 2459 2460 /* clear XOFF on TL2s */ 2461 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true); 2462 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false); 2463 kfree(smq_flush_ctx); 2464 2465 rvu_cgx_enadis_rx_bp(rvu, pf, true); 2466 /* restore cgx tx state */ 2467 if (restore_tx_en) 2468 rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); 2469 return err; 2470 } 2471 2472 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) 2473 { 2474 int blkaddr, nixlf, lvl, schq, err; 2475 struct rvu_hwinfo *hw = rvu->hw; 2476 struct nix_txsch *txsch; 2477 struct nix_hw *nix_hw; 2478 u16 map_func; 2479 2480 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2481 if (blkaddr < 0) 2482 return NIX_AF_ERR_AF_LF_INVALID; 2483 2484 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2485 if (!nix_hw) 2486 return NIX_AF_ERR_INVALID_NIXBLK; 2487 2488 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 2489 if (nixlf < 0) 2490 return NIX_AF_ERR_AF_LF_INVALID; 2491 2492 /* Disable TL2/3 queue links and all XOFF's before SMQ flush*/ 2493 mutex_lock(&rvu->rsrc_lock); 2494 for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2495 txsch = &nix_hw->txsch[lvl]; 2496 2497 if (lvl >= hw->cap.nix_tx_aggr_lvl) 2498 continue; 2499 2500 for (schq = 0; schq < txsch->schq.max; schq++) { 2501 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2502 continue; 2503 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2504 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); 2505 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2506 } 2507 } 2508 nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1, 2509 nix_get_tx_link(rvu, pcifunc)); 2510 2511 /* On PF cleanup, clear cfg done flag as 2512 * PF would have changed default config. 
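	 * Only the NIX_TXSCHQ_CFG_DONE flag is dropped here; the TL1
	 * queue's pcifunc mapping is left intact since a VF may still be
	 * using it (see the comment below).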
2513 */ 2514 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) { 2515 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1]; 2516 schq = nix_get_tx_link(rvu, pcifunc); 2517 /* Do not clear pcifunc in txsch->pfvf_map[schq] because 2518 * VF might be using this TL1 queue 2519 */ 2520 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); 2521 txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0); 2522 } 2523 2524 /* Flush SMQs */ 2525 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; 2526 for (schq = 0; schq < txsch->schq.max; schq++) { 2527 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2528 continue; 2529 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 2530 } 2531 2532 /* Now free scheduler queues to free pool */ 2533 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2534 /* TLs above aggregation level are shared across all PF 2535 * and it's VFs, hence skip freeing them. 2536 */ 2537 if (lvl >= hw->cap.nix_tx_aggr_lvl) 2538 continue; 2539 2540 txsch = &nix_hw->txsch[lvl]; 2541 for (schq = 0; schq < txsch->schq.max; schq++) { 2542 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2543 continue; 2544 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2545 rvu_free_rsrc(&txsch->schq, schq); 2546 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 2547 } 2548 } 2549 mutex_unlock(&rvu->rsrc_lock); 2550 2551 err = rvu_ndc_sync(rvu, blkaddr, nixlf, NIX_AF_NDC_TX_SYNC); 2552 if (err) 2553 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf); 2554 2555 return 0; 2556 } 2557 2558 static int nix_txschq_free_one(struct rvu *rvu, 2559 struct nix_txsch_free_req *req) 2560 { 2561 struct rvu_hwinfo *hw = rvu->hw; 2562 u16 pcifunc = req->hdr.pcifunc; 2563 int lvl, schq, nixlf, blkaddr; 2564 struct nix_txsch *txsch; 2565 struct nix_hw *nix_hw; 2566 u32 *pfvf_map; 2567 int rc; 2568 2569 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2570 if (blkaddr < 0) 2571 return NIX_AF_ERR_AF_LF_INVALID; 2572 2573 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2574 if (!nix_hw) 2575 return NIX_AF_ERR_INVALID_NIXBLK; 2576 2577 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 2578 if (nixlf < 0) 2579 return NIX_AF_ERR_AF_LF_INVALID; 2580 2581 lvl = req->schq_lvl; 2582 schq = req->schq; 2583 txsch = &nix_hw->txsch[lvl]; 2584 2585 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max) 2586 return 0; 2587 2588 pfvf_map = txsch->pfvf_map; 2589 mutex_lock(&rvu->rsrc_lock); 2590 2591 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) { 2592 rc = NIX_AF_ERR_TLX_INVALID; 2593 goto err; 2594 } 2595 2596 /* Clear SW_XOFF of this resource only. 2597 * For SMQ level, all path XOFF's 2598 * need to be made clear by user 2599 */ 2600 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); 2601 2602 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2603 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2604 2605 /* Flush if it is a SMQ. 
Onus of disabling 2606 * TL2/3 queue links before SMQ flush is on user 2607 */ 2608 if (lvl == NIX_TXSCH_LVL_SMQ && 2609 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) { 2610 rc = NIX_AF_SMQ_FLUSH_FAILED; 2611 goto err; 2612 } 2613 2614 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2615 2616 /* Free the resource */ 2617 rvu_free_rsrc(&txsch->schq, schq); 2618 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 2619 mutex_unlock(&rvu->rsrc_lock); 2620 return 0; 2621 err: 2622 mutex_unlock(&rvu->rsrc_lock); 2623 return rc; 2624 } 2625 2626 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, 2627 struct nix_txsch_free_req *req, 2628 struct msg_rsp *rsp) 2629 { 2630 if (req->flags & TXSCHQ_FREE_ALL) 2631 return nix_txschq_free(rvu, req->hdr.pcifunc); 2632 else 2633 return nix_txschq_free_one(rvu, req); 2634 } 2635 2636 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, 2637 int lvl, u64 reg, u64 regval) 2638 { 2639 u64 regbase = reg & 0xFFFF; 2640 u16 schq, parent; 2641 2642 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg)) 2643 return false; 2644 2645 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2646 /* Check if this schq belongs to this PF/VF or not */ 2647 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq)) 2648 return false; 2649 2650 parent = (regval >> 16) & 0x1FF; 2651 /* Validate MDQ's TL4 parent */ 2652 if (regbase == NIX_AF_MDQX_PARENT(0) && 2653 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent)) 2654 return false; 2655 2656 /* Validate TL4's TL3 parent */ 2657 if (regbase == NIX_AF_TL4X_PARENT(0) && 2658 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent)) 2659 return false; 2660 2661 /* Validate TL3's TL2 parent */ 2662 if (regbase == NIX_AF_TL3X_PARENT(0) && 2663 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent)) 2664 return false; 2665 2666 /* Validate TL2's TL1 parent */ 2667 if (regbase == NIX_AF_TL2X_PARENT(0) && 2668 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent)) 2669 return false; 2670 2671 return true; 2672 } 2673 2674 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg) 2675 { 2676 u64 regbase; 2677 2678 if (hw->cap.nix_shaping) 2679 return true; 2680 2681 /* If shaping and coloring is not supported, then 2682 * *_CIR and *_PIR registers should not be configured. 
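	 * The caller (rvu_mbox_handler_nix_txschq_cfg()) simply skips such
	 * register writes instead of failing the whole request.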
2683 */ 2684 regbase = reg & 0xFFFF; 2685 2686 switch (lvl) { 2687 case NIX_TXSCH_LVL_TL1: 2688 if (regbase == NIX_AF_TL1X_CIR(0)) 2689 return false; 2690 break; 2691 case NIX_TXSCH_LVL_TL2: 2692 if (regbase == NIX_AF_TL2X_CIR(0) || 2693 regbase == NIX_AF_TL2X_PIR(0)) 2694 return false; 2695 break; 2696 case NIX_TXSCH_LVL_TL3: 2697 if (regbase == NIX_AF_TL3X_CIR(0) || 2698 regbase == NIX_AF_TL3X_PIR(0)) 2699 return false; 2700 break; 2701 case NIX_TXSCH_LVL_TL4: 2702 if (regbase == NIX_AF_TL4X_CIR(0) || 2703 regbase == NIX_AF_TL4X_PIR(0)) 2704 return false; 2705 break; 2706 case NIX_TXSCH_LVL_MDQ: 2707 if (regbase == NIX_AF_MDQX_CIR(0) || 2708 regbase == NIX_AF_MDQX_PIR(0)) 2709 return false; 2710 break; 2711 } 2712 return true; 2713 } 2714 2715 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, 2716 u16 pcifunc, int blkaddr) 2717 { 2718 u32 *pfvf_map; 2719 int schq; 2720 2721 schq = nix_get_tx_link(rvu, pcifunc); 2722 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map; 2723 /* Skip if PF has already done the config */ 2724 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE) 2725 return; 2726 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq), 2727 (TXSCH_TL1_DFLT_RR_PRIO << 1)); 2728 2729 /* On OcteonTx2 the config was in bytes and newer silcons 2730 * it's changed to weight. 2731 */ 2732 if (!rvu->hw->cap.nix_common_dwrr_mtu) 2733 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), 2734 TXSCH_TL1_DFLT_RR_QTM); 2735 else 2736 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), 2737 CN10K_MAX_DWRR_WEIGHT); 2738 2739 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00); 2740 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); 2741 } 2742 2743 /* Register offset - [15:0] 2744 * Scheduler Queue number - [25:16] 2745 */ 2746 #define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0) 2747 2748 static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw, 2749 int blkaddr, struct nix_txschq_config *req, 2750 struct nix_txschq_config *rsp) 2751 { 2752 u16 pcifunc = req->hdr.pcifunc; 2753 int idx, schq; 2754 u64 reg; 2755 2756 for (idx = 0; idx < req->num_regs; idx++) { 2757 reg = req->reg[idx]; 2758 reg &= NIX_TX_SCHQ_MASK; 2759 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2760 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) || 2761 !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq)) 2762 return NIX_AF_INVAL_TXSCHQ_CFG; 2763 rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg); 2764 } 2765 rsp->lvl = req->lvl; 2766 rsp->num_regs = req->num_regs; 2767 return 0; 2768 } 2769 2770 void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc, 2771 struct nix_txsch *txsch, bool enable) 2772 { 2773 struct rvu_hwinfo *hw = rvu->hw; 2774 int lbk_link_start, lbk_links; 2775 u8 pf = rvu_get_pf(pcifunc); 2776 int schq; 2777 u64 cfg; 2778 2779 if (!is_pf_cgxmapped(rvu, pf) && !is_rep_dev(rvu, pcifunc)) 2780 return; 2781 2782 cfg = enable ? 
(BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0; 2783 lbk_link_start = hw->cgx_links; 2784 2785 for (schq = 0; schq < txsch->schq.max; schq++) { 2786 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2787 continue; 2788 /* Enable all LBK links with channel 63 by default so that 2789 * packets can be sent to LBK with a NPC TX MCAM rule 2790 */ 2791 lbk_links = hw->lbk_links; 2792 while (lbk_links--) 2793 rvu_write64(rvu, blkaddr, 2794 NIX_AF_TL3_TL2X_LINKX_CFG(schq, 2795 lbk_link_start + 2796 lbk_links), cfg); 2797 } 2798 } 2799 2800 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, 2801 struct nix_txschq_config *req, 2802 struct nix_txschq_config *rsp) 2803 { 2804 u64 reg, val, regval, schq_regbase, val_mask; 2805 struct rvu_hwinfo *hw = rvu->hw; 2806 u16 pcifunc = req->hdr.pcifunc; 2807 struct nix_txsch *txsch; 2808 struct nix_hw *nix_hw; 2809 int blkaddr, idx, err; 2810 int nixlf, schq; 2811 u32 *pfvf_map; 2812 2813 if (req->lvl >= NIX_TXSCH_LVL_CNT || 2814 req->num_regs > MAX_REGS_PER_MBOX_MSG) 2815 return NIX_AF_INVAL_TXSCHQ_CFG; 2816 2817 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2818 if (err) 2819 return err; 2820 2821 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2822 if (!nix_hw) 2823 return NIX_AF_ERR_INVALID_NIXBLK; 2824 2825 if (req->read) 2826 return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp); 2827 2828 txsch = &nix_hw->txsch[req->lvl]; 2829 pfvf_map = txsch->pfvf_map; 2830 2831 if (req->lvl >= hw->cap.nix_tx_aggr_lvl && 2832 pcifunc & RVU_PFVF_FUNC_MASK) { 2833 mutex_lock(&rvu->rsrc_lock); 2834 if (req->lvl == NIX_TXSCH_LVL_TL1) 2835 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr); 2836 mutex_unlock(&rvu->rsrc_lock); 2837 return 0; 2838 } 2839 2840 for (idx = 0; idx < req->num_regs; idx++) { 2841 reg = req->reg[idx]; 2842 reg &= NIX_TX_SCHQ_MASK; 2843 regval = req->regval[idx]; 2844 schq_regbase = reg & 0xFFFF; 2845 val_mask = req->regval_mask[idx]; 2846 2847 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr, 2848 txsch->lvl, reg, regval)) 2849 return NIX_AF_INVAL_TXSCHQ_CFG; 2850 2851 /* Check if shaping and coloring is supported */ 2852 if (!is_txschq_shaping_valid(hw, req->lvl, reg)) 2853 continue; 2854 2855 val = rvu_read64(rvu, blkaddr, reg); 2856 regval = (val & val_mask) | (regval & ~val_mask); 2857 2858 /* Handle shaping state toggle specially */ 2859 if (hw->cap.nix_shaper_toggle_wait && 2860 handle_txschq_shaper_update(rvu, blkaddr, nixlf, 2861 req->lvl, reg, regval)) 2862 continue; 2863 2864 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ 2865 if (schq_regbase == NIX_AF_SMQX_CFG(0)) { 2866 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], 2867 pcifunc, 0); 2868 regval &= ~(0x7FULL << 24); 2869 regval |= ((u64)nixlf << 24); 2870 } 2871 2872 /* Clear 'BP_ENA' config, if it's not allowed */ 2873 if (!hw->cap.nix_tx_link_bp) { 2874 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) || 2875 (schq_regbase & 0xFF00) == 2876 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0)) 2877 regval &= ~BIT_ULL(13); 2878 } 2879 2880 /* Mark config as done for TL1 by PF */ 2881 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) && 2882 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) { 2883 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2884 mutex_lock(&rvu->rsrc_lock); 2885 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], 2886 NIX_TXSCHQ_CFG_DONE); 2887 mutex_unlock(&rvu->rsrc_lock); 2888 } 2889 2890 /* SMQ flush is special hence split register writes such 2891 * that flush first and write rest of the bits later. 
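		 * e.g. a requested value of (BIT_ULL(49) | cfg) effectively
		 * becomes:
		 *	nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
		 *	rvu_write64(rvu, blkaddr, reg, cfg);
		 * with bit 49 masked out of the final write.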
2892 */ 2893 if (schq_regbase == NIX_AF_SMQX_CFG(0) && 2894 (regval & BIT_ULL(49))) { 2895 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2896 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 2897 regval &= ~BIT_ULL(49); 2898 } 2899 rvu_write64(rvu, blkaddr, reg, regval); 2900 } 2901 2902 return 0; 2903 } 2904 2905 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, 2906 struct nix_vtag_config *req) 2907 { 2908 u64 regval = req->vtag_size; 2909 2910 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 || 2911 req->vtag_size > VTAGSIZE_T8) 2912 return -EINVAL; 2913 2914 /* RX VTAG Type 7 reserved for vf vlan */ 2915 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7) 2916 return NIX_AF_ERR_RX_VTAG_INUSE; 2917 2918 if (req->rx.capture_vtag) 2919 regval |= BIT_ULL(5); 2920 if (req->rx.strip_vtag) 2921 regval |= BIT_ULL(4); 2922 2923 rvu_write64(rvu, blkaddr, 2924 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval); 2925 return 0; 2926 } 2927 2928 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr, 2929 u16 pcifunc, int index) 2930 { 2931 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2932 struct nix_txvlan *vlan; 2933 2934 if (!nix_hw) 2935 return NIX_AF_ERR_INVALID_NIXBLK; 2936 2937 vlan = &nix_hw->txvlan; 2938 if (vlan->entry2pfvf_map[index] != pcifunc) 2939 return NIX_AF_ERR_PARAM; 2940 2941 rvu_write64(rvu, blkaddr, 2942 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull); 2943 rvu_write64(rvu, blkaddr, 2944 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull); 2945 2946 vlan->entry2pfvf_map[index] = 0; 2947 rvu_free_rsrc(&vlan->rsrc, index); 2948 2949 return 0; 2950 } 2951 2952 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc) 2953 { 2954 struct nix_txvlan *vlan; 2955 struct nix_hw *nix_hw; 2956 int index, blkaddr; 2957 2958 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2959 if (blkaddr < 0) 2960 return; 2961 2962 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2963 if (!nix_hw) 2964 return; 2965 2966 vlan = &nix_hw->txvlan; 2967 2968 mutex_lock(&vlan->rsrc_lock); 2969 /* Scan all the entries and free the ones mapped to 'pcifunc' */ 2970 for (index = 0; index < vlan->rsrc.max; index++) { 2971 if (vlan->entry2pfvf_map[index] == pcifunc) 2972 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index); 2973 } 2974 mutex_unlock(&vlan->rsrc_lock); 2975 } 2976 2977 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr, 2978 u64 vtag, u8 size) 2979 { 2980 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2981 struct nix_txvlan *vlan; 2982 u64 regval; 2983 int index; 2984 2985 if (!nix_hw) 2986 return NIX_AF_ERR_INVALID_NIXBLK; 2987 2988 vlan = &nix_hw->txvlan; 2989 2990 mutex_lock(&vlan->rsrc_lock); 2991 2992 index = rvu_alloc_rsrc(&vlan->rsrc); 2993 if (index < 0) { 2994 mutex_unlock(&vlan->rsrc_lock); 2995 return index; 2996 } 2997 2998 mutex_unlock(&vlan->rsrc_lock); 2999 3000 regval = size ? 
vtag : vtag << 32; 3001 3002 rvu_write64(rvu, blkaddr, 3003 NIX_AF_TX_VTAG_DEFX_DATA(index), regval); 3004 rvu_write64(rvu, blkaddr, 3005 NIX_AF_TX_VTAG_DEFX_CTL(index), size); 3006 3007 return index; 3008 } 3009 3010 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr, 3011 struct nix_vtag_config *req) 3012 { 3013 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 3014 u16 pcifunc = req->hdr.pcifunc; 3015 int idx0 = req->tx.vtag0_idx; 3016 int idx1 = req->tx.vtag1_idx; 3017 struct nix_txvlan *vlan; 3018 int err = 0; 3019 3020 if (!nix_hw) 3021 return NIX_AF_ERR_INVALID_NIXBLK; 3022 3023 vlan = &nix_hw->txvlan; 3024 if (req->tx.free_vtag0 && req->tx.free_vtag1) 3025 if (vlan->entry2pfvf_map[idx0] != pcifunc || 3026 vlan->entry2pfvf_map[idx1] != pcifunc) 3027 return NIX_AF_ERR_PARAM; 3028 3029 mutex_lock(&vlan->rsrc_lock); 3030 3031 if (req->tx.free_vtag0) { 3032 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0); 3033 if (err) 3034 goto exit; 3035 } 3036 3037 if (req->tx.free_vtag1) 3038 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1); 3039 3040 exit: 3041 mutex_unlock(&vlan->rsrc_lock); 3042 return err; 3043 } 3044 3045 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr, 3046 struct nix_vtag_config *req, 3047 struct nix_vtag_config_rsp *rsp) 3048 { 3049 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 3050 struct nix_txvlan *vlan; 3051 u16 pcifunc = req->hdr.pcifunc; 3052 3053 if (!nix_hw) 3054 return NIX_AF_ERR_INVALID_NIXBLK; 3055 3056 vlan = &nix_hw->txvlan; 3057 if (req->tx.cfg_vtag0) { 3058 rsp->vtag0_idx = 3059 nix_tx_vtag_alloc(rvu, blkaddr, 3060 req->tx.vtag0, req->vtag_size); 3061 3062 if (rsp->vtag0_idx < 0) 3063 return NIX_AF_ERR_TX_VTAG_NOSPC; 3064 3065 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc; 3066 } 3067 3068 if (req->tx.cfg_vtag1) { 3069 rsp->vtag1_idx = 3070 nix_tx_vtag_alloc(rvu, blkaddr, 3071 req->tx.vtag1, req->vtag_size); 3072 3073 if (rsp->vtag1_idx < 0) 3074 goto err_free; 3075 3076 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc; 3077 } 3078 3079 return 0; 3080 3081 err_free: 3082 if (req->tx.cfg_vtag0) 3083 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx); 3084 3085 return NIX_AF_ERR_TX_VTAG_NOSPC; 3086 } 3087 3088 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, 3089 struct nix_vtag_config *req, 3090 struct nix_vtag_config_rsp *rsp) 3091 { 3092 u16 pcifunc = req->hdr.pcifunc; 3093 int blkaddr, nixlf, err; 3094 3095 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3096 if (err) 3097 return err; 3098 3099 if (req->cfg_type) { 3100 /* rx vtag configuration */ 3101 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req); 3102 if (err) 3103 return NIX_AF_ERR_PARAM; 3104 } else { 3105 /* tx vtag configuration */ 3106 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) && 3107 (req->tx.free_vtag0 || req->tx.free_vtag1)) 3108 return NIX_AF_ERR_PARAM; 3109 3110 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1) 3111 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp); 3112 3113 if (req->tx.free_vtag0 || req->tx.free_vtag1) 3114 return nix_tx_vtag_decfg(rvu, blkaddr, req); 3115 } 3116 3117 return 0; 3118 } 3119 3120 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, 3121 int mce, u8 op, u16 pcifunc, int next, 3122 int index, u8 mce_op, bool eol) 3123 { 3124 struct nix_aq_enq_req aq_req; 3125 int err; 3126 3127 aq_req.hdr.pcifunc = 0; 3128 aq_req.ctype = NIX_AQ_CTYPE_MCE; 3129 aq_req.op = op; 3130 aq_req.qidx = mce; 3131 3132 /* Use RSS with RSS index 0 */ 3133 aq_req.mce.op = mce_op; 3134 aq_req.mce.index = index; 3135 aq_req.mce.eol = eol; 
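	/* Destination PF_FUNC and pointer to the next MCE in the list */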
3136 aq_req.mce.pf_func = pcifunc; 3137 aq_req.mce.next = next; 3138 3139 /* All fields valid */ 3140 *(u64 *)(&aq_req.mce_mask) = ~0ULL; 3141 3142 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL); 3143 if (err) { 3144 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", 3145 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 3146 return err; 3147 } 3148 return 0; 3149 } 3150 3151 static void nix_delete_mcast_mce_list(struct nix_mce_list *mce_list) 3152 { 3153 struct hlist_node *tmp; 3154 struct mce *mce; 3155 3156 /* Scan through the current list */ 3157 hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) { 3158 hlist_del(&mce->node); 3159 kfree(mce); 3160 } 3161 3162 mce_list->count = 0; 3163 mce_list->max = 0; 3164 } 3165 3166 static int nix_get_last_mce_list_index(struct nix_mcast_grp_elem *elem) 3167 { 3168 return elem->mce_start_index + elem->mcast_mce_list.count - 1; 3169 } 3170 3171 static int nix_update_ingress_mce_list_hw(struct rvu *rvu, 3172 struct nix_hw *nix_hw, 3173 struct nix_mcast_grp_elem *elem) 3174 { 3175 int idx, last_idx, next_idx, err; 3176 struct nix_mce_list *mce_list; 3177 struct mce *mce, *prev_mce; 3178 3179 mce_list = &elem->mcast_mce_list; 3180 idx = elem->mce_start_index; 3181 last_idx = nix_get_last_mce_list_index(elem); 3182 hlist_for_each_entry(mce, &mce_list->head, node) { 3183 if (idx > last_idx) 3184 break; 3185 3186 if (!mce->is_active) { 3187 if (idx == elem->mce_start_index) { 3188 idx++; 3189 prev_mce = mce; 3190 elem->mce_start_index = idx; 3191 continue; 3192 } else if (idx == last_idx) { 3193 err = nix_blk_setup_mce(rvu, nix_hw, idx - 1, NIX_AQ_INSTOP_WRITE, 3194 prev_mce->pcifunc, next_idx, 3195 prev_mce->rq_rss_index, 3196 prev_mce->dest_type, 3197 false); 3198 if (err) 3199 return err; 3200 3201 break; 3202 } 3203 } 3204 3205 next_idx = idx + 1; 3206 /* EOL should be set in last MCE */ 3207 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, 3208 mce->pcifunc, next_idx, 3209 mce->rq_rss_index, mce->dest_type, 3210 (next_idx > last_idx) ? 
true : false); 3211 if (err) 3212 return err; 3213 3214 idx++; 3215 prev_mce = mce; 3216 } 3217 3218 return 0; 3219 } 3220 3221 static void nix_update_egress_mce_list_hw(struct rvu *rvu, 3222 struct nix_hw *nix_hw, 3223 struct nix_mcast_grp_elem *elem) 3224 { 3225 struct nix_mce_list *mce_list; 3226 int idx, last_idx, next_idx; 3227 struct mce *mce, *prev_mce; 3228 u64 regval; 3229 u8 eol; 3230 3231 mce_list = &elem->mcast_mce_list; 3232 idx = elem->mce_start_index; 3233 last_idx = nix_get_last_mce_list_index(elem); 3234 hlist_for_each_entry(mce, &mce_list->head, node) { 3235 if (idx > last_idx) 3236 break; 3237 3238 if (!mce->is_active) { 3239 if (idx == elem->mce_start_index) { 3240 idx++; 3241 prev_mce = mce; 3242 elem->mce_start_index = idx; 3243 continue; 3244 } else if (idx == last_idx) { 3245 regval = (next_idx << 16) | (1 << 12) | prev_mce->channel; 3246 rvu_write64(rvu, nix_hw->blkaddr, 3247 NIX_AF_TX_MCASTX(idx - 1), 3248 regval); 3249 break; 3250 } 3251 } 3252 3253 eol = 0; 3254 next_idx = idx + 1; 3255 /* EOL should be set in last MCE */ 3256 if (next_idx > last_idx) 3257 eol = 1; 3258 3259 regval = (next_idx << 16) | (eol << 12) | mce->channel; 3260 rvu_write64(rvu, nix_hw->blkaddr, 3261 NIX_AF_TX_MCASTX(idx), 3262 regval); 3263 idx++; 3264 prev_mce = mce; 3265 } 3266 } 3267 3268 static int nix_del_mce_list_entry(struct rvu *rvu, 3269 struct nix_hw *nix_hw, 3270 struct nix_mcast_grp_elem *elem, 3271 struct nix_mcast_grp_update_req *req) 3272 { 3273 u32 num_entry = req->num_mce_entry; 3274 struct nix_mce_list *mce_list; 3275 struct mce *mce; 3276 bool is_found; 3277 int i; 3278 3279 mce_list = &elem->mcast_mce_list; 3280 for (i = 0; i < num_entry; i++) { 3281 is_found = false; 3282 hlist_for_each_entry(mce, &mce_list->head, node) { 3283 /* If already exists, then delete */ 3284 if (mce->pcifunc == req->pcifunc[i]) { 3285 hlist_del(&mce->node); 3286 kfree(mce); 3287 mce_list->count--; 3288 is_found = true; 3289 break; 3290 } 3291 } 3292 3293 if (!is_found) 3294 return NIX_AF_ERR_INVALID_MCAST_DEL_REQ; 3295 } 3296 3297 mce_list->max = mce_list->count; 3298 /* Dump the updated list to HW */ 3299 if (elem->dir == NIX_MCAST_INGRESS) 3300 return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem); 3301 3302 nix_update_egress_mce_list_hw(rvu, nix_hw, elem); 3303 return 0; 3304 } 3305 3306 static int nix_add_mce_list_entry(struct rvu *rvu, 3307 struct nix_hw *nix_hw, 3308 struct nix_mcast_grp_elem *elem, 3309 struct nix_mcast_grp_update_req *req) 3310 { 3311 u32 num_entry = req->num_mce_entry; 3312 struct nix_mce_list *mce_list; 3313 struct hlist_node *tmp; 3314 struct mce *mce; 3315 int i; 3316 3317 mce_list = &elem->mcast_mce_list; 3318 for (i = 0; i < num_entry; i++) { 3319 mce = kzalloc(sizeof(*mce), GFP_KERNEL); 3320 if (!mce) 3321 goto free_mce; 3322 3323 mce->pcifunc = req->pcifunc[i]; 3324 mce->channel = req->channel[i]; 3325 mce->rq_rss_index = req->rq_rss_index[i]; 3326 mce->dest_type = req->dest_type[i]; 3327 mce->is_active = 1; 3328 hlist_add_head(&mce->node, &mce_list->head); 3329 mce_list->count++; 3330 } 3331 3332 mce_list->max += num_entry; 3333 3334 /* Dump the updated list to HW */ 3335 if (elem->dir == NIX_MCAST_INGRESS) 3336 return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem); 3337 3338 nix_update_egress_mce_list_hw(rvu, nix_hw, elem); 3339 return 0; 3340 3341 free_mce: 3342 hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) { 3343 hlist_del(&mce->node); 3344 kfree(mce); 3345 mce_list->count--; 3346 } 3347 3348 return -ENOMEM; 3349 } 3350 3351 static int 
nix_update_mce_list_entry(struct nix_mce_list *mce_list, 3352 u16 pcifunc, bool add) 3353 { 3354 struct mce *mce, *tail = NULL; 3355 bool delete = false; 3356 3357 /* Scan through the current list */ 3358 hlist_for_each_entry(mce, &mce_list->head, node) { 3359 /* If already exists, then delete */ 3360 if (mce->pcifunc == pcifunc && !add) { 3361 delete = true; 3362 break; 3363 } else if (mce->pcifunc == pcifunc && add) { 3364 /* entry already exists */ 3365 return 0; 3366 } 3367 tail = mce; 3368 } 3369 3370 if (delete) { 3371 hlist_del(&mce->node); 3372 kfree(mce); 3373 mce_list->count--; 3374 return 0; 3375 } 3376 3377 if (!add) 3378 return 0; 3379 3380 /* Add a new one to the list, at the tail */ 3381 mce = kzalloc(sizeof(*mce), GFP_KERNEL); 3382 if (!mce) 3383 return -ENOMEM; 3384 mce->pcifunc = pcifunc; 3385 if (!tail) 3386 hlist_add_head(&mce->node, &mce_list->head); 3387 else 3388 hlist_add_behind(&mce->node, &tail->node); 3389 mce_list->count++; 3390 return 0; 3391 } 3392 3393 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc, 3394 struct nix_mce_list *mce_list, 3395 int mce_idx, int mcam_index, bool add) 3396 { 3397 int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr; 3398 struct npc_mcam *mcam = &rvu->hw->mcam; 3399 struct nix_mcast *mcast; 3400 struct nix_hw *nix_hw; 3401 struct mce *mce; 3402 3403 if (!mce_list) 3404 return -EINVAL; 3405 3406 /* Get this PF/VF func's MCE index */ 3407 idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); 3408 3409 if (idx > (mce_idx + mce_list->max)) { 3410 dev_err(rvu->dev, 3411 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", 3412 __func__, idx, mce_list->max, 3413 pcifunc >> RVU_PFVF_PF_SHIFT); 3414 return -EINVAL; 3415 } 3416 3417 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 3418 if (err) 3419 return err; 3420 3421 mcast = &nix_hw->mcast; 3422 mutex_lock(&mcast->mce_lock); 3423 3424 err = nix_update_mce_list_entry(mce_list, pcifunc, add); 3425 if (err) 3426 goto end; 3427 3428 /* Disable MCAM entry in NPC */ 3429 if (!mce_list->count) { 3430 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3431 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false); 3432 goto end; 3433 } 3434 3435 /* Dump the updated list to HW */ 3436 idx = mce_idx; 3437 last_idx = idx + mce_list->count - 1; 3438 hlist_for_each_entry(mce, &mce_list->head, node) { 3439 if (idx > last_idx) 3440 break; 3441 3442 next_idx = idx + 1; 3443 /* EOL should be set in last MCE */ 3444 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, 3445 mce->pcifunc, next_idx, 3446 0, 1, 3447 (next_idx > last_idx) ? 
true : false); 3448 if (err) 3449 goto end; 3450 idx++; 3451 } 3452 3453 end: 3454 mutex_unlock(&mcast->mce_lock); 3455 return err; 3456 } 3457 3458 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type, 3459 struct nix_mce_list **mce_list, int *mce_idx) 3460 { 3461 struct rvu_hwinfo *hw = rvu->hw; 3462 struct rvu_pfvf *pfvf; 3463 3464 if (!hw->cap.nix_rx_multicast || 3465 !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) { 3466 *mce_list = NULL; 3467 *mce_idx = 0; 3468 return; 3469 } 3470 3471 /* Get this PF/VF func's MCE index */ 3472 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); 3473 3474 if (type == NIXLF_BCAST_ENTRY) { 3475 *mce_list = &pfvf->bcast_mce_list; 3476 *mce_idx = pfvf->bcast_mce_idx; 3477 } else if (type == NIXLF_ALLMULTI_ENTRY) { 3478 *mce_list = &pfvf->mcast_mce_list; 3479 *mce_idx = pfvf->mcast_mce_idx; 3480 } else if (type == NIXLF_PROMISC_ENTRY) { 3481 *mce_list = &pfvf->promisc_mce_list; 3482 *mce_idx = pfvf->promisc_mce_idx; 3483 } else { 3484 *mce_list = NULL; 3485 *mce_idx = 0; 3486 } 3487 } 3488 3489 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, 3490 int type, bool add) 3491 { 3492 int err = 0, nixlf, blkaddr, mcam_index, mce_idx; 3493 struct npc_mcam *mcam = &rvu->hw->mcam; 3494 struct rvu_hwinfo *hw = rvu->hw; 3495 struct nix_mce_list *mce_list; 3496 int pf; 3497 3498 /* skip multicast pkt replication for AF's VFs & SDP links */ 3499 if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(pcifunc)) 3500 return 0; 3501 3502 if (!hw->cap.nix_rx_multicast) 3503 return 0; 3504 3505 pf = rvu_get_pf(pcifunc); 3506 if (!is_pf_cgxmapped(rvu, pf)) 3507 return 0; 3508 3509 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3510 if (blkaddr < 0) 3511 return -EINVAL; 3512 3513 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 3514 if (nixlf < 0) 3515 return -EINVAL; 3516 3517 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx); 3518 3519 mcam_index = npc_get_nixlf_mcam_index(mcam, 3520 pcifunc & ~RVU_PFVF_FUNC_MASK, 3521 nixlf, type); 3522 err = nix_update_mce_list(rvu, pcifunc, mce_list, 3523 mce_idx, mcam_index, add); 3524 return err; 3525 } 3526 3527 static void nix_setup_mcast_grp(struct nix_hw *nix_hw) 3528 { 3529 struct nix_mcast_grp *mcast_grp = &nix_hw->mcast_grp; 3530 3531 INIT_LIST_HEAD(&mcast_grp->mcast_grp_head); 3532 mutex_init(&mcast_grp->mcast_grp_lock); 3533 mcast_grp->next_grp_index = 1; 3534 mcast_grp->count = 0; 3535 } 3536 3537 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) 3538 { 3539 struct nix_mcast *mcast = &nix_hw->mcast; 3540 int err, pf, numvfs, idx; 3541 struct rvu_pfvf *pfvf; 3542 u16 pcifunc; 3543 u64 cfg; 3544 3545 /* Skip PF0 (i.e AF) */ 3546 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) { 3547 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 3548 /* If PF is not enabled, nothing to do */ 3549 if (!((cfg >> 20) & 0x01)) 3550 continue; 3551 /* Get numVFs attached to this PF */ 3552 numvfs = (cfg >> 12) & 0xFF; 3553 3554 pfvf = &rvu->pf[pf]; 3555 3556 /* This NIX0/1 block mapped to PF ? 
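		 * i.e. skip PFs whose NIXLF is served by the other NIX block.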
*/ 3557 if (pfvf->nix_blkaddr != nix_hw->blkaddr) 3558 continue; 3559 3560 /* save start idx of broadcast mce list */ 3561 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS); 3562 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1); 3563 3564 /* save start idx of multicast mce list */ 3565 pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS); 3566 nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1); 3567 3568 /* save the start idx of promisc mce list */ 3569 pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS); 3570 nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1); 3571 3572 for (idx = 0; idx < (numvfs + 1); idx++) { 3573 /* idx-0 is for PF, followed by VFs */ 3574 pcifunc = (pf << RVU_PFVF_PF_SHIFT); 3575 pcifunc |= idx; 3576 /* Add dummy entries now, so that we don't have to check 3577 * for whether AQ_OP should be INIT/WRITE later on. 3578 * Will be updated when a NIXLF is attached/detached to 3579 * these PF/VFs. 3580 */ 3581 err = nix_blk_setup_mce(rvu, nix_hw, 3582 pfvf->bcast_mce_idx + idx, 3583 NIX_AQ_INSTOP_INIT, 3584 pcifunc, 0, 0, 1, true); 3585 if (err) 3586 return err; 3587 3588 /* add dummy entries to multicast mce list */ 3589 err = nix_blk_setup_mce(rvu, nix_hw, 3590 pfvf->mcast_mce_idx + idx, 3591 NIX_AQ_INSTOP_INIT, 3592 pcifunc, 0, 0, 1, true); 3593 if (err) 3594 return err; 3595 3596 /* add dummy entries to promisc mce list */ 3597 err = nix_blk_setup_mce(rvu, nix_hw, 3598 pfvf->promisc_mce_idx + idx, 3599 NIX_AQ_INSTOP_INIT, 3600 pcifunc, 0, 0, 1, true); 3601 if (err) 3602 return err; 3603 } 3604 } 3605 return 0; 3606 } 3607 3608 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 3609 { 3610 struct nix_mcast *mcast = &nix_hw->mcast; 3611 struct rvu_hwinfo *hw = rvu->hw; 3612 int err, size; 3613 3614 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F; 3615 size = BIT_ULL(size); 3616 3617 /* Allocate bitmap for rx mce entries */ 3618 mcast->mce_counter[NIX_MCAST_INGRESS].max = 256UL << MC_TBL_SIZE; 3619 err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]); 3620 if (err) 3621 return -ENOMEM; 3622 3623 /* Allocate bitmap for tx mce entries */ 3624 mcast->mce_counter[NIX_MCAST_EGRESS].max = MC_TX_MAX; 3625 err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]); 3626 if (err) { 3627 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]); 3628 return -ENOMEM; 3629 } 3630 3631 /* Alloc memory for multicast/mirror replication entries */ 3632 err = qmem_alloc(rvu->dev, &mcast->mce_ctx, 3633 mcast->mce_counter[NIX_MCAST_INGRESS].max, size); 3634 if (err) { 3635 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]); 3636 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]); 3637 return -ENOMEM; 3638 } 3639 3640 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE, 3641 (u64)mcast->mce_ctx->iova); 3642 3643 /* Set max list length equal to max no of VFs per PF + PF itself */ 3644 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG, 3645 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE); 3646 3647 /* Alloc memory for multicast replication buffers */ 3648 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF; 3649 err = qmem_alloc(rvu->dev, &mcast->mcast_buf, 3650 (8UL << MC_BUF_CNT), size); 3651 if (err) { 3652 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]); 3653 rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]); 3654 return -ENOMEM; 3655 } 3656 3657 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE, 3658 
		    (u64)mcast->mcast_buf->iova);

	/* Alloc pkind for NIX internal RX multicast/mirror replay */
	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);

	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
		    BIT_ULL(20) | MC_BUF_CNT);

	mutex_init(&mcast->mce_lock);

	nix_setup_mcast_grp(nix_hw);

	return nix_setup_mce_tables(rvu, nix_hw);
}

static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
{
	struct nix_txvlan *vlan = &nix_hw->txvlan;
	int err;

	/* Allocate resource bitmap for tx vtag def registers */
	vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
	err = rvu_alloc_bitmap(&vlan->rsrc);
	if (err)
		return -ENOMEM;

	/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
	vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
					    sizeof(u16), GFP_KERNEL);
	if (!vlan->entry2pfvf_map)
		goto free_mem;

	mutex_init(&vlan->rsrc_lock);
	return 0;

free_mem:
	kfree(vlan->rsrc.bmap);
	return -ENOMEM;
}

static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	struct nix_txsch *txsch;
	int err, lvl, schq;
	u64 cfg, reg;

	/* Get scheduler queue count of each type and alloc
	 * bitmap for each for alloc/free/attach operations.
	 */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		txsch->lvl = lvl;
		switch (lvl) {
		case NIX_TXSCH_LVL_SMQ:
			reg = NIX_AF_MDQ_CONST;
			break;
		case NIX_TXSCH_LVL_TL4:
			reg = NIX_AF_TL4_CONST;
			break;
		case NIX_TXSCH_LVL_TL3:
			reg = NIX_AF_TL3_CONST;
			break;
		case NIX_TXSCH_LVL_TL2:
			reg = NIX_AF_TL2_CONST;
			break;
		case NIX_TXSCH_LVL_TL1:
			reg = NIX_AF_TL1_CONST;
			break;
		}
		cfg = rvu_read64(rvu, blkaddr, reg);
		txsch->schq.max = cfg & 0xFFFF;
		err = rvu_alloc_bitmap(&txsch->schq);
		if (err)
			return err;

		/* Allocate memory for scheduler queue to
		 * PF/VF pcifunc mapping info.
		 */
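		/* Each pfvf_map[] word packs the owning PF_FUNC and flag
		 * bits. A sketch of how it is used elsewhere in this file
		 * (owner/flags below are just illustrative names):
		 *	pfvf_map[schq] = TXSCH_MAP(pcifunc, NIX_TXSCHQ_FREE);
		 *	owner = TXSCH_MAP_FUNC(pfvf_map[schq]);
		 *	flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
		 */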
3736 */ 3737 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max, 3738 sizeof(u32), GFP_KERNEL); 3739 if (!txsch->pfvf_map) 3740 return -ENOMEM; 3741 for (schq = 0; schq < txsch->schq.max; schq++) 3742 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 3743 } 3744 3745 /* Setup a default value of 8192 as DWRR MTU */ 3746 if (rvu->hw->cap.nix_common_dwrr_mtu || 3747 rvu->hw->cap.nix_multiple_dwrr_mtu) { 3748 rvu_write64(rvu, blkaddr, 3749 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM), 3750 convert_bytes_to_dwrr_mtu(8192)); 3751 rvu_write64(rvu, blkaddr, 3752 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK), 3753 convert_bytes_to_dwrr_mtu(8192)); 3754 rvu_write64(rvu, blkaddr, 3755 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP), 3756 convert_bytes_to_dwrr_mtu(8192)); 3757 } 3758 3759 return 0; 3760 } 3761 3762 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw, 3763 int blkaddr, u32 cfg) 3764 { 3765 int fmt_idx; 3766 3767 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) { 3768 if (nix_hw->mark_format.cfg[fmt_idx] == cfg) 3769 return fmt_idx; 3770 } 3771 if (fmt_idx >= nix_hw->mark_format.total) 3772 return -ERANGE; 3773 3774 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg); 3775 nix_hw->mark_format.cfg[fmt_idx] = cfg; 3776 nix_hw->mark_format.in_use++; 3777 return fmt_idx; 3778 } 3779 3780 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw, 3781 int blkaddr) 3782 { 3783 u64 cfgs[] = { 3784 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003, 3785 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200, 3786 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203, 3787 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c, 3788 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00, 3789 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c, 3790 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008, 3791 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800, 3792 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808, 3793 }; 3794 int i, rc; 3795 u64 total; 3796 3797 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8; 3798 nix_hw->mark_format.total = (u8)total; 3799 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32), 3800 GFP_KERNEL); 3801 if (!nix_hw->mark_format.cfg) 3802 return -ENOMEM; 3803 for (i = 0; i < NIX_MARK_CFG_MAX; i++) { 3804 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]); 3805 if (rc < 0) 3806 dev_err(rvu->dev, "Err %d in setup mark format %d\n", 3807 i, rc); 3808 } 3809 3810 return 0; 3811 } 3812 3813 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu) 3814 { 3815 /* CN10K supports LBK FIFO size 72 KB */ 3816 if (rvu->hw->lbk_bufsize == 0x12000) 3817 *max_mtu = CN10K_LBK_LINK_MAX_FRS; 3818 else 3819 *max_mtu = NIC_HW_MAX_FRS; 3820 } 3821 3822 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu) 3823 { 3824 int fifo_size = rvu_cgx_get_fifolen(rvu); 3825 3826 /* RPM supports FIFO len 128 KB and RPM2 supports double the 3827 * FIFO len to accommodate 8 LMACS 3828 */ 3829 if (fifo_size == 0x20000 || fifo_size == 0x40000) 3830 *max_mtu = CN10K_LMAC_LINK_MAX_FRS; 3831 else 3832 *max_mtu = NIC_HW_MAX_FRS; 3833 } 3834 3835 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, 3836 struct nix_hw_info *rsp) 3837 { 3838 u16 pcifunc = req->hdr.pcifunc; 3839 u64 dwrr_mtu; 3840 int blkaddr; 3841 3842 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3843 if (blkaddr < 0) 3844 return NIX_AF_ERR_AF_LF_INVALID; 3845 3846 if (is_lbk_vf(rvu, pcifunc)) 3847 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu); 3848 else 3849 
		rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);

	rsp->min_mtu = NIC_HW_MIN_FRS;

	if (!rvu->hw->cap.nix_common_dwrr_mtu &&
	    !rvu->hw->cap.nix_multiple_dwrr_mtu) {
		/* Return '1' on OTx2 */
		rsp->rpm_dwrr_mtu = 1;
		rsp->sdp_dwrr_mtu = 1;
		rsp->lbk_dwrr_mtu = 1;
		return 0;
	}

	/* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */
	dwrr_mtu = rvu_read64(rvu, blkaddr,
			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
	rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);

	dwrr_mtu = rvu_read64(rvu, blkaddr,
			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP));
	rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);

	dwrr_mtu = rvu_read64(rvu, blkaddr,
			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK));
	rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);

	return 0;
}

int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int i, nixlf, blkaddr, err;
	u64 stats;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	/* Get stats count supported by HW */
	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);

	/* Reset tx stats */
	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);

	/* Reset rx stats */
	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);

	return 0;
}

/* Returns the ALG index to be set into NPC_RX_ACTION */
static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
{
	int i;

	/* Scan over existing algo entries to find a match */
	for (i = 0; i < nix_hw->flowkey.in_use; i++)
		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
			return i;

	return -ERANGE;
}

/* Mask to match ipv6(NPC_LT_LC_IP6) and ipv6 ext(NPC_LT_LC_IP6_EXT) */
#define NPC_LT_LC_IP6_MATCH_MSK ((~(NPC_LT_LC_IP6 ^ NPC_LT_LC_IP6_EXT)) & 0xf)
/* Mask to match both ipv4(NPC_LT_LC_IP) and ipv4 ext(NPC_LT_LC_IP_OPT) */
#define NPC_LT_LC_IP_MATCH_MSK ((~(NPC_LT_LC_IP ^ NPC_LT_LC_IP_OPT)) & 0xf)

static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
{
	int idx, nr_field, key_off, field_marker, keyoff_marker;
	int max_key_off, max_bit_pos, group_member;
	struct nix_rx_flowkey_alg *field;
	struct nix_rx_flowkey_alg tmp;
	u32 key_type, valid_key;
	u32 l3_l4_src_dst;
	int l4_key_offset = 0;

	if (!alg)
		return -EINVAL;

#define FIELDS_PER_ALG 5
#define MAX_KEY_OFF 40
	/* Clear all fields */
	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);

	/* Each of the 32 possible flow key algorithm definitions should
	 * fall into the above incremental config (except ALG0). Otherwise a
	 * single NPC MCAM entry is not sufficient for supporting RSS.
	 *
	 * If a different definition or combination is needed then the NPC
	 * MCAM has to be programmed to filter such pkts and its action
	 * should point to this definition to calculate flowtag or hash.
	 *
	 * The `for loop` goes over _all_ protocol fields and the following
	 * variables depict the state machine's forward progress logic.
	 *
	 * keyoff_marker - Enabled when hash byte length needs to be accounted
	 * in field->key_offset update.
3952 * field_marker - Enabled when a new field needs to be selected. 3953 * group_member - Enabled when protocol is part of a group. 3954 */ 3955 3956 /* Last 4 bits (31:28) are reserved to specify SRC, DST 3957 * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST, 3958 * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST 3959 * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST 3960 */ 3961 l3_l4_src_dst = flow_cfg; 3962 /* Reset these 4 bits, so that these won't be part of key */ 3963 flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK; 3964 3965 keyoff_marker = 0; max_key_off = 0; group_member = 0; 3966 nr_field = 0; key_off = 0; field_marker = 1; 3967 field = &tmp; max_bit_pos = fls(flow_cfg); 3968 for (idx = 0; 3969 idx < max_bit_pos && nr_field < FIELDS_PER_ALG && 3970 key_off < MAX_KEY_OFF; idx++) { 3971 key_type = BIT(idx); 3972 valid_key = flow_cfg & key_type; 3973 /* Found a field marker, reset the field values */ 3974 if (field_marker) 3975 memset(&tmp, 0, sizeof(tmp)); 3976 3977 field_marker = true; 3978 keyoff_marker = true; 3979 switch (key_type) { 3980 case NIX_FLOW_KEY_TYPE_PORT: 3981 field->sel_chan = true; 3982 /* This should be set to 1, when SEL_CHAN is set */ 3983 field->bytesm1 = 1; 3984 break; 3985 case NIX_FLOW_KEY_TYPE_IPV4_PROTO: 3986 field->lid = NPC_LID_LC; 3987 field->hdr_offset = 9; /* offset */ 3988 field->bytesm1 = 0; /* 1 byte */ 3989 field->ltype_match = NPC_LT_LC_IP; 3990 field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK; 3991 break; 3992 case NIX_FLOW_KEY_TYPE_IPV4: 3993 case NIX_FLOW_KEY_TYPE_INNR_IPV4: 3994 field->lid = NPC_LID_LC; 3995 field->ltype_match = NPC_LT_LC_IP; 3996 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) { 3997 field->lid = NPC_LID_LG; 3998 field->ltype_match = NPC_LT_LG_TU_IP; 3999 } 4000 field->hdr_offset = 12; /* SIP offset */ 4001 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */ 4002 4003 /* Only SIP */ 4004 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY) 4005 field->bytesm1 = 3; /* SIP, 4 bytes */ 4006 4007 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) { 4008 /* Both SIP + DIP */ 4009 if (field->bytesm1 == 3) { 4010 field->bytesm1 = 7; /* SIP + DIP, 8B */ 4011 } else { 4012 /* Only DIP */ 4013 field->hdr_offset = 16; /* DIP off */ 4014 field->bytesm1 = 3; /* DIP, 4 bytes */ 4015 } 4016 } 4017 field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK; 4018 keyoff_marker = false; 4019 break; 4020 case NIX_FLOW_KEY_TYPE_IPV6: 4021 case NIX_FLOW_KEY_TYPE_INNR_IPV6: 4022 field->lid = NPC_LID_LC; 4023 field->ltype_match = NPC_LT_LC_IP6; 4024 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) { 4025 field->lid = NPC_LID_LG; 4026 field->ltype_match = NPC_LT_LG_TU_IP6; 4027 } 4028 field->hdr_offset = 8; /* SIP offset */ 4029 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */ 4030 4031 /* Only SIP */ 4032 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY) 4033 field->bytesm1 = 15; /* SIP, 16 bytes */ 4034 4035 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) { 4036 /* Both SIP + DIP */ 4037 if (field->bytesm1 == 15) { 4038 /* SIP + DIP, 32 bytes */ 4039 field->bytesm1 = 31; 4040 } else { 4041 /* Only DIP */ 4042 field->hdr_offset = 24; /* DIP off */ 4043 field->bytesm1 = 15; /* DIP,16 bytes */ 4044 } 4045 } 4046 field->ltype_mask = NPC_LT_LC_IP6_MATCH_MSK; 4047 break; 4048 case NIX_FLOW_KEY_TYPE_TCP: 4049 case NIX_FLOW_KEY_TYPE_UDP: 4050 case NIX_FLOW_KEY_TYPE_SCTP: 4051 case NIX_FLOW_KEY_TYPE_INNR_TCP: 4052 case NIX_FLOW_KEY_TYPE_INNR_UDP: 4053 case NIX_FLOW_KEY_TYPE_INNR_SCTP: 4054 field->lid = NPC_LID_LD; 4055 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP || 4056 key_type == 
NIX_FLOW_KEY_TYPE_INNR_UDP || 4057 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) 4058 field->lid = NPC_LID_LH; 4059 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */ 4060 4061 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY) 4062 field->bytesm1 = 1; /* SRC, 2 bytes */ 4063 4064 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) { 4065 /* Both SRC + DST */ 4066 if (field->bytesm1 == 1) { 4067 /* SRC + DST, 4 bytes */ 4068 field->bytesm1 = 3; 4069 } else { 4070 /* Only Dport */ 4071 field->hdr_offset = 2; /* DST off */ 4072 field->bytesm1 = 1; /* DST, 2 bytes */ 4073 } 4074 } 4075 4076 /* Ltype enum values for outer (NPC_LT_LD_*) and inner 4077 * (NPC_LT_LH_TU_*) protocols are identical, so only the lid 4078 * needs to change for inner protocols; ltype_match stays the same. 4079 */ 4080 BUILD_BUG_ON((int)NPC_LT_LD_TCP != 4081 (int)NPC_LT_LH_TU_TCP); 4082 BUILD_BUG_ON((int)NPC_LT_LD_UDP != 4083 (int)NPC_LT_LH_TU_UDP); 4084 BUILD_BUG_ON((int)NPC_LT_LD_SCTP != 4085 (int)NPC_LT_LH_TU_SCTP); 4086 4087 if ((key_type == NIX_FLOW_KEY_TYPE_TCP || 4088 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) && 4089 valid_key) { 4090 field->ltype_match |= NPC_LT_LD_TCP; 4091 group_member = true; 4092 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP || 4093 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) && 4094 valid_key) { 4095 field->ltype_match |= NPC_LT_LD_UDP; 4096 group_member = true; 4097 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP || 4098 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) && 4099 valid_key) { 4100 field->ltype_match |= NPC_LT_LD_SCTP; 4101 group_member = true; 4102 } 4103 field->ltype_mask = ~field->ltype_match; 4104 if (key_type == NIX_FLOW_KEY_TYPE_SCTP || 4105 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) { 4106 /* Handle the case where any of the group items 4107 * is enabled in the group but not the final one 4108 */ 4109 if (group_member) { 4110 valid_key = true; 4111 group_member = false; 4112 } 4113 } else { 4114 field_marker = false; 4115 keyoff_marker = false; 4116 } 4117 4118 /* TCP/UDP/SCTP and ESP/AH fall at the same offset, so 4119 * remember the TCP key offset within the 40 byte hash key.
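* The saved offset is reused when ESP/AH is the selected key type (see the handling after the switch below).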
4120 */ 4121 if (key_type == NIX_FLOW_KEY_TYPE_TCP) 4122 l4_key_offset = key_off; 4123 break; 4124 case NIX_FLOW_KEY_TYPE_NVGRE: 4125 field->lid = NPC_LID_LD; 4126 field->hdr_offset = 4; /* VSID offset */ 4127 field->bytesm1 = 2; 4128 field->ltype_match = NPC_LT_LD_NVGRE; 4129 field->ltype_mask = 0xF; 4130 break; 4131 case NIX_FLOW_KEY_TYPE_VXLAN: 4132 case NIX_FLOW_KEY_TYPE_GENEVE: 4133 field->lid = NPC_LID_LE; 4134 field->bytesm1 = 2; 4135 field->hdr_offset = 4; 4136 field->ltype_mask = 0xF; 4137 field_marker = false; 4138 keyoff_marker = false; 4139 4140 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) { 4141 field->ltype_match |= NPC_LT_LE_VXLAN; 4142 group_member = true; 4143 } 4144 4145 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) { 4146 field->ltype_match |= NPC_LT_LE_GENEVE; 4147 group_member = true; 4148 } 4149 4150 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) { 4151 if (group_member) { 4152 field->ltype_mask = ~field->ltype_match; 4153 field_marker = true; 4154 keyoff_marker = true; 4155 valid_key = true; 4156 group_member = false; 4157 } 4158 } 4159 break; 4160 case NIX_FLOW_KEY_TYPE_ETH_DMAC: 4161 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC: 4162 field->lid = NPC_LID_LA; 4163 field->ltype_match = NPC_LT_LA_ETHER; 4164 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) { 4165 field->lid = NPC_LID_LF; 4166 field->ltype_match = NPC_LT_LF_TU_ETHER; 4167 } 4168 field->hdr_offset = 0; 4169 field->bytesm1 = 5; /* DMAC 6 Byte */ 4170 field->ltype_mask = 0xF; 4171 break; 4172 case NIX_FLOW_KEY_TYPE_IPV6_EXT: 4173 field->lid = NPC_LID_LC; 4174 field->hdr_offset = 40; /* IPV6 hdr */ 4175 field->bytesm1 = 0; /* 1 Byte ext hdr*/ 4176 field->ltype_match = NPC_LT_LC_IP6_EXT; 4177 field->ltype_mask = 0xF; 4178 break; 4179 case NIX_FLOW_KEY_TYPE_GTPU: 4180 field->lid = NPC_LID_LE; 4181 field->hdr_offset = 4; 4182 field->bytesm1 = 3; /* 4 bytes TID*/ 4183 field->ltype_match = NPC_LT_LE_GTPU; 4184 field->ltype_mask = 0xF; 4185 break; 4186 case NIX_FLOW_KEY_TYPE_CUSTOM0: 4187 field->lid = NPC_LID_LC; 4188 field->hdr_offset = 6; 4189 field->bytesm1 = 1; /* 2 Bytes*/ 4190 field->ltype_match = NPC_LT_LC_CUSTOM0; 4191 field->ltype_mask = 0xF; 4192 break; 4193 case NIX_FLOW_KEY_TYPE_VLAN: 4194 field->lid = NPC_LID_LB; 4195 field->hdr_offset = 2; /* Skip TPID (2-bytes) */ 4196 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */ 4197 field->ltype_match = NPC_LT_LB_CTAG; 4198 field->ltype_mask = 0xF; 4199 field->fn_mask = 1; /* Mask out the first nibble */ 4200 break; 4201 case NIX_FLOW_KEY_TYPE_AH: 4202 case NIX_FLOW_KEY_TYPE_ESP: 4203 field->hdr_offset = 0; 4204 field->bytesm1 = 7; /* SPI + sequence number */ 4205 field->ltype_mask = 0xF; 4206 field->lid = NPC_LID_LE; 4207 field->ltype_match = NPC_LT_LE_ESP; 4208 if (key_type == NIX_FLOW_KEY_TYPE_AH) { 4209 field->lid = NPC_LID_LD; 4210 field->ltype_match = NPC_LT_LD_AH; 4211 field->hdr_offset = 4; 4212 keyoff_marker = false; 4213 } 4214 break; 4215 } 4216 field->ena = 1; 4217 4218 /* Found a valid flow key type */ 4219 if (valid_key) { 4220 /* Use the key offset of TCP/UDP/SCTP fields 4221 * for ESP/AH fields. 
4222 */ 4223 if (key_type == NIX_FLOW_KEY_TYPE_ESP || 4224 key_type == NIX_FLOW_KEY_TYPE_AH) 4225 key_off = l4_key_offset; 4226 field->key_offset = key_off; 4227 memcpy(&alg[nr_field], field, sizeof(*field)); 4228 max_key_off = max(max_key_off, field->bytesm1 + 1); 4229 4230 /* Found a field marker, get the next field */ 4231 if (field_marker) 4232 nr_field++; 4233 } 4234 4235 /* Found a keyoff marker, update the new key_off */ 4236 if (keyoff_marker) { 4237 key_off += max_key_off; 4238 max_key_off = 0; 4239 } 4240 } 4241 /* Processed all the flow key types */ 4242 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF) 4243 return 0; 4244 else 4245 return NIX_AF_ERR_RSS_NOSPC_FIELD; 4246 } 4247 4248 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg) 4249 { 4250 u64 field[FIELDS_PER_ALG]; 4251 struct nix_hw *hw; 4252 int fid, rc; 4253 4254 hw = get_nix_hw(rvu->hw, blkaddr); 4255 if (!hw) 4256 return NIX_AF_ERR_INVALID_NIXBLK; 4257 4258 /* No room to add a new flow hash algorithm */ 4259 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX) 4260 return NIX_AF_ERR_RSS_NOSPC_ALGO; 4261 4262 /* Generate algo fields for the given flow_cfg */ 4263 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg); 4264 if (rc) 4265 return rc; 4266 4267 /* Update ALGX_FIELDX register with generated fields */ 4268 for (fid = 0; fid < FIELDS_PER_ALG; fid++) 4269 rvu_write64(rvu, blkaddr, 4270 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use, 4271 fid), field[fid]); 4272 4273 /* Store the flow_cfg for further lookup */ 4274 rc = hw->flowkey.in_use; 4275 hw->flowkey.flowkey[rc] = flow_cfg; 4276 hw->flowkey.in_use++; 4277 4278 return rc; 4279 } 4280 4281 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu, 4282 struct nix_rss_flowkey_cfg *req, 4283 struct nix_rss_flowkey_cfg_rsp *rsp) 4284 { 4285 u16 pcifunc = req->hdr.pcifunc; 4286 int alg_idx, nixlf, blkaddr; 4287 struct nix_hw *nix_hw; 4288 int err; 4289 4290 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 4291 if (err) 4292 return err; 4293 4294 nix_hw = get_nix_hw(rvu->hw, blkaddr); 4295 if (!nix_hw) 4296 return NIX_AF_ERR_INVALID_NIXBLK; 4297 4298 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg); 4299 /* Failed to get algo index from the existing list, reserve a new one */ 4300 if (alg_idx < 0) { 4301 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr, 4302 req->flowkey_cfg); 4303 if (alg_idx < 0) 4304 return alg_idx; 4305 } 4306 rsp->alg_idx = alg_idx; 4307 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group, 4308 alg_idx, req->mcam_index); 4309 return 0; 4310 } 4311 4312 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr) 4313 { 4314 u32 flowkey_cfg, minkey_cfg; 4315 int alg, fid, rc; 4316 4317 /* Disable all flow key algx fieldx */ 4318 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) { 4319 for (fid = 0; fid < FIELDS_PER_ALG; fid++) 4320 rvu_write64(rvu, blkaddr, 4321 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid), 4322 0); 4323 } 4324 4325 /* IPv4/IPv6 SIP/DIPs */ 4326 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6; 4327 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4328 if (rc < 0) 4329 return rc; 4330 4331 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 4332 minkey_cfg = flowkey_cfg; 4333 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP; 4334 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4335 if (rc < 0) 4336 return rc; 4337 4338 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 4339 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP; 4340 rc =
reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4341 if (rc < 0) 4342 return rc; 4343 4344 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 4345 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP; 4346 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4347 if (rc < 0) 4348 return rc; 4349 4350 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */ 4351 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 4352 NIX_FLOW_KEY_TYPE_UDP; 4353 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4354 if (rc < 0) 4355 return rc; 4356 4357 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 4358 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 4359 NIX_FLOW_KEY_TYPE_SCTP; 4360 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4361 if (rc < 0) 4362 return rc; 4363 4364 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 4365 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP | 4366 NIX_FLOW_KEY_TYPE_SCTP; 4367 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4368 if (rc < 0) 4369 return rc; 4370 4371 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 4372 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 4373 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP; 4374 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4375 if (rc < 0) 4376 return rc; 4377 4378 return 0; 4379 } 4380 4381 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, 4382 struct nix_set_mac_addr *req, 4383 struct msg_rsp *rsp) 4384 { 4385 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK; 4386 u16 pcifunc = req->hdr.pcifunc; 4387 int blkaddr, nixlf, err; 4388 struct rvu_pfvf *pfvf; 4389 4390 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 4391 if (err) 4392 return err; 4393 4394 pfvf = rvu_get_pfvf(rvu, pcifunc); 4395 4396 /* untrusted VF can't overwrite admin(PF) changes */ 4397 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 4398 (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) { 4399 dev_warn(rvu->dev, 4400 "MAC address set by admin(PF) cannot be overwritten by untrusted VF"); 4401 return -EPERM; 4402 } 4403 4404 ether_addr_copy(pfvf->mac_addr, req->mac_addr); 4405 4406 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 4407 pfvf->rx_chan_base, req->mac_addr); 4408 4409 if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf) 4410 ether_addr_copy(pfvf->default_mac, req->mac_addr); 4411 4412 return 0; 4413 } 4414 4415 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu, 4416 struct msg_req *req, 4417 struct nix_get_mac_addr_rsp *rsp) 4418 { 4419 u16 pcifunc = req->hdr.pcifunc; 4420 struct rvu_pfvf *pfvf; 4421 4422 if (!is_nixlf_attached(rvu, pcifunc)) 4423 return NIX_AF_ERR_AF_LF_INVALID; 4424 4425 pfvf = rvu_get_pfvf(rvu, pcifunc); 4426 4427 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 4428 4429 return 0; 4430 } 4431 4432 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, 4433 struct msg_rsp *rsp) 4434 { 4435 bool allmulti, promisc, nix_rx_multicast; 4436 u16 pcifunc = req->hdr.pcifunc; 4437 struct rvu_pfvf *pfvf; 4438 int nixlf, err; 4439 4440 pfvf = rvu_get_pfvf(rvu, pcifunc); 4441 promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false; 4442 allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false; 4443 pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? 
true : false; 4444 4445 nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list; 4446 4447 if (is_vf(pcifunc) && !nix_rx_multicast && 4448 (promisc || allmulti)) { 4449 dev_warn_ratelimited(rvu->dev, 4450 "VF promisc/multicast not supported\n"); 4451 return 0; 4452 } 4453 4454 /* untrusted VF can't configure promisc/allmulti */ 4455 if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 4456 (promisc || allmulti)) 4457 return 0; 4458 4459 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 4460 if (err) 4461 return err; 4462 4463 if (nix_rx_multicast) { 4464 /* add/del this PF_FUNC to/from mcast pkt replication list */ 4465 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY, 4466 allmulti); 4467 if (err) { 4468 dev_err(rvu->dev, 4469 "Failed to update pcifunc 0x%x to multicast list\n", 4470 pcifunc); 4471 return err; 4472 } 4473 4474 /* add/del this PF_FUNC to/from promisc pkt replication list */ 4475 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY, 4476 promisc); 4477 if (err) { 4478 dev_err(rvu->dev, 4479 "Failed to update pcifunc 0x%x to promisc list\n", 4480 pcifunc); 4481 return err; 4482 } 4483 } 4484 4485 /* install/uninstall allmulti entry */ 4486 if (allmulti) { 4487 rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf, 4488 pfvf->rx_chan_base); 4489 } else { 4490 if (!nix_rx_multicast) 4491 rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false); 4492 } 4493 4494 /* install/uninstall promisc entry */ 4495 if (promisc) 4496 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 4497 pfvf->rx_chan_base, 4498 pfvf->rx_chan_cnt); 4499 else 4500 if (!nix_rx_multicast) 4501 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false); 4502 4503 return 0; 4504 } 4505 4506 static void nix_find_link_frs(struct rvu *rvu, 4507 struct nix_frs_cfg *req, u16 pcifunc) 4508 { 4509 int pf = rvu_get_pf(pcifunc); 4510 struct rvu_pfvf *pfvf; 4511 int maxlen, minlen; 4512 int numvfs, hwvf; 4513 int vf; 4514 4515 /* Update with requester's min/max lengths */ 4516 pfvf = rvu_get_pfvf(rvu, pcifunc); 4517 pfvf->maxlen = req->maxlen; 4518 if (req->update_minlen) 4519 pfvf->minlen = req->minlen; 4520 4521 maxlen = req->maxlen; 4522 minlen = req->update_minlen ? 
req->minlen : 0; 4523 4524 /* Get this PF's numVFs and starting hwvf */ 4525 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); 4526 4527 /* For each VF, compare requested max/minlen */ 4528 for (vf = 0; vf < numvfs; vf++) { 4529 pfvf = &rvu->hwvf[hwvf + vf]; 4530 if (pfvf->maxlen > maxlen) 4531 maxlen = pfvf->maxlen; 4532 if (req->update_minlen && 4533 pfvf->minlen && pfvf->minlen < minlen) 4534 minlen = pfvf->minlen; 4535 } 4536 4537 /* Compare requested max/minlen with PF's max/minlen */ 4538 pfvf = &rvu->pf[pf]; 4539 if (pfvf->maxlen > maxlen) 4540 maxlen = pfvf->maxlen; 4541 if (req->update_minlen && 4542 pfvf->minlen && pfvf->minlen < minlen) 4543 minlen = pfvf->minlen; 4544 4545 /* Update the request with max/min PF's and it's VF's max/min */ 4546 req->maxlen = maxlen; 4547 if (req->update_minlen) 4548 req->minlen = minlen; 4549 } 4550 4551 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, 4552 struct msg_rsp *rsp) 4553 { 4554 struct rvu_hwinfo *hw = rvu->hw; 4555 u16 pcifunc = req->hdr.pcifunc; 4556 int pf = rvu_get_pf(pcifunc); 4557 int blkaddr, link = -1; 4558 struct nix_hw *nix_hw; 4559 struct rvu_pfvf *pfvf; 4560 u8 cgx = 0, lmac = 0; 4561 u16 max_mtu; 4562 u64 cfg; 4563 4564 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 4565 if (blkaddr < 0) 4566 return NIX_AF_ERR_AF_LF_INVALID; 4567 4568 nix_hw = get_nix_hw(rvu->hw, blkaddr); 4569 if (!nix_hw) 4570 return NIX_AF_ERR_INVALID_NIXBLK; 4571 4572 if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) 4573 rvu_get_lbk_link_max_frs(rvu, &max_mtu); 4574 else 4575 rvu_get_lmac_link_max_frs(rvu, &max_mtu); 4576 4577 if (!req->sdp_link && req->maxlen > max_mtu) 4578 return NIX_AF_ERR_FRS_INVALID; 4579 4580 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) 4581 return NIX_AF_ERR_FRS_INVALID; 4582 4583 /* Check if config is for SDP link */ 4584 if (req->sdp_link) { 4585 if (!hw->sdp_links) 4586 return NIX_AF_ERR_RX_LINK_INVALID; 4587 link = hw->cgx_links + hw->lbk_links; 4588 goto linkcfg; 4589 } 4590 4591 /* Check if the request is from CGX mapped RVU PF */ 4592 if (is_pf_cgxmapped(rvu, pf)) { 4593 /* Get CGX and LMAC to which this PF is mapped and find link */ 4594 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac); 4595 link = (cgx * hw->lmac_per_cgx) + lmac; 4596 } else if (pf == 0) { 4597 /* For VFs of PF0 ingress is LBK port, so config LBK link */ 4598 pfvf = rvu_get_pfvf(rvu, pcifunc); 4599 link = hw->cgx_links + pfvf->lbkid; 4600 } else if (is_rep_dev(rvu, pcifunc)) { 4601 link = hw->cgx_links + 0; 4602 } 4603 4604 if (link < 0) 4605 return NIX_AF_ERR_RX_LINK_INVALID; 4606 4607 linkcfg: 4608 nix_find_link_frs(rvu, req, pcifunc); 4609 4610 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); 4611 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); 4612 if (req->update_minlen) 4613 cfg = (cfg & ~0xFFFFULL) | req->minlen; 4614 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); 4615 4616 return 0; 4617 } 4618 4619 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, 4620 struct msg_rsp *rsp) 4621 { 4622 int nixlf, blkaddr, err; 4623 u64 cfg; 4624 4625 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr); 4626 if (err) 4627 return err; 4628 4629 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf)); 4630 /* Set the interface configuration */ 4631 if (req->len_verify & BIT(0)) 4632 cfg |= BIT_ULL(41); 4633 else 4634 cfg &= ~BIT_ULL(41); 4635 4636 if (req->len_verify & BIT(1)) 4637 cfg |= BIT_ULL(40); 4638 else 4639 cfg &= ~BIT_ULL(40); 
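/* NIX_RX_DROP_RE requests that packets received with errors (RE) be dropped by HW instead of being delivered to this LF */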
4640 4641 if (req->len_verify & NIX_RX_DROP_RE) 4642 cfg |= BIT_ULL(32); 4643 else 4644 cfg &= ~BIT_ULL(32); 4645 4646 if (req->csum_verify & BIT(0)) 4647 cfg |= BIT_ULL(37); 4648 else 4649 cfg &= ~BIT_ULL(37); 4650 4651 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg); 4652 4653 return 0; 4654 } 4655 4656 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs) 4657 { 4658 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */ 4659 } 4660 4661 static void nix_link_config(struct rvu *rvu, int blkaddr, 4662 struct nix_hw *nix_hw) 4663 { 4664 struct rvu_hwinfo *hw = rvu->hw; 4665 int cgx, lmac_cnt, slink, link; 4666 u16 lbk_max_frs, lmac_max_frs; 4667 unsigned long lmac_bmap; 4668 u64 tx_credits, cfg; 4669 u64 lmac_fifo_len; 4670 int iter; 4671 4672 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs); 4673 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs); 4674 4675 /* Set default min/max packet lengths allowed on NIX Rx links. 4676 * 4677 * With HW reset minlen value of 60byte, HW will treat ARP pkts 4678 * as undersize and report them to SW as error pkts, hence 4679 * setting it to 40 bytes. 4680 */ 4681 for (link = 0; link < hw->cgx_links; link++) { 4682 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 4683 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS); 4684 } 4685 4686 for (link = hw->cgx_links; link < hw->lbk_links; link++) { 4687 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 4688 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS); 4689 } 4690 if (hw->sdp_links) { 4691 link = hw->cgx_links + hw->lbk_links; 4692 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 4693 SDP_HW_MAX_FRS << 16 | SDP_HW_MIN_FRS); 4694 } 4695 4696 /* Get MCS external bypass status for CN10K-B */ 4697 if (mcs_get_blkcnt() == 1) { 4698 /* Adjust for 2 credits when external bypass is disabled */ 4699 nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2; 4700 } 4701 4702 /* Set credits for Tx links assuming max packet length allowed. 4703 * This will be reconfigured based on MTU set for PF/VF. 
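* Credits are programmed in units of 16 bytes of LMAC FIFO space, hence the divide by 16 below.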
4704 */ 4705 for (cgx = 0; cgx < hw->cgx; cgx++) { 4706 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 4707 /* Skip when cgx is not available or lmac cnt is zero */ 4708 if (lmac_cnt <= 0) 4709 continue; 4710 slink = cgx * hw->lmac_per_cgx; 4711 4712 /* Get LMAC id's from bitmap */ 4713 lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu)); 4714 for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) { 4715 lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter); 4716 if (!lmac_fifo_len) { 4717 dev_err(rvu->dev, 4718 "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n", 4719 __func__, cgx, iter); 4720 continue; 4721 } 4722 tx_credits = (lmac_fifo_len - lmac_max_frs) / 16; 4723 /* Enable credits and set credit pkt count to max allowed */ 4724 cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 4725 cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt); 4726 4727 link = iter + slink; 4728 nix_hw->tx_credits[link] = tx_credits; 4729 rvu_write64(rvu, blkaddr, 4730 NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); 4731 } 4732 } 4733 4734 /* Set Tx credits for LBK link */ 4735 slink = hw->cgx_links; 4736 for (link = slink; link < (slink + hw->lbk_links); link++) { 4737 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs); 4738 nix_hw->tx_credits[link] = tx_credits; 4739 /* Enable credits and set credit pkt count to max allowed */ 4740 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 4741 rvu_write64(rvu, blkaddr, 4742 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); 4743 } 4744 } 4745 4746 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr) 4747 { 4748 int idx, err; 4749 u64 status; 4750 4751 /* Start X2P bus calibration */ 4752 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4753 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9)); 4754 /* Wait for calibration to complete */ 4755 err = rvu_poll_reg(rvu, blkaddr, 4756 NIX_AF_STATUS, BIT_ULL(10), false); 4757 if (err) { 4758 dev_err(rvu->dev, "NIX X2P bus calibration failed\n"); 4759 return err; 4760 } 4761 4762 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS); 4763 /* Check if CGX devices are ready */ 4764 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) { 4765 /* Skip when cgx port is not available */ 4766 if (!rvu_cgx_pdata(idx, rvu) || 4767 (status & (BIT_ULL(16 + idx)))) 4768 continue; 4769 dev_err(rvu->dev, 4770 "CGX%d didn't respond to NIX X2P calibration\n", idx); 4771 err = -EBUSY; 4772 } 4773 4774 /* Check if LBK is ready */ 4775 if (!(status & BIT_ULL(19))) { 4776 dev_err(rvu->dev, 4777 "LBK didn't respond to NIX X2P calibration\n"); 4778 err = -EBUSY; 4779 } 4780 4781 /* Clear 'calibrate_x2p' bit */ 4782 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4783 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9)); 4784 if (err || (status & 0x3FFULL)) 4785 dev_err(rvu->dev, 4786 "NIX X2P calibration failed, status 0x%llx\n", status); 4787 if (err) 4788 return err; 4789 return 0; 4790 } 4791 4792 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) 4793 { 4794 u64 cfg; 4795 int err; 4796 4797 /* Set admin queue endianness */ 4798 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG); 4799 #ifdef __BIG_ENDIAN 4800 cfg |= BIT_ULL(8); 4801 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 4802 #else 4803 cfg &= ~BIT_ULL(8); 4804 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 4805 #endif 4806 4807 /* Do not bypass NDC cache */ 4808 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG); 4809 cfg &= ~0x3FFEULL; 4810 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 4811 /* Disable caching of SQB aka SQEs */ 4812 cfg |= 0x04ULL; 4813 #endif 4814 
rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg); 4815 4816 /* Result structure can be followed by RQ/SQ/CQ context at 4817 * RES + 128bytes and a write mask at RES + 256 bytes, depending on 4818 * operation type. Alloc sufficient result memory for all operations. 4819 */ 4820 err = rvu_aq_alloc(rvu, &block->aq, 4821 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s), 4822 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256); 4823 if (err) 4824 return err; 4825 4826 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE); 4827 rvu_write64(rvu, block->addr, 4828 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova); 4829 return 0; 4830 } 4831 4832 static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr) 4833 { 4834 struct rvu_hwinfo *hw = rvu->hw; 4835 u64 hw_const; 4836 4837 hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 4838 4839 /* On OcteonTx2 DWRR quantum is directly configured into each of 4840 * the transmit scheduler queues. And PF/VF drivers were free to 4841 * config any value upto 2^24. 4842 * On CN10K, HW is modified, the quantum configuration at scheduler 4843 * queues is in terms of weight. And SW needs to setup a base DWRR MTU 4844 * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do 4845 * 'DWRR MTU * weight' to get the quantum. 4846 * 4847 * Check if HW uses a common MTU for all DWRR quantum configs. 4848 * On OcteonTx2 this register field is '0'. 4849 */ 4850 if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61))) 4851 hw->cap.nix_common_dwrr_mtu = true; 4852 4853 if (hw_const & BIT_ULL(61)) 4854 hw->cap.nix_multiple_dwrr_mtu = true; 4855 } 4856 4857 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) 4858 { 4859 const struct npc_lt_def_cfg *ltdefs; 4860 struct rvu_hwinfo *hw = rvu->hw; 4861 int blkaddr = nix_hw->blkaddr; 4862 struct rvu_block *block; 4863 int err; 4864 u64 cfg; 4865 4866 block = &hw->block[blkaddr]; 4867 4868 if (is_rvu_96xx_B0(rvu)) { 4869 /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt 4870 * internal state when conditional clocks are turned off. 4871 * Hence enable them. 4872 */ 4873 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4874 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL); 4875 } 4876 4877 /* Set chan/link to backpressure TL3 instead of TL2 */ 4878 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01); 4879 4880 /* Disable SQ manager's sticky mode operation (set TM6 = 0) 4881 * This sticky mode is known to cause SQ stalls when multiple 4882 * SQs are mapped to same SMQ and transmitting pkts at a time. 
4883 */ 4884 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); 4885 cfg &= ~BIT_ULL(15); 4886 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg); 4887 4888 ltdefs = rvu->kpu.lt_def; 4889 /* Calibrate X2P bus to check if CGX/LBK links are fine */ 4890 err = nix_calibrate_x2p(rvu, blkaddr); 4891 if (err) 4892 return err; 4893 4894 /* Setup capabilities of the NIX block */ 4895 rvu_nix_setup_capabilities(rvu, blkaddr); 4896 4897 /* Initialize admin queue */ 4898 err = nix_aq_init(rvu, block); 4899 if (err) 4900 return err; 4901 4902 /* Restore CINT timer delay to HW reset values */ 4903 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL); 4904 4905 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG); 4906 4907 /* For better performance use NDC TX instead of NDC RX for SQ's SQEs" */ 4908 cfg |= 1ULL; 4909 if (!is_rvu_otx2(rvu)) 4910 cfg |= NIX_PTP_1STEP_EN; 4911 4912 rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg); 4913 4914 if (!is_rvu_otx2(rvu)) 4915 rvu_nix_block_cn10k_init(rvu, nix_hw); 4916 4917 if (is_block_implemented(hw, blkaddr)) { 4918 err = nix_setup_txschq(rvu, nix_hw, blkaddr); 4919 if (err) 4920 return err; 4921 4922 err = nix_setup_ipolicers(rvu, nix_hw, blkaddr); 4923 if (err) 4924 return err; 4925 4926 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr); 4927 if (err) 4928 return err; 4929 4930 err = nix_setup_mcast(rvu, nix_hw, blkaddr); 4931 if (err) 4932 return err; 4933 4934 err = nix_setup_txvlan(rvu, nix_hw); 4935 if (err) 4936 return err; 4937 4938 err = nix_setup_bpids(rvu, nix_hw, blkaddr); 4939 if (err) 4940 return err; 4941 4942 /* Configure segmentation offload formats */ 4943 nix_setup_lso(rvu, nix_hw, blkaddr); 4944 4945 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info. 4946 * This helps HW protocol checker to identify headers 4947 * and validate length and checksums. 
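* Each NIX_AF_RX_DEF_* value below packs lid<<8, ltype_match<<4 and ltype_mask, as programmed in the writes that follow.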
4948 */ 4949 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2, 4950 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) | 4951 ltdefs->rx_ol2.ltype_mask); 4952 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4, 4953 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) | 4954 ltdefs->rx_oip4.ltype_mask); 4955 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4, 4956 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) | 4957 ltdefs->rx_iip4.ltype_mask); 4958 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6, 4959 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) | 4960 ltdefs->rx_oip6.ltype_mask); 4961 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6, 4962 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) | 4963 ltdefs->rx_iip6.ltype_mask); 4964 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP, 4965 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) | 4966 ltdefs->rx_otcp.ltype_mask); 4967 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP, 4968 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) | 4969 ltdefs->rx_itcp.ltype_mask); 4970 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP, 4971 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) | 4972 ltdefs->rx_oudp.ltype_mask); 4973 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP, 4974 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) | 4975 ltdefs->rx_iudp.ltype_mask); 4976 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP, 4977 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) | 4978 ltdefs->rx_osctp.ltype_mask); 4979 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP, 4980 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) | 4981 ltdefs->rx_isctp.ltype_mask); 4982 4983 if (!is_rvu_otx2(rvu)) { 4984 /* Enable APAD calculation for other protocols 4985 * matching APAD0 and APAD1 lt def registers. 4986 */ 4987 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0, 4988 (ltdefs->rx_apad0.valid << 11) | 4989 (ltdefs->rx_apad0.lid << 8) | 4990 (ltdefs->rx_apad0.ltype_match << 4) | 4991 ltdefs->rx_apad0.ltype_mask); 4992 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1, 4993 (ltdefs->rx_apad1.valid << 11) | 4994 (ltdefs->rx_apad1.lid << 8) | 4995 (ltdefs->rx_apad1.ltype_match << 4) | 4996 ltdefs->rx_apad1.ltype_mask); 4997 4998 /* Receive ethertype definition register defines layer 4999 * information in NPC_RESULT_S to identify the Ethertype 5000 * location in L2 header. Used for Ethertype overwriting 5001 * in inline IPsec flow.
5002 */ 5003 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0), 5004 (ltdefs->rx_et[0].offset << 12) | 5005 (ltdefs->rx_et[0].valid << 11) | 5006 (ltdefs->rx_et[0].lid << 8) | 5007 (ltdefs->rx_et[0].ltype_match << 4) | 5008 ltdefs->rx_et[0].ltype_mask); 5009 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1), 5010 (ltdefs->rx_et[1].offset << 12) | 5011 (ltdefs->rx_et[1].valid << 11) | 5012 (ltdefs->rx_et[1].lid << 8) | 5013 (ltdefs->rx_et[1].ltype_match << 4) | 5014 ltdefs->rx_et[1].ltype_mask); 5015 } 5016 5017 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr); 5018 if (err) 5019 return err; 5020 5021 nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links, 5022 sizeof(u64), GFP_KERNEL); 5023 if (!nix_hw->tx_credits) 5024 return -ENOMEM; 5025 5026 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ 5027 nix_link_config(rvu, blkaddr, nix_hw); 5028 5029 /* Enable Channel backpressure */ 5030 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0)); 5031 } 5032 return 0; 5033 } 5034 5035 int rvu_nix_init(struct rvu *rvu) 5036 { 5037 struct rvu_hwinfo *hw = rvu->hw; 5038 struct nix_hw *nix_hw; 5039 int blkaddr = 0, err; 5040 int i = 0; 5041 5042 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw), 5043 GFP_KERNEL); 5044 if (!hw->nix) 5045 return -ENOMEM; 5046 5047 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 5048 while (blkaddr) { 5049 nix_hw = &hw->nix[i]; 5050 nix_hw->rvu = rvu; 5051 nix_hw->blkaddr = blkaddr; 5052 err = rvu_nix_block_init(rvu, nix_hw); 5053 if (err) 5054 return err; 5055 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 5056 i++; 5057 } 5058 5059 return 0; 5060 } 5061 5062 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr, 5063 struct rvu_block *block) 5064 { 5065 struct nix_txsch *txsch; 5066 struct nix_mcast *mcast; 5067 struct nix_txvlan *vlan; 5068 struct nix_hw *nix_hw; 5069 int lvl; 5070 5071 rvu_aq_free(rvu, block->aq); 5072 5073 if (is_block_implemented(rvu->hw, blkaddr)) { 5074 nix_hw = get_nix_hw(rvu->hw, blkaddr); 5075 if (!nix_hw) 5076 return; 5077 5078 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 5079 txsch = &nix_hw->txsch[lvl]; 5080 kfree(txsch->schq.bmap); 5081 } 5082 5083 kfree(nix_hw->tx_credits); 5084 5085 nix_ipolicer_freemem(rvu, nix_hw); 5086 5087 vlan = &nix_hw->txvlan; 5088 kfree(vlan->rsrc.bmap); 5089 mutex_destroy(&vlan->rsrc_lock); 5090 5091 mcast = &nix_hw->mcast; 5092 qmem_free(rvu->dev, mcast->mce_ctx); 5093 qmem_free(rvu->dev, mcast->mcast_buf); 5094 mutex_destroy(&mcast->mce_lock); 5095 } 5096 } 5097 5098 void rvu_nix_freemem(struct rvu *rvu) 5099 { 5100 struct rvu_hwinfo *hw = rvu->hw; 5101 struct rvu_block *block; 5102 int blkaddr = 0; 5103 5104 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 5105 while (blkaddr) { 5106 block = &hw->block[blkaddr]; 5107 rvu_nix_block_freemem(rvu, blkaddr, block); 5108 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 5109 } 5110 } 5111 5112 static void nix_mcast_update_action(struct rvu *rvu, 5113 struct nix_mcast_grp_elem *elem) 5114 { 5115 struct npc_mcam *mcam = &rvu->hw->mcam; 5116 struct nix_rx_action rx_action = { 0 }; 5117 struct nix_tx_action tx_action = { 0 }; 5118 int npc_blkaddr; 5119 5120 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 5121 if (elem->dir == NIX_MCAST_INGRESS) { 5122 *(u64 *)&rx_action = npc_get_mcam_action(rvu, mcam, 5123 npc_blkaddr, 5124 elem->mcam_index); 5125 rx_action.index = elem->mce_start_index; 5126 npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index, 5127 *(u64 *)&rx_action); 5128 } else { 5129 *(u64 *)&tx_action = 
npc_get_mcam_action(rvu, mcam, 5130 npc_blkaddr, 5131 elem->mcam_index); 5132 tx_action.index = elem->mce_start_index; 5133 npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index, 5134 *(u64 *)&tx_action); 5135 } 5136 } 5137 5138 static void nix_mcast_update_mce_entry(struct rvu *rvu, u16 pcifunc, u8 is_active) 5139 { 5140 struct nix_mcast_grp_elem *elem; 5141 struct nix_mcast_grp *mcast_grp; 5142 struct nix_hw *nix_hw; 5143 int blkaddr; 5144 5145 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 5146 nix_hw = get_nix_hw(rvu->hw, blkaddr); 5147 if (!nix_hw) 5148 return; 5149 5150 mcast_grp = &nix_hw->mcast_grp; 5151 5152 mutex_lock(&mcast_grp->mcast_grp_lock); 5153 list_for_each_entry(elem, &mcast_grp->mcast_grp_head, list) { 5154 struct nix_mce_list *mce_list; 5155 struct mce *mce; 5156 5157 /* Iterate the group elements and disable the element which 5158 * received the disable request. 5159 */ 5160 mce_list = &elem->mcast_mce_list; 5161 hlist_for_each_entry(mce, &mce_list->head, node) { 5162 if (mce->pcifunc == pcifunc) { 5163 mce->is_active = is_active; 5164 break; 5165 } 5166 } 5167 5168 /* Dump the updated list to HW */ 5169 if (elem->dir == NIX_MCAST_INGRESS) 5170 nix_update_ingress_mce_list_hw(rvu, nix_hw, elem); 5171 else 5172 nix_update_egress_mce_list_hw(rvu, nix_hw, elem); 5173 5174 /* Update the multicast index in NPC rule */ 5175 nix_mcast_update_action(rvu, elem); 5176 } 5177 mutex_unlock(&mcast_grp->mcast_grp_lock); 5178 } 5179 5180 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, 5181 struct msg_rsp *rsp) 5182 { 5183 u16 pcifunc = req->hdr.pcifunc; 5184 struct rvu_pfvf *pfvf; 5185 int nixlf, err, pf; 5186 5187 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 5188 if (err) 5189 return err; 5190 5191 /* Enable the interface if it is in any multicast list */ 5192 nix_mcast_update_mce_entry(rvu, pcifunc, 1); 5193 5194 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); 5195 5196 npc_mcam_enable_flows(rvu, pcifunc); 5197 5198 pfvf = rvu_get_pfvf(rvu, pcifunc); 5199 set_bit(NIXLF_INITIALIZED, &pfvf->flags); 5200 5201 rvu_switch_update_rules(rvu, pcifunc, true); 5202 5203 pf = rvu_get_pf(pcifunc); 5204 if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) 5205 rvu_rep_notify_pfvf_state(rvu, pcifunc, true); 5206 5207 return rvu_cgx_start_stop_io(rvu, pcifunc, true); 5208 } 5209 5210 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, 5211 struct msg_rsp *rsp) 5212 { 5213 u16 pcifunc = req->hdr.pcifunc; 5214 struct rvu_pfvf *pfvf; 5215 int nixlf, err, pf; 5216 5217 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 5218 if (err) 5219 return err; 5220 5221 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 5222 /* Disable the interface if it is in any multicast list */ 5223 nix_mcast_update_mce_entry(rvu, pcifunc, 0); 5224 5225 5226 pfvf = rvu_get_pfvf(rvu, pcifunc); 5227 clear_bit(NIXLF_INITIALIZED, &pfvf->flags); 5228 5229 err = rvu_cgx_start_stop_io(rvu, pcifunc, false); 5230 if (err) 5231 return err; 5232 5233 rvu_switch_update_rules(rvu, pcifunc, false); 5234 rvu_cgx_tx_enable(rvu, pcifunc, true); 5235 5236 pf = rvu_get_pf(pcifunc); 5237 if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) 5238 rvu_rep_notify_pfvf_state(rvu, pcifunc, false); 5239 return 0; 5240 } 5241 5242 #define RX_SA_BASE GENMASK_ULL(52, 7) 5243 5244 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) 5245 { 5246 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 5247 struct hwctx_disable_req ctx_req; 5248 int pf = rvu_get_pf(pcifunc); 
5249 struct mac_ops *mac_ops; 5250 u8 cgx_id, lmac_id; 5251 u64 sa_base; 5252 void *cgxd; 5253 int err; 5254 5255 ctx_req.hdr.pcifunc = pcifunc; 5256 5257 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */ 5258 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 5259 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); 5260 nix_interface_deinit(rvu, pcifunc, nixlf); 5261 nix_rx_sync(rvu, blkaddr); 5262 nix_txschq_free(rvu, pcifunc); 5263 5264 clear_bit(NIXLF_INITIALIZED, &pfvf->flags); 5265 5266 if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) 5267 rvu_rep_notify_pfvf_state(rvu, pcifunc, false); 5268 5269 rvu_cgx_start_stop_io(rvu, pcifunc, false); 5270 5271 if (pfvf->sq_ctx) { 5272 ctx_req.ctype = NIX_AQ_CTYPE_SQ; 5273 err = nix_lf_hwctx_disable(rvu, &ctx_req); 5274 if (err) 5275 dev_err(rvu->dev, "SQ ctx disable failed\n"); 5276 } 5277 5278 if (pfvf->rq_ctx) { 5279 ctx_req.ctype = NIX_AQ_CTYPE_RQ; 5280 err = nix_lf_hwctx_disable(rvu, &ctx_req); 5281 if (err) 5282 dev_err(rvu->dev, "RQ ctx disable failed\n"); 5283 } 5284 5285 if (pfvf->cq_ctx) { 5286 ctx_req.ctype = NIX_AQ_CTYPE_CQ; 5287 err = nix_lf_hwctx_disable(rvu, &ctx_req); 5288 if (err) 5289 dev_err(rvu->dev, "CQ ctx disable failed\n"); 5290 } 5291 5292 /* reset HW config done for Switch headers */ 5293 rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT, 5294 (PKIND_TX | PKIND_RX), 0, 0, 0, 0); 5295 5296 /* Disabling CGX and NPC config done for PTP */ 5297 if (pfvf->hw_rx_tstamp_en) { 5298 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 5299 cgxd = rvu_cgx_pdata(cgx_id, rvu); 5300 mac_ops = get_mac_ops(cgxd); 5301 mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false); 5302 /* Undo NPC config done for PTP */ 5303 if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false)) 5304 dev_err(rvu->dev, "NPC config for PTP failed\n"); 5305 pfvf->hw_rx_tstamp_en = false; 5306 } 5307 5308 /* reset priority flow control config */ 5309 rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0); 5310 5311 /* reset 802.3x flow control config */ 5312 rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0); 5313 5314 nix_ctx_free(rvu, pfvf); 5315 5316 nix_free_all_bandprof(rvu, pcifunc); 5317 5318 sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf)); 5319 if (FIELD_GET(RX_SA_BASE, sa_base)) { 5320 err = rvu_cpt_ctx_flush(rvu, pcifunc); 5321 if (err) 5322 dev_err(rvu->dev, 5323 "CPT ctx flush failed with error: %d\n", err); 5324 } 5325 } 5326 5327 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32) 5328 5329 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) 5330 { 5331 struct rvu_hwinfo *hw = rvu->hw; 5332 struct rvu_block *block; 5333 int blkaddr, pf; 5334 int nixlf; 5335 u64 cfg; 5336 5337 pf = rvu_get_pf(pcifunc); 5338 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) 5339 return 0; 5340 5341 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 5342 if (blkaddr < 0) 5343 return NIX_AF_ERR_AF_LF_INVALID; 5344 5345 block = &hw->block[blkaddr]; 5346 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 5347 if (nixlf < 0) 5348 return NIX_AF_ERR_AF_LF_INVALID; 5349 5350 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf)); 5351 5352 if (enable) 5353 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN; 5354 else 5355 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN; 5356 5357 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 5358 5359 return 0; 5360 } 5361 5362 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req, 5363 struct msg_rsp *rsp) 5364 { 5365 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, 
true); 5366 } 5367 5368 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req, 5369 struct msg_rsp *rsp) 5370 { 5371 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false); 5372 } 5373 5374 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, 5375 struct nix_lso_format_cfg *req, 5376 struct nix_lso_format_cfg_rsp *rsp) 5377 { 5378 u16 pcifunc = req->hdr.pcifunc; 5379 struct nix_hw *nix_hw; 5380 struct rvu_pfvf *pfvf; 5381 int blkaddr, idx, f; 5382 u64 reg; 5383 5384 pfvf = rvu_get_pfvf(rvu, pcifunc); 5385 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 5386 if (!pfvf->nixlf || blkaddr < 0) 5387 return NIX_AF_ERR_AF_LF_INVALID; 5388 5389 nix_hw = get_nix_hw(rvu->hw, blkaddr); 5390 if (!nix_hw) 5391 return NIX_AF_ERR_INVALID_NIXBLK; 5392 5393 /* Find existing matching LSO format, if any */ 5394 for (idx = 0; idx < nix_hw->lso.in_use; idx++) { 5395 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) { 5396 reg = rvu_read64(rvu, blkaddr, 5397 NIX_AF_LSO_FORMATX_FIELDX(idx, f)); 5398 if (req->fields[f] != (reg & req->field_mask)) 5399 break; 5400 } 5401 5402 if (f == NIX_LSO_FIELD_MAX) 5403 break; 5404 } 5405 5406 if (idx < nix_hw->lso.in_use) { 5407 /* Match found */ 5408 rsp->lso_format_idx = idx; 5409 return 0; 5410 } 5411 5412 if (nix_hw->lso.in_use == nix_hw->lso.total) 5413 return NIX_AF_ERR_LSO_CFG_FAIL; 5414 5415 rsp->lso_format_idx = nix_hw->lso.in_use++; 5416 5417 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) 5418 rvu_write64(rvu, blkaddr, 5419 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f), 5420 req->fields[f]); 5421 5422 return 0; 5423 } 5424 5425 #define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48) 5426 #define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32) 5427 #define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16) 5428 #define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0) 5429 5430 #define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24) 5431 #define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8) 5432 #define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0) 5433 5434 #define CPT_INST_CREDIT_TH GENMASK_ULL(53, 32) 5435 #define CPT_INST_CREDIT_BPID GENMASK_ULL(30, 22) 5436 #define CPT_INST_CREDIT_CNT GENMASK_ULL(21, 0) 5437 5438 static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req, 5439 int blkaddr) 5440 { 5441 u8 cpt_idx, cpt_blkaddr; 5442 u64 val; 5443 5444 cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1; 5445 if (req->enable) { 5446 val = 0; 5447 /* Enable context prefetching */ 5448 if (!is_rvu_otx2(rvu)) 5449 val |= BIT_ULL(51); 5450 5451 /* Set OPCODE and EGRP */ 5452 val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp); 5453 val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode); 5454 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1); 5455 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2); 5456 5457 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val); 5458 5459 /* Set CPT queue for inline IPSec */ 5460 val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot); 5461 val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC, 5462 req->inst_qsel.cpt_pf_func); 5463 5464 if (!is_rvu_otx2(rvu)) { 5465 cpt_blkaddr = (cpt_idx == 0) ? 
BLKADDR_CPT0 : 5466 BLKADDR_CPT1; 5467 val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr); 5468 } 5469 5470 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), 5471 val); 5472 5473 /* Set CPT credit */ 5474 val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx)); 5475 if ((val & 0x3FFFFF) != 0x3FFFFF) 5476 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), 5477 0x3FFFFF - val); 5478 5479 val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit); 5480 val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid); 5481 val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th); 5482 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val); 5483 } else { 5484 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0); 5485 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), 5486 0x0); 5487 val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx)); 5488 if ((val & 0x3FFFFF) != 0x3FFFFF) 5489 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), 5490 0x3FFFFF - val); 5491 } 5492 } 5493 5494 int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu, 5495 struct nix_inline_ipsec_cfg *req, 5496 struct msg_rsp *rsp) 5497 { 5498 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) 5499 return 0; 5500 5501 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0); 5502 if (is_block_implemented(rvu->hw, BLKADDR_CPT1)) 5503 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1); 5504 5505 return 0; 5506 } 5507 5508 int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu, 5509 struct msg_req *req, 5510 struct nix_inline_ipsec_cfg *rsp) 5511 5512 { 5513 u64 val; 5514 5515 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) 5516 return 0; 5517 5518 val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG); 5519 rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val); 5520 rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val); 5521 rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val); 5522 rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val); 5523 5524 val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0)); 5525 rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val); 5526 rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val); 5527 rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val); 5528 5529 return 0; 5530 } 5531 5532 int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu, 5533 struct nix_inline_ipsec_lf_cfg *req, 5534 struct msg_rsp *rsp) 5535 { 5536 int lf, blkaddr, err; 5537 u64 val; 5538 5539 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) 5540 return 0; 5541 5542 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr); 5543 if (err) 5544 return err; 5545 5546 if (req->enable) { 5547 /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */ 5548 val = (u64)req->ipsec_cfg0.tt << 44 | 5549 (u64)req->ipsec_cfg0.tag_const << 20 | 5550 (u64)req->ipsec_cfg0.sa_pow2_size << 16 | 5551 req->ipsec_cfg0.lenm1_max; 5552 5553 if (blkaddr == BLKADDR_NIX1) 5554 val |= BIT_ULL(46); 5555 5556 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val); 5557 5558 /* Set SA_IDX_W and SA_IDX_MAX */ 5559 val = (u64)req->ipsec_cfg1.sa_idx_w << 32 | 5560 req->ipsec_cfg1.sa_idx_max; 5561 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val); 5562 5563 /* Set SA base address */ 5564 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), 5565 req->sa_base_addr); 5566 } else { 5567 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0); 5568 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0); 5569 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), 5570 0x0); 5571 
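/* Clearing CFG0, CFG1 and the SA base leaves inline IPsec RX processing disabled for this LF */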
} 5572 5573 return 0; 5574 } 5575 5576 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc) 5577 { 5578 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); 5579 5580 /* overwrite vf mac address with default_mac */ 5581 if (from_vf) 5582 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac); 5583 } 5584 5585 /* NIX ingress policers or bandwidth profiles APIs */ 5586 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr) 5587 { 5588 struct npc_lt_def_cfg defs, *ltdefs; 5589 5590 ltdefs = &defs; 5591 memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg)); 5592 5593 /* Extract PCP and DEI fields from outer VLAN from byte offset 5594 * 2 from the start of LB_PTR (ie TAG). 5595 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN 5596 * fields are considered when 'Tunnel enable' is set in profile. 5597 */ 5598 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI, 5599 (2UL << 12) | (ltdefs->ovlan.lid << 8) | 5600 (ltdefs->ovlan.ltype_match << 4) | 5601 ltdefs->ovlan.ltype_mask); 5602 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI, 5603 (2UL << 12) | (ltdefs->ivlan.lid << 8) | 5604 (ltdefs->ivlan.ltype_match << 4) | 5605 ltdefs->ivlan.ltype_mask); 5606 5607 /* DSCP field in outer and tunneled IPv4 packets */ 5608 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP, 5609 (1UL << 12) | (ltdefs->rx_oip4.lid << 8) | 5610 (ltdefs->rx_oip4.ltype_match << 4) | 5611 ltdefs->rx_oip4.ltype_mask); 5612 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP, 5613 (1UL << 12) | (ltdefs->rx_iip4.lid << 8) | 5614 (ltdefs->rx_iip4.ltype_match << 4) | 5615 ltdefs->rx_iip4.ltype_mask); 5616 5617 /* DSCP field (traffic class) in outer and tunneled IPv6 packets */ 5618 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP, 5619 (1UL << 11) | (ltdefs->rx_oip6.lid << 8) | 5620 (ltdefs->rx_oip6.ltype_match << 4) | 5621 ltdefs->rx_oip6.ltype_mask); 5622 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP, 5623 (1UL << 11) | (ltdefs->rx_iip6.lid << 8) | 5624 (ltdefs->rx_iip6.ltype_match << 4) | 5625 ltdefs->rx_iip6.ltype_mask); 5626 } 5627 5628 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw, 5629 int layer, int prof_idx) 5630 { 5631 struct nix_cn10k_aq_enq_req aq_req; 5632 int rc; 5633 5634 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 5635 5636 aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14); 5637 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; 5638 aq_req.op = NIX_AQ_INSTOP_INIT; 5639 5640 /* Context is all zeros, submit to AQ */ 5641 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 5642 (struct nix_aq_enq_req *)&aq_req, NULL); 5643 if (rc) 5644 dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n", 5645 layer, prof_idx); 5646 return rc; 5647 } 5648 5649 static int nix_setup_ipolicers(struct rvu *rvu, 5650 struct nix_hw *nix_hw, int blkaddr) 5651 { 5652 struct rvu_hwinfo *hw = rvu->hw; 5653 struct nix_ipolicer *ipolicer; 5654 int err, layer, prof_idx; 5655 u64 cfg; 5656 5657 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); 5658 if (!(cfg & BIT_ULL(61))) { 5659 hw->cap.ipolicer = false; 5660 return 0; 5661 } 5662 5663 hw->cap.ipolicer = true; 5664 nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS, 5665 sizeof(*ipolicer), GFP_KERNEL); 5666 if (!nix_hw->ipolicer) 5667 return -ENOMEM; 5668 5669 cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST); 5670 5671 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5672 ipolicer = &nix_hw->ipolicer[layer]; 5673 switch (layer) { 5674 case BAND_PROF_LEAF_LAYER: 5675 ipolicer->band_prof.max = cfg & 
0XFFFF; 5676 break; 5677 case BAND_PROF_MID_LAYER: 5678 ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF; 5679 break; 5680 case BAND_PROF_TOP_LAYER: 5681 ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF; 5682 break; 5683 } 5684 5685 if (!ipolicer->band_prof.max) 5686 continue; 5687 5688 err = rvu_alloc_bitmap(&ipolicer->band_prof); 5689 if (err) 5690 return err; 5691 5692 ipolicer->pfvf_map = devm_kcalloc(rvu->dev, 5693 ipolicer->band_prof.max, 5694 sizeof(u16), GFP_KERNEL); 5695 if (!ipolicer->pfvf_map) 5696 return -ENOMEM; 5697 5698 ipolicer->match_id = devm_kcalloc(rvu->dev, 5699 ipolicer->band_prof.max, 5700 sizeof(u16), GFP_KERNEL); 5701 if (!ipolicer->match_id) 5702 return -ENOMEM; 5703 5704 for (prof_idx = 0; 5705 prof_idx < ipolicer->band_prof.max; prof_idx++) { 5706 /* Set AF as current owner for INIT ops to succeed */ 5707 ipolicer->pfvf_map[prof_idx] = 0x00; 5708 5709 /* There is no enable bit in the profile context, 5710 * so no context disable. So let's INIT them here 5711 * so that PF/VF later on have to just do WRITE to 5712 * setup policer rates and config. 5713 */ 5714 err = nix_init_policer_context(rvu, nix_hw, 5715 layer, prof_idx); 5716 if (err) 5717 return err; 5718 } 5719 5720 /* Allocate memory for maintaining ref_counts for MID level 5721 * profiles, this will be needed for leaf layer profiles' 5722 * aggregation. 5723 */ 5724 if (layer != BAND_PROF_MID_LAYER) 5725 continue; 5726 5727 ipolicer->ref_count = devm_kcalloc(rvu->dev, 5728 ipolicer->band_prof.max, 5729 sizeof(u16), GFP_KERNEL); 5730 if (!ipolicer->ref_count) 5731 return -ENOMEM; 5732 } 5733 5734 /* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */ 5735 rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19); 5736 5737 nix_config_rx_pkt_policer_precolor(rvu, blkaddr); 5738 5739 return 0; 5740 } 5741 5742 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw) 5743 { 5744 struct nix_ipolicer *ipolicer; 5745 int layer; 5746 5747 if (!rvu->hw->cap.ipolicer) 5748 return; 5749 5750 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5751 ipolicer = &nix_hw->ipolicer[layer]; 5752 5753 if (!ipolicer->band_prof.max) 5754 continue; 5755 5756 kfree(ipolicer->band_prof.bmap); 5757 } 5758 } 5759 5760 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, 5761 struct nix_hw *nix_hw, u16 pcifunc) 5762 { 5763 struct nix_ipolicer *ipolicer; 5764 int layer, hi_layer, prof_idx; 5765 5766 /* Bits [15:14] in profile index represent layer */ 5767 layer = (req->qidx >> 14) & 0x03; 5768 prof_idx = req->qidx & 0x3FFF; 5769 5770 ipolicer = &nix_hw->ipolicer[layer]; 5771 if (prof_idx >= ipolicer->band_prof.max) 5772 return -EINVAL; 5773 5774 /* Check if the profile is allocated to the requesting PCIFUNC or not 5775 * with the exception of AF. AF is allowed to read and update contexts. 5776 */ 5777 if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc) 5778 return -EINVAL; 5779 5780 /* If this profile is linked to higher layer profile then check 5781 * if that profile is also allocated to the requesting PCIFUNC 5782 * or not. 5783 */ 5784 if (!req->prof.hl_en) 5785 return 0; 5786 5787 /* Leaf layer profile can link only to mid layer and 5788 * mid layer to top layer. 
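* The linked parent profile is selected by prof.band_prof_id when prof.hl_en is set.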
static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
			       struct nix_hw *nix_hw, u16 pcifunc)
{
	struct nix_ipolicer *ipolicer;
	int layer, hi_layer, prof_idx;

	/* Bits [15:14] in profile index represent layer */
	layer = (req->qidx >> 14) & 0x03;
	prof_idx = req->qidx & 0x3FFF;

	ipolicer = &nix_hw->ipolicer[layer];
	if (prof_idx >= ipolicer->band_prof.max)
		return -EINVAL;

	/* Check if the profile is allocated to the requesting PCIFUNC or not
	 * with the exception of AF. AF is allowed to read and update contexts.
	 */
	if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
		return -EINVAL;

	/* If this profile is linked to higher layer profile then check
	 * if that profile is also allocated to the requesting PCIFUNC
	 * or not.
	 */
	if (!req->prof.hl_en)
		return 0;

	/* Leaf layer profile can link only to mid layer and
	 * mid layer to top layer.
	 */
	if (layer == BAND_PROF_LEAF_LAYER)
		hi_layer = BAND_PROF_MID_LAYER;
	else if (layer == BAND_PROF_MID_LAYER)
		hi_layer = BAND_PROF_TOP_LAYER;
	else
		return -EINVAL;

	ipolicer = &nix_hw->ipolicer[hi_layer];
	prof_idx = req->prof.band_prof_id;
	if (prof_idx >= ipolicer->band_prof.max ||
	    ipolicer->pfvf_map[prof_idx] != pcifunc)
		return -EINVAL;

	return 0;
}

int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
					struct nix_bandprof_alloc_req *req,
					struct nix_bandprof_alloc_rsp *rsp)
{
	int blkaddr, layer, prof, idx, err;
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mutex_lock(&rvu->rsrc_lock);
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		if (!req->prof_count[layer])
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		for (idx = 0; idx < req->prof_count[layer]; idx++) {
			/* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
			if (idx == MAX_BANDPROF_PER_PFFUNC)
				break;

			prof = rvu_alloc_rsrc(&ipolicer->band_prof);
			if (prof < 0)
				break;
			rsp->prof_count[layer]++;
			rsp->prof_idx[layer][idx] = prof;
			ipolicer->pfvf_map[prof] = pcifunc;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}

static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
{
	int blkaddr, layer, prof_idx, err;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mutex_lock(&rvu->rsrc_lock);
	/* Free all the profiles allocated to the PCIFUNC */
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		ipolicer = &nix_hw->ipolicer[layer];

		for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
			if (ipolicer->pfvf_map[prof_idx] != pcifunc)
				continue;

			/* Clear ratelimit aggregation, if any */
			if (layer == BAND_PROF_LEAF_LAYER &&
			    ipolicer->match_id[prof_idx])
				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);

			ipolicer->pfvf_map[prof_idx] = 0x00;
			ipolicer->match_id[prof_idx] = 0;
			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}

int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
				       struct nix_bandprof_free_req *req,
				       struct msg_rsp *rsp)
{
	int blkaddr, layer, prof_idx, idx, err;
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;

	if (req->free_all)
		return nix_free_all_bandprof(rvu, pcifunc);

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mutex_lock(&rvu->rsrc_lock);
	/* Free the requested profile indices */
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		if (!req->prof_count[layer])
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		for (idx = 0; idx < req->prof_count[layer]; idx++) {
			if (idx == MAX_BANDPROF_PER_PFFUNC)
				break;
			prof_idx = req->prof_idx[layer][idx];
			if (prof_idx >= ipolicer->band_prof.max ||
			    ipolicer->pfvf_map[prof_idx] != pcifunc)
				continue;

			/* Clear ratelimit aggregation, if any */
			if (layer == BAND_PROF_LEAF_LAYER &&
			    ipolicer->match_id[prof_idx])
				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);

			ipolicer->pfvf_map[prof_idx] = 0x00;
			ipolicer->match_id[prof_idx] = 0;
			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}

int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
			struct nix_cn10k_aq_enq_req *aq_req,
			struct nix_cn10k_aq_enq_rsp *aq_rsp,
			u16 pcifunc, u8 ctype, u32 qidx)
{
	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req->hdr.pcifunc = pcifunc;
	aq_req->ctype = ctype;
	aq_req->op = NIX_AQ_INSTOP_READ;
	aq_req->qidx = qidx;

	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				       (struct nix_aq_enq_req *)aq_req,
				       (struct nix_aq_enq_rsp *)aq_rsp);
}

static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
					  struct nix_hw *nix_hw,
					  struct nix_cn10k_aq_enq_req *aq_req,
					  struct nix_cn10k_aq_enq_rsp *aq_rsp,
					  u32 leaf_prof, u16 mid_prof)
{
	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req->hdr.pcifunc = 0x00;
	aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req->op = NIX_AQ_INSTOP_WRITE;
	aq_req->qidx = leaf_prof;

	aq_req->prof.band_prof_id = mid_prof;
	aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
	aq_req->prof.hl_en = 1;
	aq_req->prof_mask.hl_en = 1;

	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				       (struct nix_aq_enq_req *)aq_req,
				       (struct nix_aq_enq_rsp *)aq_rsp);
}

int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
				 u16 rq_idx, u16 match_id)
{
	int leaf_prof, mid_prof, leaf_match;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;
	int blkaddr, idx, rc;

	if (!rvu->hw->cap.ipolicer)
		return 0;

	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (rc)
		return rc;

	/* Fetch the RQ's context to see if policing is enabled */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
				 NIX_AQ_CTYPE_RQ, rq_idx);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
			__func__, rq_idx, pcifunc);
		return rc;
	}

	if (!aq_rsp.rq.policer_ena)
		return 0;

	/* Get the bandwidth profile ID mapped to this RQ */
	leaf_prof = aq_rsp.rq.band_prof_id;

	ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
	ipolicer->match_id[leaf_prof] = match_id;

	/* Check if any other leaf profile is marked with same match_id */
	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
		if (idx == leaf_prof)
			continue;
		if (ipolicer->match_id[idx] != match_id)
			continue;

		leaf_match = idx;
		break;
	}

	if (idx == ipolicer->band_prof.max)
		return 0;

	/* Fetch the matching profile's context to check if it's already
	 * mapped to a mid level profile.
	 */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_match);
		return rc;
	}

	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	if (aq_rsp.prof.hl_en) {
		/* Get the mid layer prof index and also map the leaf_prof
		 * index to it, such that flows that are being steered
		 * to different RQs but marked with the same match_id
		 * are rate limited in an aggregate fashion.
		 */
		mid_prof = aq_rsp.prof.band_prof_id;
		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
						    &aq_req, &aq_rsp,
						    leaf_prof, mid_prof);
		if (rc) {
			dev_err(rvu->dev,
				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
				__func__, leaf_prof, mid_prof);
			goto exit;
		}

		mutex_lock(&rvu->rsrc_lock);
		ipolicer->ref_count[mid_prof]++;
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}

	/* Allocate a mid layer profile and
	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
	 */
	mutex_lock(&rvu->rsrc_lock);
	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
	if (mid_prof < 0) {
		dev_err(rvu->dev,
			"%s: Unable to allocate mid layer profile\n", __func__);
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}
	mutex_unlock(&rvu->rsrc_lock);
	ipolicer->pfvf_map[mid_prof] = 0x00;
	ipolicer->ref_count[mid_prof] = 0;

	/* Initialize mid layer profile same as 'leaf_prof' */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		goto exit;
	}

	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req.hdr.pcifunc = 0x00;
	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req.op = NIX_AQ_INSTOP_WRITE;
	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
	memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
	/* Clear higher layer enable bit in the mid profile, just in case */
	aq_req.prof.hl_en = 0;
	aq_req.prof_mask.hl_en = 1;

	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				     (struct nix_aq_enq_req *)&aq_req, NULL);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to INIT context of mid layer profile %d\n",
			__func__, mid_prof);
		goto exit;
	}

	/* Map both leaf profiles to this mid layer profile */
	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_prof, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_prof, mid_prof);
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_match, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_match, mid_prof);
		ipolicer->ref_count[mid_prof]--;
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

exit:
	return rc;
}
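
/* Leaf profiles whose RQs were configured with the same match_id end up
 * linked (hl_en) to a common mid layer profile, so flows steered to
 * different RQs share one aggregate rate limit. The mid layer profile's
 * ref_count tracks how many leaf profiles point at it;
 * nix_clear_ratelimit_aggr() below drops one reference and frees the mid
 * layer profile once the count reaches zero.
 */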

/* Called with rsrc_lock held. The lock is released around the AQ context
 * read below and re-acquired before the mid layer ref_count is updated.
 */
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u32 leaf_prof)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	u16 mid_prof;
	int rc;

	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);

	mutex_lock(&rvu->rsrc_lock);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		return;
	}

	if (!aq_rsp.prof.hl_en)
		return;

	mid_prof = aq_rsp.prof.band_prof_id;
	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	ipolicer->ref_count[mid_prof]--;
	/* If ref_count is zero, free mid layer profile */
	if (!ipolicer->ref_count[mid_prof]) {
		ipolicer->pfvf_map[mid_prof] = 0x00;
		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
	}
}

int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
					     struct nix_bandprof_get_hwinfo_rsp *rsp)
{
	struct nix_ipolicer *ipolicer;
	int blkaddr, layer, err;
	struct nix_hw *nix_hw;
	u64 tu;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	/* Return number of bandwidth profiles free at each layer */
	mutex_lock(&rvu->rsrc_lock);
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
	}
	mutex_unlock(&rvu->rsrc_lock);

	/* Return the policer timeunit in nanoseconds */
	tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
	rsp->policer_timeunit = (tu + 1) * 100;

	return 0;
}

static struct nix_mcast_grp_elem *rvu_nix_mcast_find_grp_elem(struct nix_mcast_grp *mcast_grp,
							      u32 mcast_grp_idx)
{
	struct nix_mcast_grp_elem *iter;
	bool is_found = false;

	list_for_each_entry(iter, &mcast_grp->mcast_grp_head, list) {
		if (iter->mcast_grp_idx == mcast_grp_idx) {
			is_found = true;
			break;
		}
	}

	if (is_found)
		return iter;

	return NULL;
}

int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx)
{
	struct nix_mcast_grp_elem *elem;
	struct nix_mcast_grp *mcast_grp;
	struct nix_hw *nix_hw;
	int blkaddr, ret;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	mcast_grp = &nix_hw->mcast_grp;
	mutex_lock(&mcast_grp->mcast_grp_lock);
	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
	if (!elem)
		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
	else
		ret = elem->mce_start_index;

	mutex_unlock(&mcast_grp->mcast_grp_lock);
	return ret;
}
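
/* FLR cleanup for multicast/mirror groups: if the PCIFUNC that created a
 * group received the FLR, the entire group is destroyed; otherwise only
 * that PCIFUNC's MCE entry is removed from the group. Both paths reuse the
 * mbox handlers with 'is_af' set so that they do not try to re-take
 * mcast_grp_lock, which is already held here.
 */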
void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc)
{
	struct nix_mcast_grp_destroy_req dreq = { 0 };
	struct nix_mcast_grp_update_req ureq = { 0 };
	struct nix_mcast_grp_update_rsp ursp = { 0 };
	struct nix_mcast_grp_elem *elem, *tmp;
	struct nix_mcast_grp *mcast_grp;
	struct nix_hw *nix_hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return;

	mcast_grp = &nix_hw->mcast_grp;

	mutex_lock(&mcast_grp->mcast_grp_lock);
	list_for_each_entry_safe(elem, tmp, &mcast_grp->mcast_grp_head, list) {
		struct nix_mce_list *mce_list;
		struct hlist_node *tmp;
		struct mce *mce;

		/* If the pcifunc which created the multicast/mirror
		 * group received an FLR, then delete the entire group.
		 */
		if (elem->pcifunc == pcifunc) {
			/* Delete group */
			dreq.hdr.pcifunc = elem->pcifunc;
			dreq.mcast_grp_idx = elem->mcast_grp_idx;
			dreq.is_af = 1;
			rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
			continue;
		}

		/* Iterate the group elements and delete the element which
		 * received the FLR.
		 */
		mce_list = &elem->mcast_mce_list;
		hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
			if (mce->pcifunc == pcifunc) {
				ureq.hdr.pcifunc = pcifunc;
				ureq.num_mce_entry = 1;
				ureq.mcast_grp_idx = elem->mcast_grp_idx;
				ureq.op = NIX_MCAST_OP_DEL_ENTRY;
				ureq.pcifunc[0] = pcifunc;
				ureq.is_af = 1;
				rvu_mbox_handler_nix_mcast_grp_update(rvu, &ureq, &ursp);
				break;
			}
		}
	}
	mutex_unlock(&mcast_grp->mcast_grp_lock);
}

int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
				    u32 mcast_grp_idx, u16 mcam_index)
{
	struct nix_mcast_grp_elem *elem;
	struct nix_mcast_grp *mcast_grp;
	struct nix_hw *nix_hw;
	int blkaddr, ret = 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	mcast_grp = &nix_hw->mcast_grp;
	mutex_lock(&mcast_grp->mcast_grp_lock);
	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
	if (!elem)
		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
	else
		elem->mcam_index = mcam_index;

	mutex_unlock(&mcast_grp->mcast_grp_lock);
	return ret;
}

int rvu_mbox_handler_nix_mcast_grp_create(struct rvu *rvu,
					  struct nix_mcast_grp_create_req *req,
					  struct nix_mcast_grp_create_rsp *rsp)
{
	struct nix_mcast_grp_elem *elem;
	struct nix_mcast_grp *mcast_grp;
	struct nix_hw *nix_hw;
	int blkaddr, err;

	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mcast_grp = &nix_hw->mcast_grp;
	elem = kzalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem)
		return -ENOMEM;

	INIT_HLIST_HEAD(&elem->mcast_mce_list.head);
	elem->mcam_index = -1;
	elem->mce_start_index = -1;
	elem->pcifunc = req->hdr.pcifunc;
	elem->dir = req->dir;
	elem->mcast_grp_idx = mcast_grp->next_grp_index++;

	mutex_lock(&mcast_grp->mcast_grp_lock);
	list_add_tail(&elem->list, &mcast_grp->mcast_grp_head);
	mcast_grp->count++;
	mutex_unlock(&mcast_grp->mcast_grp_lock);

	rsp->mcast_grp_idx = elem->mcast_grp_idx;
	return 0;
}
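
/* Group destroy tears down the group's MCAM entry (if one was installed),
 * frees its MCE list and unlinks the group from mcast_grp_head. As in the
 * update handler, 'is_af' indicates the caller already holds
 * mcast_grp_lock.
 */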
int rvu_mbox_handler_nix_mcast_grp_destroy(struct rvu *rvu,
					   struct nix_mcast_grp_destroy_req *req,
					   struct msg_rsp *rsp)
{
	struct npc_delete_flow_req uninstall_req = { 0 };
	struct npc_delete_flow_rsp uninstall_rsp = { 0 };
	struct nix_mcast_grp_elem *elem;
	struct nix_mcast_grp *mcast_grp;
	int blkaddr, err, ret = 0;
	struct nix_mcast *mcast;
	struct nix_hw *nix_hw;

	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mcast_grp = &nix_hw->mcast_grp;

	/* If AF is requesting the deletion, it already holds the lock */
	if (!req->is_af)
		mutex_lock(&mcast_grp->mcast_grp_lock);

	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
	if (!elem) {
		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
		goto unlock_grp;
	}

	/* If no mce entries are associated with the group
	 * then just remove it from the global list.
	 */
	if (!elem->mcast_mce_list.count)
		goto delete_grp;

	/* Delete the associated mcam entry and
	 * remove all mce entries from the group.
	 */
	mcast = &nix_hw->mcast;
	mutex_lock(&mcast->mce_lock);
	if (elem->mcam_index != -1) {
		uninstall_req.hdr.pcifunc = req->hdr.pcifunc;
		uninstall_req.entry = elem->mcam_index;
		rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
	}

	nix_free_mce_list(mcast, elem->mcast_mce_list.count,
			  elem->mce_start_index, elem->dir);
	nix_delete_mcast_mce_list(&elem->mcast_mce_list);
	mutex_unlock(&mcast->mce_lock);

delete_grp:
	list_del(&elem->list);
	kfree(elem);
	mcast_grp->count--;

unlock_grp:
	if (!req->is_af)
		mutex_unlock(&mcast_grp->mcast_grp_lock);

	return ret;
}

int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
					  struct nix_mcast_grp_update_req *req,
					  struct nix_mcast_grp_update_rsp *rsp)
{
	struct nix_mcast_grp_destroy_req dreq = { 0 };
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct nix_mcast_grp_elem *elem;
	struct nix_mcast_grp *mcast_grp;
	int blkaddr, err, npc_blkaddr;
	u16 prev_count, new_count;
	struct nix_mcast *mcast;
	struct nix_hw *nix_hw;
	int i, ret;

	if (!req->num_mce_entry)
		return 0;

	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mcast_grp = &nix_hw->mcast_grp;

	/* If AF is requesting the update, it already holds the lock */
	if (!req->is_af)
		mutex_lock(&mcast_grp->mcast_grp_lock);

	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
	if (!elem) {
		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
		goto unlock_grp;
	}

	/* If any requested pcifunc matches the pcifunc that created the
	 * group, the entire group is deleted.
	 */
	if (req->op == NIX_MCAST_OP_DEL_ENTRY) {
		for (i = 0; i < req->num_mce_entry; i++) {
			if (elem->pcifunc == req->pcifunc[i]) {
				/* Delete group */
				dreq.hdr.pcifunc = elem->pcifunc;
				dreq.mcast_grp_idx = elem->mcast_grp_idx;
				dreq.is_af = 1;
				rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
				ret = 0;
				goto unlock_grp;
			}
		}
	}

	mcast = &nix_hw->mcast;
	mutex_lock(&mcast->mce_lock);
	npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (elem->mcam_index != -1)
		npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, false);

	prev_count = elem->mcast_mce_list.count;
	if (req->op == NIX_MCAST_OP_ADD_ENTRY) {
		new_count = prev_count + req->num_mce_entry;
		if (prev_count)
			nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);

		elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);

		/* It is possible not to get contiguous memory */
		if (elem->mce_start_index < 0) {
			if (elem->mcam_index != -1) {
				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
						      elem->mcam_index, true);
				ret = NIX_AF_ERR_NON_CONTIG_MCE_LIST;
				goto unlock_mce;
			}
		}

		ret = nix_add_mce_list_entry(rvu, nix_hw, elem, req);
		if (ret) {
			nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
			if (prev_count)
				elem->mce_start_index = nix_alloc_mce_list(mcast,
									   prev_count,
									   elem->dir);

			if (elem->mcam_index != -1)
				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
						      elem->mcam_index, true);

			goto unlock_mce;
		}
	} else {
		if (!prev_count || prev_count < req->num_mce_entry) {
			if (elem->mcam_index != -1)
				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
						      elem->mcam_index, true);
			ret = NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
			goto unlock_mce;
		}

		nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
		new_count = prev_count - req->num_mce_entry;
		elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
		ret = nix_del_mce_list_entry(rvu, nix_hw, elem, req);
		if (ret) {
			nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
			elem->mce_start_index = nix_alloc_mce_list(mcast, prev_count, elem->dir);
			if (elem->mcam_index != -1)
				npc_enable_mcam_entry(rvu, mcam,
						      npc_blkaddr,
						      elem->mcam_index,
						      true);

			goto unlock_mce;
		}
	}

	if (elem->mcam_index == -1) {
		rsp->mce_start_index = elem->mce_start_index;
		ret = 0;
		goto unlock_mce;
	}

	nix_mcast_update_action(rvu, elem);
	npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, true);
	rsp->mce_start_index = elem->mce_start_index;
	ret = 0;

unlock_mce:
	mutex_unlock(&mcast->mce_lock);

unlock_grp:
	if (!req->is_af)
		mutex_unlock(&mcast_grp->mcast_grp_lock);

	return ret;
}