// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "cgx.h"
#include "npc_profile.h"
#include "rvu_npc_hash.h"

#define RSVD_MCAM_ENTRIES_PER_PF	3 /* Broadcast, Promisc and AllMulticast */
#define RSVD_MCAM_ENTRIES_PER_NIXLF	1 /* Ucast for LFs */

#define NPC_PARSE_RESULT_DMAC_OFFSET	8
#define NPC_HW_TSTAMP_OFFSET		8ULL
#define NPC_KEX_CHAN_MASK		0xFFFULL
#define NPC_KEX_PF_FUNC_MASK		0xFFFFULL

#define ALIGN_8B_CEIL(__a)	(((__a) + 7) & (-8))

static const char def_pfl_name[] = "default";

static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
				      int blkaddr, u16 pcifunc);
static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
				       u16 pcifunc);

bool is_npc_intf_tx(u8 intf)
{
	return !!(intf & 0x1);
}

bool is_npc_intf_rx(u8 intf)
{
	return !(intf & 0x1);
}

bool is_npc_interface_valid(struct rvu *rvu, u8 intf)
{
	struct rvu_hwinfo *hw = rvu->hw;

	return intf < hw->npc_intfs;
}

int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena)
{
	/* Due to a HW issue in these silicon versions, parse nibble enable
	 * configuration has to be identical for both Rx and Tx interfaces.
	 */
	if (is_rvu_96xx_B0(rvu))
		return nibble_ena;
	return 0;
}

void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
	int blkaddr;
	u64 val = 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Config CPI base for the PKIND */
	val = pkind | 1ULL << 62;
	rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_CPI_DEFX(pkind, 0), val);
}

int rvu_npc_get_pkind(struct rvu *rvu, u16 pf)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	u32 map;
	int i;

	for (i = 0; i < pkind->rsrc.max; i++) {
		map = pkind->pfchan_map[i];
		if (((map >> 16) & 0x3F) == pf)
			return i;
	}
	return -1;
}

#define NPC_AF_ACTION0_PTR_ADVANCE	GENMASK_ULL(27, 20)

int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool enable)
{
	int pkind, blkaddr;
	u64 val;

	pkind = rvu_npc_get_pkind(rvu, pf);
	if (pkind < 0) {
		dev_err(rvu->dev, "%s: pkind not mapped\n", __func__);
		return -EINVAL;
	}

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc);
	if (blkaddr < 0) {
		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
		return -EINVAL;
	}

	val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
	val &= ~NPC_AF_ACTION0_PTR_ADVANCE;
	/* If timestamp is enabled then configure NPC to shift 8 bytes */
	if (enable)
		val |= FIELD_PREP(NPC_AF_ACTION0_PTR_ADVANCE,
				  NPC_HW_TSTAMP_OFFSET);
	rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);

	return 0;
}

static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc,
				    int nixlf)
{
	struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam);
	struct rvu *rvu = hw->rvu;
	int blkaddr = 0, max = 0;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	/* Given a PF/VF and NIX LF number, calculate the unicast mcam
	 * entry index based on the NIX block assigned to the PF/VF.
	 */
	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		if (pfvf->nix_blkaddr == blkaddr)
			break;
		block = &rvu->hw->block[blkaddr];
		max += block->lf.max;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}

	return mcam->nixlf_offset + (max + nixlf) * RSVD_MCAM_ENTRIES_PER_NIXLF;
}
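
/* Illustration of the reserved-entry layout assumed above (the numbers
 * here are hypothetical; the real offsets are computed in
 * npc_mcam_rsrcs_init()): entries [0 .. bmap_entries - 1] are general
 * purpose, entries starting at 'nixlf_offset' hold one ucast entry per
 * NIX LF, and entries starting at 'pf_offset' hold
 * RSVD_MCAM_ENTRIES_PER_PF (bcast, allmulti, promisc) entries per PF
 * (PF0 excluded). E.g. with 2 NIX LFs on NIX0 and nixlf = 1 on NIX1,
 * 'max' above becomes 2 and the ucast entry index is nixlf_offset + 3.
 */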

int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
			     u16 pcifunc, int nixlf, int type)
{
	struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam);
	struct rvu *rvu = hw->rvu;
	int pf = rvu_get_pf(rvu->pdev, pcifunc);
	int index;

	/* Check if this is for a PF */
	if (pf && !(pcifunc & RVU_PFVF_FUNC_MASK)) {
		/* Reserved entries exclude PF0 */
		pf--;
		index = mcam->pf_offset + (pf * RSVD_MCAM_ENTRIES_PER_PF);
		/* Broadcast address matching entry should be first so
		 * that the packet can be replicated to all VFs.
		 */
		if (type == NIXLF_BCAST_ENTRY)
			return index;
		else if (type == NIXLF_ALLMULTI_ENTRY)
			return index + 1;
		else if (type == NIXLF_PROMISC_ENTRY)
			return index + 2;
	}

	return npc_get_ucast_mcam_index(mcam, pcifunc, nixlf);
}

int npc_get_bank(struct npc_mcam *mcam, int index)
{
	int bank = index / mcam->banksize;

	/* 0,1 & 2,3 banks are combined for this keysize */
	if (mcam->keysize == NPC_MCAM_KEY_X2)
		return bank ? 2 : 0;

	return bank;
}
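
/* Worked example for the X2 pairing above (a banksize of 256 is chosen
 * purely for illustration): a logical X2 entry occupies the same row in
 * two adjacent banks, so banks {0,1} and {2,3} form pairs. Index 100
 * maps to bank 0 (pair {0,1}), row 100; index 300 maps to bank 2 (pair
 * {2,3}), row 300 & 255 = 44. Callers mask the index with
 * (banksize - 1) to get the row within the pair.
 */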

bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
			   int blkaddr, int index)
{
	int bank = npc_get_bank(mcam, index);
	u64 cfg;

	index &= (mcam->banksize - 1);
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(index, bank));
	return (cfg & 1);
}

void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
			   int blkaddr, int index, bool enable)
{
	int bank = npc_get_bank(mcam, index);
	int actbank = bank;

	index &= (mcam->banksize - 1);
	for (; bank < (actbank + mcam->banks_per_entry); bank++) {
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CFG(index, bank),
			    enable ? 1 : 0);
	}
}

static void npc_clear_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
				 int blkaddr, int index)
{
	int bank = npc_get_bank(mcam, index);
	int actbank = bank;

	index &= (mcam->banksize - 1);
	for (; bank < (actbank + mcam->banks_per_entry); bank++) {
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), 0);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), 0);

		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), 0);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), 0);

		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), 0);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), 0);
	}
}

static void npc_get_keyword(struct mcam_entry *entry, int idx,
			    u64 *cam0, u64 *cam1)
{
	u64 kw_mask = 0x00;

#define CAM_MASK(n)	(BIT_ULL(n) - 1)

	/* 0, 2, 4, 6 indices refer to BANKX_CAMX_W0 and
	 * 1, 3, 5, 7 indices refer to BANKX_CAMX_W1.
	 *
	 * Also, only 48 bits of BANKX_CAMX_W1 are valid.
	 */
	switch (idx) {
	case 0:
		/* BANK(X)_CAM_W0<63:0> = MCAM_KEY[KW0]<63:0> */
		*cam1 = entry->kw[0];
		kw_mask = entry->kw_mask[0];
		break;
	case 1:
		/* BANK(X)_CAM_W1<47:0> = MCAM_KEY[KW1]<47:0> */
		*cam1 = entry->kw[1] & CAM_MASK(48);
		kw_mask = entry->kw_mask[1] & CAM_MASK(48);
		break;
	case 2:
		/* BANK(X + 1)_CAM_W0<15:0> = MCAM_KEY[KW1]<63:48>
		 * BANK(X + 1)_CAM_W0<63:16> = MCAM_KEY[KW2]<47:0>
		 */
		*cam1 = (entry->kw[1] >> 48) & CAM_MASK(16);
		*cam1 |= ((entry->kw[2] & CAM_MASK(48)) << 16);
		kw_mask = (entry->kw_mask[1] >> 48) & CAM_MASK(16);
		kw_mask |= ((entry->kw_mask[2] & CAM_MASK(48)) << 16);
		break;
	case 3:
		/* BANK(X + 1)_CAM_W1<15:0> = MCAM_KEY[KW2]<63:48>
		 * BANK(X + 1)_CAM_W1<47:16> = MCAM_KEY[KW3]<31:0>
		 */
		*cam1 = (entry->kw[2] >> 48) & CAM_MASK(16);
		*cam1 |= ((entry->kw[3] & CAM_MASK(32)) << 16);
		kw_mask = (entry->kw_mask[2] >> 48) & CAM_MASK(16);
		kw_mask |= ((entry->kw_mask[3] & CAM_MASK(32)) << 16);
		break;
	case 4:
		/* BANK(X + 2)_CAM_W0<31:0> = MCAM_KEY[KW3]<63:32>
		 * BANK(X + 2)_CAM_W0<63:32> = MCAM_KEY[KW4]<31:0>
		 */
		*cam1 = (entry->kw[3] >> 32) & CAM_MASK(32);
		*cam1 |= ((entry->kw[4] & CAM_MASK(32)) << 32);
		kw_mask = (entry->kw_mask[3] >> 32) & CAM_MASK(32);
		kw_mask |= ((entry->kw_mask[4] & CAM_MASK(32)) << 32);
		break;
	case 5:
		/* BANK(X + 2)_CAM_W1<31:0> = MCAM_KEY[KW4]<63:32>
		 * BANK(X + 2)_CAM_W1<47:32> = MCAM_KEY[KW5]<15:0>
		 */
		*cam1 = (entry->kw[4] >> 32) & CAM_MASK(32);
		*cam1 |= ((entry->kw[5] & CAM_MASK(16)) << 32);
		kw_mask = (entry->kw_mask[4] >> 32) & CAM_MASK(32);
		kw_mask |= ((entry->kw_mask[5] & CAM_MASK(16)) << 32);
		break;
	case 6:
		/* BANK(X + 3)_CAM_W0<47:0> = MCAM_KEY[KW5]<63:16>
		 * BANK(X + 3)_CAM_W0<63:48> = MCAM_KEY[KW6]<15:0>
		 */
		*cam1 = (entry->kw[5] >> 16) & CAM_MASK(48);
		*cam1 |= ((entry->kw[6] & CAM_MASK(16)) << 48);
		kw_mask = (entry->kw_mask[5] >> 16) & CAM_MASK(48);
		kw_mask |= ((entry->kw_mask[6] & CAM_MASK(16)) << 48);
		break;
	case 7:
		/* BANK(X + 3)_CAM_W1<47:0> = MCAM_KEY[KW6]<63:16> */
		*cam1 = (entry->kw[6] >> 16) & CAM_MASK(48);
		kw_mask = (entry->kw_mask[6] >> 16) & CAM_MASK(48);
		break;
	}

	*cam1 &= kw_mask;
	*cam0 = ~*cam1 & kw_mask;
}
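
/* Worked example of the CAM encoding produced above: for a key bit that
 * must match '1', kw = 1 and kw_mask = 1, giving cam1 = 1 and cam0 = 0;
 * for a bit that must match '0', cam1 = 0 and cam0 = 1; for a don't
 * care bit, kw_mask = 0 forces cam1 = cam0 = 0. This mirrors the match
 * table documented in npc_config_mcam_entry() below.
 */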

static void npc_fill_entryword(struct mcam_entry *entry, int idx,
			       u64 cam0, u64 cam1)
{
	/* Similar to npc_get_keyword, but fills mcam_entry structure from
	 * CAM registers.
	 */
	switch (idx) {
	case 0:
		entry->kw[0] = cam1;
		entry->kw_mask[0] = cam1 ^ cam0;
		break;
	case 1:
		entry->kw[1] = cam1;
		entry->kw_mask[1] = cam1 ^ cam0;
		break;
	case 2:
		entry->kw[1] |= (cam1 & CAM_MASK(16)) << 48;
		entry->kw[2] = (cam1 >> 16) & CAM_MASK(48);
		entry->kw_mask[1] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
		entry->kw_mask[2] = ((cam1 ^ cam0) >> 16) & CAM_MASK(48);
		break;
	case 3:
		entry->kw[2] |= (cam1 & CAM_MASK(16)) << 48;
		entry->kw[3] = (cam1 >> 16) & CAM_MASK(32);
		entry->kw_mask[2] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
		entry->kw_mask[3] = ((cam1 ^ cam0) >> 16) & CAM_MASK(32);
		break;
	case 4:
		entry->kw[3] |= (cam1 & CAM_MASK(32)) << 32;
		entry->kw[4] = (cam1 >> 32) & CAM_MASK(32);
		entry->kw_mask[3] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
		entry->kw_mask[4] = ((cam1 ^ cam0) >> 32) & CAM_MASK(32);
		break;
	case 5:
		entry->kw[4] |= (cam1 & CAM_MASK(32)) << 32;
		entry->kw[5] = (cam1 >> 32) & CAM_MASK(16);
		entry->kw_mask[4] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
		entry->kw_mask[5] = ((cam1 ^ cam0) >> 32) & CAM_MASK(16);
		break;
	case 6:
		entry->kw[5] |= (cam1 & CAM_MASK(48)) << 16;
		entry->kw[6] = (cam1 >> 48) & CAM_MASK(16);
		entry->kw_mask[5] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
		entry->kw_mask[6] = ((cam1 ^ cam0) >> 48) & CAM_MASK(16);
		break;
	case 7:
		entry->kw[6] |= (cam1 & CAM_MASK(48)) << 16;
		entry->kw_mask[6] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
		break;
	}
}
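
/* Why 'cam1 ^ cam0' recovers the mask: npc_get_keyword() writes
 * cam1 = kw & kw_mask and cam0 = ~kw & kw_mask, so the two registers
 * differ exactly in the bits covered by kw_mask and
 * cam1 ^ cam0 == kw_mask. Hence npc_fill_entryword() round-trips what
 * npc_get_keyword() encoded.
 */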

static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
					int blkaddr, u16 pf_func)
{
	int bank, nixlf, index;

	/* get ucast entry rule entry index */
	if (nix_get_nixlf(rvu, pf_func, &nixlf, NULL)) {
		dev_err(rvu->dev, "%s: nixlf not attached to pcifunc:0x%x\n",
			__func__, pf_func);
		/* Action 0 is drop */
		return 0;
	}

	index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf,
					 NIXLF_UCAST_ENTRY);
	bank = npc_get_bank(mcam, index);
	index &= (mcam->banksize - 1);

	return rvu_read64(rvu, blkaddr,
			  NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
}

static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
			      int blkaddr, int index, struct mcam_entry *entry,
			      bool *enable)
{
	struct rvu_npc_mcam_rule *rule;
	u16 owner, target_func;
	struct rvu_pfvf *pfvf;
	u64 rx_action;

	owner = mcam->entry2pfvf_map[index];
	target_func = (entry->action >> 4) & 0xffff;
	/* do nothing when target is LBK/PF or owner is not PF */
	if (is_pffunc_af(owner) || is_lbk_vf(rvu, target_func) ||
	    (owner & RVU_PFVF_FUNC_MASK) ||
	    !(target_func & RVU_PFVF_FUNC_MASK))
		return;

	/* save entry2target_pffunc */
	pfvf = rvu_get_pfvf(rvu, target_func);
	mcam->entry2target_pffunc[index] = target_func;

	/* don't enable rule when nixlf not attached or initialized */
	if (!(is_nixlf_attached(rvu, target_func) &&
	      test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
		*enable = false;

	/* fix up not needed for the rules added by user (ntuple filters) */
	list_for_each_entry(rule, &mcam->mcam_rules, list) {
		if (rule->entry == index)
			return;
	}

	/* AF modifies given action iff PF/VF has requested it */
	if ((entry->action & 0xFULL) != NIX_RX_ACTION_DEFAULT)
		return;

	/* copy VF default entry action to the VF mcam entry */
	rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
						 target_func);
	if (rx_action)
		entry->action = rx_action;
}

static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
				  int blkaddr, int index, u8 intf,
				  struct mcam_entry *entry, bool enable)
{
	int bank = npc_get_bank(mcam, index);
	int kw = 0, actbank, actindex;
	u8 tx_intf_mask = ~intf & 0x3;
	u8 tx_intf = intf;
	u64 cam0, cam1;

	actbank = bank; /* Save bank id, to set action later on */
	actindex = index;
	index &= (mcam->banksize - 1);

	/* Disable before mcam entry update */
	npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false);

	/* Clear mcam entry to avoid writes being suppressed by NPC */
	npc_clear_mcam_entry(rvu, mcam, blkaddr, actindex);

	/* CAM1 takes the comparison value and
	 * CAM0 specifies the match for a key bit being '0', '1' or 'dontcare'.
	 * CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0
	 * CAM1<n> = 1 & CAM0<n> = 0 => match if key<n> = 1
	 * CAM1<n> = 0 & CAM0<n> = 0 => always match i.e. dontcare.
	 */
	for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
		/* Interface should be set in all banks */
		if (is_npc_intf_tx(intf)) {
			/* Last bit must be set and rest don't care
			 * for TX interfaces
			 */
			tx_intf_mask = 0x1;
			tx_intf = intf & tx_intf_mask;
			tx_intf_mask = ~tx_intf & tx_intf_mask;
		}

		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1),
			    tx_intf);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0),
			    tx_intf_mask);

		/* Set the match key */
		npc_get_keyword(entry, kw, &cam0, &cam1);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), cam1);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), cam0);

		npc_get_keyword(entry, kw + 1, &cam0, &cam1);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), cam1);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0);
	}

	/* PF installing VF rule */
	if (is_npc_intf_rx(intf) && actindex < mcam->bmap_entries)
		npc_fixup_vf_rule(rvu, mcam, blkaddr, actindex, entry, &enable);

	/* Set 'action' */
	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_ACTION(index, actbank), entry->action);

	/* Set TAG 'action' */
	rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_TAG_ACT(index, actbank),
		    entry->vtag_action);

	/* Enable the entry */
	if (enable)
		npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true);
}
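
/* Worked example of the interface match written above (2-bit intf
 * values, per the 0x3 mask): for an RX interface, say intf = 0,
 * CAM1 = 0 and CAM0 = ~0 & 0x3 = 0x3, i.e. both intf bits must be 0.
 * For a TX interface only bit 0 is compared: intf = 1 gives CAM1 = 1,
 * CAM0 = 0 for bit 0 and dontcare (0/0) for bit 1, so the entry matches
 * regardless of the upper intf bit.
 */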

void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
			 int blkaddr, u16 src,
			 struct mcam_entry *entry, u8 *intf, u8 *ena)
{
	int sbank = npc_get_bank(mcam, src);
	int bank, kw = 0;
	u64 cam0, cam1;

	src &= (mcam->banksize - 1);
	bank = sbank;

	for (; bank < (sbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
		cam1 = rvu_read64(rvu, blkaddr,
				  NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 1));
		cam0 = rvu_read64(rvu, blkaddr,
				  NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 0));
		npc_fill_entryword(entry, kw, cam0, cam1);

		cam1 = rvu_read64(rvu, blkaddr,
				  NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 1));
		cam0 = rvu_read64(rvu, blkaddr,
				  NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 0));
		npc_fill_entryword(entry, kw + 1, cam0, cam1);
	}

	entry->action = rvu_read64(rvu, blkaddr,
				   NPC_AF_MCAMEX_BANKX_ACTION(src, sbank));
	entry->vtag_action =
		rvu_read64(rvu, blkaddr,
			   NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank));
	*intf = rvu_read64(rvu, blkaddr,
			   NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank, 1)) & 3;
	*ena = rvu_read64(rvu, blkaddr,
			  NPC_AF_MCAMEX_BANKX_CFG(src, sbank)) & 1;
}

static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
				int blkaddr, u16 src, u16 dest)
{
	int dbank = npc_get_bank(mcam, dest);
	int sbank = npc_get_bank(mcam, src);
	u64 cfg, sreg, dreg;
	int bank, i;

	src &= (mcam->banksize - 1);
	dest &= (mcam->banksize - 1);

	/* Copy INTF's, W0's, W1's CAM0 and CAM1 configuration */
	for (bank = 0; bank < mcam->banks_per_entry; bank++) {
		sreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank + bank, 0);
		dreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(dest, dbank + bank, 0);
		for (i = 0; i < 6; i++) {
			cfg = rvu_read64(rvu, blkaddr, sreg + (i * 8));
			rvu_write64(rvu, blkaddr, dreg + (i * 8), cfg);
		}
	}

	/* Copy action */
	cfg = rvu_read64(rvu, blkaddr,
			 NPC_AF_MCAMEX_BANKX_ACTION(src, sbank));
	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_ACTION(dest, dbank), cfg);

	/* Copy TAG action */
	cfg = rvu_read64(rvu, blkaddr,
			 NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank));
	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_TAG_ACT(dest, dbank), cfg);

	/* Enable or disable */
	cfg = rvu_read64(rvu, blkaddr,
			 NPC_AF_MCAMEX_BANKX_CFG(src, sbank));
	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg);
}
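
/* Note on the six-register copy loop above: it assumes the per-bank CAM
 * registers are laid out contiguously at an 8-byte stride starting at
 * CAMX_INTF(.., 0), i.e. INTF CAM0/CAM1, W0 CAM0/CAM1 and W1 CAM0/CAM1
 * in sequence, so 'sreg + i * 8' for i = 0..5 walks all six. If the
 * register map ever changes, this loop must be revisited.
 */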

u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
			int blkaddr, int index)
{
	int bank = npc_get_bank(mcam, index);

	index &= (mcam->banksize - 1);
	return rvu_read64(rvu, blkaddr,
			  NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
}

void npc_set_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
			 int blkaddr, int index, u64 cfg)
{
	int bank = npc_get_bank(mcam, index);

	index &= (mcam->banksize - 1);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_ACTION(index, bank), cfg);
}

void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
				 int nixlf, u64 chan, u8 *mac_addr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct npc_install_flow_req req = { 0 };
	struct npc_install_flow_rsp rsp = { 0 };
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct nix_rx_action action = { 0 };
	int blkaddr, index;

	/* AF's and SDP VFs work in promiscuous mode */
	if (is_lbk_vf(rvu, pcifunc) || is_sdp_vf(rvu, pcifunc))
		return;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Ucast rule should not be installed if DMAC
	 * extraction is not supported by the profile.
	 */
	if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf))
		return;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_UCAST_ENTRY);

	/* Don't change the action if entry is already enabled.
	 * Otherwise RSS action may get overwritten.
	 */
	if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
		*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
						      blkaddr, index);
	} else {
		action.op = NIX_RX_ACTIONOP_UCAST;
		action.pf_func = pcifunc;
	}

	req.default_rule = 1;
	ether_addr_copy(req.packet.dmac, mac_addr);
	eth_broadcast_addr((u8 *)&req.mask.dmac);
	req.features = BIT_ULL(NPC_DMAC);
	req.channel = chan;
	req.chan_mask = 0xFFFU;
	req.intf = pfvf->nix_rx_intf;
	req.op = action.op;
	req.hdr.pcifunc = 0; /* AF is requester */
	req.vf = action.pf_func;
	req.index = action.index;
	req.match_id = action.match_id;
	req.flow_key_alg = action.flow_key_alg;

	rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}

void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
				   int nixlf, u64 chan, u8 chan_cnt)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct npc_install_flow_req req = { 0 };
	struct npc_install_flow_rsp rsp = { 0 };
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr, ucast_idx, index;
	struct nix_rx_action action = { 0 };
	u64 relaxed_mask;
	u8 flow_key_alg;

	if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc))
		return;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_PROMISC_ENTRY);

	if (is_cgx_vf(rvu, pcifunc))
		index = npc_get_nixlf_mcam_index(mcam,
						 pcifunc & ~RVU_PFVF_FUNC_MASK,
						 nixlf, NIXLF_PROMISC_ENTRY);

	/* If the corresponding PF's ucast action is RSS,
	 * use the same action for promisc also
	 */
	ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
					     nixlf, NIXLF_UCAST_ENTRY);
	if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
		*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
						      blkaddr, ucast_idx);

	if (action.op != NIX_RX_ACTIONOP_RSS) {
		*(u64 *)&action = 0;
		action.op = NIX_RX_ACTIONOP_UCAST;
	}

	flow_key_alg = action.flow_key_alg;

	/* RX_ACTION set to MCAST for CGX PF's */
	if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
	    is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) {
		*(u64 *)&action = 0;
		action.op = NIX_RX_ACTIONOP_MCAST;
		pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
		action.index = pfvf->promisc_mce_idx;
	}

	/* For cn10k the upper two bits of the channel number are the
	 * CPT channel number. With those bits masked out in the mcam
	 * entry, the same entry used for NIX also allows packets
	 * received from CPT to be parsed.
	 */
	if (!is_rvu_otx2(rvu))
		req.chan_mask = NIX_CHAN_CPT_X2P_MASK;
	else
		req.chan_mask = 0xFFFU;

	if (chan_cnt > 1) {
		if (!is_power_of_2(chan_cnt)) {
			dev_err(rvu->dev,
				"%s: channel count more than 1, must be power of 2\n", __func__);
			return;
		}
		relaxed_mask = GENMASK_ULL(BITS_PER_LONG_LONG - 1,
					   ilog2(chan_cnt));
		req.chan_mask &= relaxed_mask;
	}

	req.channel = chan;
	req.intf = pfvf->nix_rx_intf;
	req.entry = index;
	req.op = action.op;
	req.hdr.pcifunc = 0; /* AF is requester */
	req.vf = pcifunc;
	req.index = action.index;
	req.match_id = action.match_id;
	req.flow_key_alg = flow_key_alg;

	rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
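
/* Worked example of the mask relaxation above: with chan_cnt = 4,
 * ilog2(4) = 2 and relaxed_mask clears the low two mask bits, so one
 * MCAM entry matches channels chan .. chan + 3 (assuming chan is
 * aligned to the channel count, which the power-of-2 requirement
 * implies for properly allocated channel ranges).
 */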

void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc,
				  int nixlf, bool enable)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr, index;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Get 'pcifunc' of PF device */
	pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_PROMISC_ENTRY);
	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}

void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
				       int nixlf, u64 chan)
{
	struct rvu_pfvf *pfvf;
	struct npc_install_flow_req req = { 0 };
	struct npc_install_flow_rsp rsp = { 0 };
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr, index;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Skip LBK VFs */
	if (is_lbk_vf(rvu, pcifunc))
		return;

	/* If pkt replication is not supported,
	 * then only PF is allowed to add a bcast match entry.
	 */
	if (!hw->cap.nix_rx_multicast && is_vf(pcifunc))
		return;

	/* Get 'pcifunc' of PF device */
	pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* Bcast rule should not be installed if both DMAC
	 * and LXMB extraction is not supported by the profile.
	 */
	if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf) &&
	    !npc_is_feature_supported(rvu, BIT_ULL(NPC_LXMB), pfvf->nix_rx_intf))
		return;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_BCAST_ENTRY);

	if (!hw->cap.nix_rx_multicast) {
		/* Early silicon doesn't support pkt replication,
		 * so install entry with UCAST action, so that PF
		 * receives all broadcast packets.
		 */
		req.op = NIX_RX_ACTIONOP_UCAST;
	} else {
		req.op = NIX_RX_ACTIONOP_MCAST;
		req.index = pfvf->bcast_mce_idx;
	}

	eth_broadcast_addr((u8 *)&req.packet.dmac);
	eth_broadcast_addr((u8 *)&req.mask.dmac);
	req.features = BIT_ULL(NPC_DMAC);
	req.channel = chan;
	req.chan_mask = 0xFFFU;
	req.intf = pfvf->nix_rx_intf;
	req.entry = index;
	req.hdr.pcifunc = 0; /* AF is requester */
	req.vf = pcifunc;

	rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}

void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
				    u64 chan)
{
	struct npc_install_flow_req req = { 0 };
	struct npc_install_flow_rsp rsp = { 0 };
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr, ucast_idx, index;
	u8 mac_addr[ETH_ALEN] = { 0 };
	struct nix_rx_action action = { 0 };
	struct rvu_pfvf *pfvf;
	u8 flow_key_alg;
	u16 vf_func;

	/* Only CGX PF/VF can add allmulticast entry */
	if (is_lbk_vf(rvu, pcifunc) && is_sdp_vf(rvu, pcifunc))
		return;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Get 'pcifunc' of PF device */
	vf_func = pcifunc & RVU_PFVF_FUNC_MASK;
	pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* Mcast rule should not be installed if both DMAC
	 * and LXMB extraction is not supported by the profile.
	 */
	if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf) &&
	    !npc_is_feature_supported(rvu, BIT_ULL(NPC_LXMB), pfvf->nix_rx_intf))
		return;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_ALLMULTI_ENTRY);

	/* If the corresponding PF's ucast action is RSS,
	 * use the same action for multicast entry also
	 */
	ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
					     nixlf, NIXLF_UCAST_ENTRY);
	if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
		*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
						      blkaddr, ucast_idx);

	flow_key_alg = action.flow_key_alg;
	if (action.op != NIX_RX_ACTIONOP_RSS) {
		*(u64 *)&action = 0;
		action.op = NIX_RX_ACTIONOP_UCAST;
		action.pf_func = pcifunc;
	}

	/* RX_ACTION set to MCAST for CGX PF's */
	if (hw->cap.nix_rx_multicast && pfvf->use_mce_list) {
		*(u64 *)&action = 0;
		action.op = NIX_RX_ACTIONOP_MCAST;
		action.index = pfvf->mcast_mce_idx;
	}

	mac_addr[0] = 0x01;	/* LSB bit of 1st byte in DMAC */
	ether_addr_copy(req.packet.dmac, mac_addr);
	ether_addr_copy(req.mask.dmac, mac_addr);
	req.features = BIT_ULL(NPC_DMAC);

	/* For cn10k the upper two bits of the channel number are the
	 * CPT channel number. With those bits masked out in the mcam
	 * entry, the same entry used for NIX also allows packets
	 * received from CPT to be parsed.
	 */
	if (!is_rvu_otx2(rvu))
		req.chan_mask = NIX_CHAN_CPT_X2P_MASK;
	else
		req.chan_mask = 0xFFFU;

	req.channel = chan;
	req.intf = pfvf->nix_rx_intf;
	req.entry = index;
	req.op = action.op;
	req.hdr.pcifunc = 0; /* AF is requester */
	req.vf = pcifunc | vf_func;
	req.index = action.index;
	req.match_id = action.match_id;
	req.flow_key_alg = flow_key_alg;

	rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}

void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
				   bool enable)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr, index;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Get 'pcifunc' of PF device */
	pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
					 NIXLF_ALLMULTI_ENTRY);
	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}

static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
				     int blkaddr, u16 pcifunc, u64 rx_action)
{
	int actindex, index, bank, entry;
	struct rvu_npc_mcam_rule *rule;
	bool enable, update;

	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
		return;

	mutex_lock(&mcam->lock);
	for (index = 0; index < mcam->bmap_entries; index++) {
		if (mcam->entry2target_pffunc[index] == pcifunc) {
			update = true;
			/* update not needed for the rules added via ntuple filters */
			list_for_each_entry(rule, &mcam->mcam_rules, list) {
				if (rule->entry == index)
					update = false;
			}
			if (!update)
				continue;
			bank = npc_get_bank(mcam, index);
			actindex = index;
			entry = index & (mcam->banksize - 1);

			/* read vf flow entry enable status */
			enable = is_mcam_entry_enabled(rvu, mcam, blkaddr,
						       actindex);
			/* disable before mcam entry update */
			npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex,
					      false);
			/* update 'action' */
			rvu_write64(rvu, blkaddr,
				    NPC_AF_MCAMEX_BANKX_ACTION(entry, bank),
				    rx_action);
			if (enable)
				npc_enable_mcam_entry(rvu, mcam, blkaddr,
						      actindex, true);
		}
	}
	mutex_unlock(&mcam->lock);
}

static void npc_update_rx_action_with_alg_idx(struct rvu *rvu, struct nix_rx_action action,
					      struct rvu_pfvf *pfvf, int mcam_index, int blkaddr,
					      int alg_idx)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_hwinfo *hw = rvu->hw;
	int bank, op_rss;

	if (!is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_index))
		return;

	op_rss = (!hw->cap.nix_rx_multicast || !pfvf->use_mce_list);

	bank = npc_get_bank(mcam, mcam_index);
	mcam_index &= (mcam->banksize - 1);

	/* If Rx action is MCAST update only RSS algorithm index */
	if (!op_rss) {
		*(u64 *)&action = rvu_read64(rvu, blkaddr,
				NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank));

		action.flow_key_alg = alg_idx;
	}
	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank), *(u64 *)&action);
}

void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
				    int group, int alg_idx, int mcam_index)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct nix_rx_action action;
	int blkaddr, index, bank;
	struct rvu_pfvf *pfvf;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Check if this is for reserved default entry */
	if (mcam_index < 0) {
		if (group != DEFAULT_RSS_CONTEXT_GROUP)
			return;
		index = npc_get_nixlf_mcam_index(mcam, pcifunc,
						 nixlf, NIXLF_UCAST_ENTRY);
	} else {
		/* TODO: validate this mcam index */
		index = mcam_index;
	}

	if (index >= mcam->total_entries)
		return;

	bank = npc_get_bank(mcam, index);
	index &= (mcam->banksize - 1);

	*(u64 *)&action = rvu_read64(rvu, blkaddr,
				     NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
	/* Ignore if no action was set earlier */
	if (!*(u64 *)&action)
		return;

	action.op = NIX_RX_ACTIONOP_RSS;
	action.pf_func = pcifunc;
	action.index = group;
	action.flow_key_alg = alg_idx;

	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);

	/* update the VF flow rule action with the VF default entry action */
	if (mcam_index < 0)
		npc_update_vf_flow_entry(rvu, mcam, blkaddr, pcifunc,
					 *(u64 *)&action);

	/* update the action change in default rule */
	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (pfvf->def_ucast_rule)
		pfvf->def_ucast_rule->rx_action = action;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_PROMISC_ENTRY);

	/* If PF's promiscuous entry is enabled,
	 * set RSS action for that entry as well
	 */
	npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
					  alg_idx);

	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_ALLMULTI_ENTRY);
	/* If PF's allmulti entry is enabled,
	 * set RSS action for that entry as well
	 */
	npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
					  alg_idx);
}

void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
				  int nixlf, int type, bool enable)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_mce_list *mce_list;
	int index, blkaddr, mce_idx;
	struct rvu_pfvf *pfvf;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK,
					 nixlf, type);

	/* disable MCAM entry when packet replication is not supported by hw */
	if (!hw->cap.nix_rx_multicast && !is_vf(pcifunc)) {
		npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
		return;
	}

	/* return in case mce list is not enabled */
	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
	if (hw->cap.nix_rx_multicast && is_vf(pcifunc) &&
	    type != NIXLF_BCAST_ENTRY && !pfvf->use_mce_list)
		return;

	nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);

	nix_update_mce_list(rvu, pcifunc, mce_list,
			    mce_idx, index, enable);
	if (enable)
		npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}

static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
				       int nixlf, bool enable)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int index, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Ucast MCAM match entry of this PF/VF */
	if (npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC),
				     pfvf->nix_rx_intf)) {
		index = npc_get_nixlf_mcam_index(mcam, pcifunc,
						 nixlf, NIXLF_UCAST_ENTRY);
		npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
	}

	/* Nothing to do for VFs on platforms where pkt replication
	 * is not supported
	 */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) && !rvu->hw->cap.nix_rx_multicast)
		return;

	/* add/delete pf_func to broadcast MCE list */
	npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
				     NIXLF_BCAST_ENTRY, enable);
}

void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
	if (nixlf < 0)
		return;

	npc_enadis_default_entries(rvu, pcifunc, nixlf, false);

	/* Delete multicast and promisc MCAM entries */
	npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
				     NIXLF_ALLMULTI_ENTRY, false);
	npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
				     NIXLF_PROMISC_ENTRY, false);
}

bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bool enable)
{
	int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule *rule, *tmp;

	mutex_lock(&mcam->lock);

	list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
		if (rule->intf != intf)
			continue;

		if (rule->entry != entry)
			continue;

		rule->enable = enable;
		mutex_unlock(&mcam->lock);

		npc_enable_mcam_entry(rvu, mcam, blkaddr,
				      entry, enable);

		return true;
	}

	mutex_unlock(&mcam->lock);
	return false;
}

void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
	if (nixlf < 0)
		return;

	/* Enables only broadcast match entry. Promisc/Allmulti are enabled
	 * in set_rx_mode mbox handler.
	 */
	npc_enadis_default_entries(rvu, pcifunc, nixlf, true);
}

void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule *rule, *tmp;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	mutex_lock(&mcam->lock);

	/* Disable MCAM entries directing traffic to this 'pcifunc' */
	list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
		if (is_npc_intf_rx(rule->intf) &&
		    rule->rx_action.pf_func == pcifunc &&
		    rule->rx_action.op != NIX_RX_ACTIONOP_MCAST) {
			npc_enable_mcam_entry(rvu, mcam, blkaddr,
					      rule->entry, false);
			rule->enable = false;
			/* Indicate that default rule is disabled */
			if (rule->default_rule) {
				pfvf->def_ucast_rule = NULL;
				list_del(&rule->list);
				kfree(rule);
			}
		}
	}

	mutex_unlock(&mcam->lock);

	npc_mcam_disable_flows(rvu, pcifunc);

	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
}

void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule *rule, *tmp;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	mutex_lock(&mcam->lock);

	/* Free all MCAM entries owned by this 'pcifunc' */
	npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);

	/* Free all MCAM counters owned by this 'pcifunc' */
	npc_mcam_free_all_counters(rvu, mcam, pcifunc);

	/* Delete MCAM entries owned by this 'pcifunc' */
	list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
		if (rule->owner == pcifunc && !rule->default_rule) {
			list_del(&rule->list);
			kfree(rule);
		}
	}

	mutex_unlock(&mcam->lock);

	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
}
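
/* Note on the helpers used below: SET_KEX_LD() and SET_KEX_LDFLAGS()
 * are assumed to wrap rvu_write64() of the per-interface KEX extraction
 * CSRs (NPC_AF_INTFX_LIDX_LTX_LDX_CFG and NPC_AF_INTFX_LDATAX_FLAGSX_CFG
 * respectively), one register per (intf, lid, ltype, ld) or
 * (intf, ld, lflags) combination, which is why the loops below iterate
 * over every index of each dimension.
 */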

static void npc_program_mkex_rx(struct rvu *rvu, int blkaddr,
				struct npc_mcam_kex *mkex, u8 intf)
{
	int lid, lt, ld, fl;

	if (is_npc_intf_tx(intf))
		return;

	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
		    mkex->keyx_cfg[NIX_INTF_RX]);

	/* Program LDATA */
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++)
				SET_KEX_LD(intf, lid, lt, ld,
					   mkex->intf_lid_lt_ld[NIX_INTF_RX]
					   [lid][lt][ld]);
		}
	}
	/* Program LFLAGS */
	for (ld = 0; ld < NPC_MAX_LD; ld++) {
		for (fl = 0; fl < NPC_MAX_LFL; fl++)
			SET_KEX_LDFLAGS(intf, ld, fl,
					mkex->intf_ld_flags[NIX_INTF_RX]
					[ld][fl]);
	}
}

static void npc_program_mkex_tx(struct rvu *rvu, int blkaddr,
				struct npc_mcam_kex *mkex, u8 intf)
{
	int lid, lt, ld, fl;

	if (is_npc_intf_rx(intf))
		return;

	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
		    mkex->keyx_cfg[NIX_INTF_TX]);

	/* Program LDATA */
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++)
				SET_KEX_LD(intf, lid, lt, ld,
					   mkex->intf_lid_lt_ld[NIX_INTF_TX]
					   [lid][lt][ld]);
		}
	}
	/* Program LFLAGS */
	for (ld = 0; ld < NPC_MAX_LD; ld++) {
		for (fl = 0; fl < NPC_MAX_LFL; fl++)
			SET_KEX_LDFLAGS(intf, ld, fl,
					mkex->intf_ld_flags[NIX_INTF_TX]
					[ld][fl]);
	}
}

static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
				     struct npc_mcam_kex *mkex)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u8 intf;
	int ld;

	for (ld = 0; ld < NPC_MAX_LD; ld++)
		rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld),
			    mkex->kex_ld_flags[ld]);

	for (intf = 0; intf < hw->npc_intfs; intf++) {
		npc_program_mkex_rx(rvu, blkaddr, mkex, intf);
		npc_program_mkex_tx(rvu, blkaddr, mkex, intf);
	}

	/* Program mkex hash profile */
	npc_program_mkex_hash(rvu, blkaddr);
}

static int npc_fwdb_prfl_img_map(struct rvu *rvu, void __iomem **prfl_img_addr,
				 u64 *size)
{
	u64 prfl_addr, prfl_sz;

	if (!rvu->fwdata)
		return -EINVAL;

	prfl_addr = rvu->fwdata->mcam_addr;
	prfl_sz = rvu->fwdata->mcam_sz;

	if (!prfl_addr || !prfl_sz)
		return -EINVAL;

	*prfl_img_addr = ioremap_wc(prfl_addr, prfl_sz);
	if (!(*prfl_img_addr))
		return -ENOMEM;

	*size = prfl_sz;

	return 0;
}

/* strtoull of "mkexprof" with base:36 */
#define MKEX_END_SIGN	0xdeadbeef

static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr,
				  const char *mkex_profile)
{
	struct device *dev = &rvu->pdev->dev;
	struct npc_mcam_kex *mcam_kex;
	void __iomem *mkex_prfl_addr = NULL;
	u64 prfl_sz;
	int ret;

	/* If the user didn't select an mkex profile */
	if (rvu->kpu_fwdata_sz ||
	    !strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN))
		goto program_mkex;

	/* Setting up the mapping for mkex profile image */
	ret = npc_fwdb_prfl_img_map(rvu, &mkex_prfl_addr, &prfl_sz);
	if (ret < 0)
		goto program_mkex;

	mcam_kex = (struct npc_mcam_kex __force *)mkex_prfl_addr;

	while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) {
		/* Compare with mkex mod_param name string */
		if (mcam_kex->mkex_sign == MKEX_SIGN &&
		    !strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) {
			/* Due to an errata (35786) in A0/B0 pass silicon,
			 * parse nibble enable configuration has to be
			 * identical for both Rx and Tx interfaces.
			 */
			if (!is_rvu_96xx_B0(rvu) ||
			    mcam_kex->keyx_cfg[NIX_INTF_RX] == mcam_kex->keyx_cfg[NIX_INTF_TX])
				rvu->kpu.mkex = mcam_kex;
			goto program_mkex;
		}

		mcam_kex++;
		prfl_sz -= sizeof(struct npc_mcam_kex);
	}
	dev_warn(dev, "Failed to load requested profile: %s\n", mkex_profile);

program_mkex:
	dev_info(rvu->dev, "Using %s mkex profile\n", rvu->kpu.mkex->name);
	/* Program selected mkex profile */
	npc_program_mkex_profile(rvu, blkaddr, rvu->kpu.mkex);
	if (mkex_prfl_addr)
		iounmap(mkex_prfl_addr);
}

static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
				 const struct npc_kpu_profile_action *kpuaction,
				 int kpu, int entry, bool pkind)
{
	struct npc_kpu_action0 action0 = {0};
	struct npc_kpu_action1 action1 = {0};
	u64 reg;

	action1.errlev = kpuaction->errlev;
	action1.errcode = kpuaction->errcode;
	action1.dp0_offset = kpuaction->dp0_offset;
	action1.dp1_offset = kpuaction->dp1_offset;
	action1.dp2_offset = kpuaction->dp2_offset;

	if (pkind)
		reg = NPC_AF_PKINDX_ACTION1(entry);
	else
		reg = NPC_AF_KPUX_ENTRYX_ACTION1(kpu, entry);

	rvu_write64(rvu, blkaddr, reg, *(u64 *)&action1);

	action0.byp_count = kpuaction->bypass_count;
	action0.capture_ena = kpuaction->cap_ena;
	action0.parse_done = kpuaction->parse_done;
	action0.next_state = kpuaction->next_state;
	action0.capture_lid = kpuaction->lid;
	action0.capture_ltype = kpuaction->ltype;
	action0.capture_flags = kpuaction->flags;
	action0.ptr_advance = kpuaction->ptr_advance;
	action0.var_len_offset = kpuaction->offset;
	action0.var_len_mask = kpuaction->mask;
	action0.var_len_right = kpuaction->right;
	action0.var_len_shift = kpuaction->shift;

	if (pkind)
		reg = NPC_AF_PKINDX_ACTION0(entry);
	else
		reg = NPC_AF_KPUX_ENTRYX_ACTION0(kpu, entry);

	rvu_write64(rvu, blkaddr, reg, *(u64 *)&action0);
}

static void npc_config_kpucam(struct rvu *rvu, int blkaddr,
			      const struct npc_kpu_profile_cam *kpucam,
			      int kpu, int entry)
{
	struct npc_kpu_cam cam0 = {0};
	struct npc_kpu_cam cam1 = {0};

	cam1.state = kpucam->state & kpucam->state_mask;
	cam1.dp0_data = kpucam->dp0 & kpucam->dp0_mask;
	cam1.dp1_data = kpucam->dp1 & kpucam->dp1_mask;
	cam1.dp2_data = kpucam->dp2 & kpucam->dp2_mask;

	cam0.state = ~kpucam->state & kpucam->state_mask;
	cam0.dp0_data = ~kpucam->dp0 & kpucam->dp0_mask;
	cam0.dp1_data = ~kpucam->dp1 & kpucam->dp1_mask;
	cam0.dp2_data = ~kpucam->dp2 & kpucam->dp2_mask;

	rvu_write64(rvu, blkaddr,
		    NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 0), *(u64 *)&cam0);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 1), *(u64 *)&cam1);
}

static inline u64 enable_mask(int count)
{
	return (((count) < 64) ? ~(BIT_ULL(count) - 1) : (0x00ULL));
}
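
/* Worked example: NPC_AF_KPUX_ENTRY_DISX holds a per-entry *disable*
 * mask, so enable_mask(10) returns a value with bits 0-9 clear (those
 * entries stay enabled) and bits 10-63 set (disabled). For count >= 64
 * it returns 0, enabling the whole 64-entry word; any remainder is
 * handled via the second DISX register below.
 */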

static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
				    const struct npc_kpu_profile *profile)
{
	int entry, num_entries, max_entries;
	u64 entry_mask;

	if (profile->cam_entries != profile->action_entries) {
		dev_err(rvu->dev,
			"KPU%d: CAM and action entries [%d != %d] not equal\n",
			kpu, profile->cam_entries, profile->action_entries);
	}

	max_entries = rvu->hw->npc_kpu_entries;

	/* Program CAM match entries for previous KPU extracted data */
	num_entries = min_t(int, profile->cam_entries, max_entries);
	for (entry = 0; entry < num_entries; entry++)
		npc_config_kpucam(rvu, blkaddr,
				  &profile->cam[entry], kpu, entry);

	/* Program this KPU's actions */
	num_entries = min_t(int, profile->action_entries, max_entries);
	for (entry = 0; entry < num_entries; entry++)
		npc_config_kpuaction(rvu, blkaddr, &profile->action[entry],
				     kpu, entry, false);

	/* Enable all programmed entries */
	num_entries = min_t(int, profile->action_entries, profile->cam_entries);
	entry_mask = enable_mask(num_entries);
	/* Disable first KPU_MAX_CST_ENT entries for built-in profile */
	if (!rvu->kpu.custom)
		entry_mask |= GENMASK_ULL(KPU_MAX_CST_ENT - 1, 0);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_KPUX_ENTRY_DISX(kpu, 0), entry_mask);
	if (num_entries > 64) {
		rvu_write64(rvu, blkaddr,
			    NPC_AF_KPUX_ENTRY_DISX(kpu, 1),
			    enable_mask(num_entries - 64));
	}

	/* Enable this KPU */
	rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(kpu), 0x01);
}

static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
{
	profile->custom = 0;
	profile->name = def_pfl_name;
	profile->version = NPC_KPU_PROFILE_VER;
	profile->ikpu = ikpu_action_entries;
	profile->pkinds = ARRAY_SIZE(ikpu_action_entries);
	profile->kpu = npc_kpu_profiles;
	profile->kpus = ARRAY_SIZE(npc_kpu_profiles);
	profile->lt_def = &npc_lt_defaults;
	profile->mkex = &npc_mkex_default;
	profile->mkex_hash = &npc_mkex_hash_default;

	return 0;
}

static int npc_apply_custom_kpu(struct rvu *rvu,
				struct npc_kpu_profile_adapter *profile)
{
	size_t hdr_sz = sizeof(struct npc_kpu_profile_fwdata), offset = 0;
	struct npc_kpu_profile_fwdata *fw = rvu->kpu_fwdata;
	struct npc_kpu_profile_action *action;
	struct npc_kpu_profile_cam *cam;
	struct npc_kpu_fwdata *fw_kpu;
	int entries;
	u16 kpu, entry;

	if (rvu->kpu_fwdata_sz < hdr_sz) {
		dev_warn(rvu->dev, "Invalid KPU profile size\n");
		return -EINVAL;
	}
	if (le64_to_cpu(fw->signature) != KPU_SIGN) {
		dev_warn(rvu->dev, "Invalid KPU profile signature %llx\n",
			 fw->signature);
		return -EINVAL;
	}
	/* Verify that the profile uses a known structure version */
	if (NPC_KPU_VER_MAJ(profile->version) >
	    NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER)) {
		dev_warn(rvu->dev, "Not supported Major version: %d > %d\n",
			 NPC_KPU_VER_MAJ(profile->version),
			 NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER));
		return -EINVAL;
	}
	/* Verify if profile is aligned with the required kernel changes */
	if (NPC_KPU_VER_MIN(profile->version) <
	    NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER)) {
		dev_warn(rvu->dev,
			 "Invalid KPU profile version: %d.%d.%d expected version <= %d.%d.%d\n",
			 NPC_KPU_VER_MAJ(profile->version),
			 NPC_KPU_VER_MIN(profile->version),
			 NPC_KPU_VER_PATCH(profile->version),
			 NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER),
			 NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER),
			 NPC_KPU_VER_PATCH(NPC_KPU_PROFILE_VER));
		return -EINVAL;
	}
	/* Verify if profile fits the HW */
	if (fw->kpus > profile->kpus) {
		dev_warn(rvu->dev, "Not enough KPUs: %d > %ld\n", fw->kpus,
			 profile->kpus);
		return -EINVAL;
	}

	profile->custom = 1;
	profile->name = fw->name;
	profile->version = le64_to_cpu(fw->version);
	profile->mkex = &fw->mkex;
	profile->lt_def = &fw->lt_def;

	for (kpu = 0; kpu < fw->kpus; kpu++) {
		fw_kpu = (struct npc_kpu_fwdata *)(fw->data + offset);
		if (fw_kpu->entries > KPU_MAX_CST_ENT)
			dev_warn(rvu->dev,
				 "Too many custom entries on KPU%d: %d > %d\n",
				 kpu, fw_kpu->entries, KPU_MAX_CST_ENT);
		entries = min(fw_kpu->entries, KPU_MAX_CST_ENT);
		cam = (struct npc_kpu_profile_cam *)fw_kpu->data;
		offset += sizeof(*fw_kpu) + fw_kpu->entries * sizeof(*cam);
		action = (struct npc_kpu_profile_action *)(fw->data + offset);
		offset += fw_kpu->entries * sizeof(*action);
		if (rvu->kpu_fwdata_sz < hdr_sz + offset) {
			dev_warn(rvu->dev,
				 "Profile size mismatch on KPU%i parsing.\n",
				 kpu + 1);
			return -EINVAL;
		}
		for (entry = 0; entry < entries; entry++) {
			profile->kpu[kpu].cam[entry] = cam[entry];
			profile->kpu[kpu].action[entry] = action[entry];
		}
	}

	return 0;
}

static int npc_load_kpu_prfl_img(struct rvu *rvu, void __iomem *prfl_addr,
				 u64 prfl_sz, const char *kpu_profile)
{
	struct npc_kpu_profile_fwdata *kpu_data = NULL;
	int rc = -EINVAL;

	kpu_data = (struct npc_kpu_profile_fwdata __force *)prfl_addr;
	if (le64_to_cpu(kpu_data->signature) == KPU_SIGN &&
	    !strncmp(kpu_data->name, kpu_profile, KPU_NAME_LEN)) {
		dev_info(rvu->dev, "Loading KPU profile from firmware db: %s\n",
			 kpu_profile);
		rvu->kpu_fwdata = kpu_data;
		rvu->kpu_fwdata_sz = prfl_sz;
		rvu->kpu_prfl_addr = prfl_addr;
		rc = 0;
	}

	return rc;
}

static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz,
					 const char *kpu_profile)
{
	struct npc_coalesced_kpu_prfl *img_data = NULL;
	int i = 0, rc = -EINVAL;
	void __iomem *kpu_prfl_addr;
	u32 offset;

	img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr;
	if (le64_to_cpu(img_data->signature) == KPU_SIGN &&
	    !strncmp(img_data->name, kpu_profile, KPU_NAME_LEN)) {
		/* Loaded profile is a single KPU profile. */
		rc = npc_load_kpu_prfl_img(rvu, rvu->kpu_prfl_addr,
					   prfl_sz, kpu_profile);
		goto done;
	}

	/* Loaded image is a coalesced one; the first KPU profile starts
	 * after the header and the per-profile size table.
	 */
	offset = offsetof(struct npc_coalesced_kpu_prfl, prfl_sz) +
		 (img_data->num_prfl * sizeof(uint16_t));
	/* Scan each profile within the coalesced image. */
	while (i < img_data->num_prfl) {
		/* Profile image offsets are rounded up to next 8 multiple.*/
		offset = ALIGN_8B_CEIL(offset);
		kpu_prfl_addr = (void __iomem *)((uintptr_t)rvu->kpu_prfl_addr +
						 offset);
		rc = npc_load_kpu_prfl_img(rvu, kpu_prfl_addr,
					   img_data->prfl_sz[i], kpu_profile);
		if (!rc)
			break;
		/* Calculating offset of profile image based on profile size.*/
		offset += img_data->prfl_sz[i];
		i++;
	}
done:
	return rc;
}

static int npc_load_kpu_profile_fwdb(struct rvu *rvu, const char *kpu_profile)
{
	int ret = -EINVAL;
	u64 prfl_sz;

	/* Setting up the mapping for NPC profile image */
	ret = npc_fwdb_prfl_img_map(rvu, &rvu->kpu_prfl_addr, &prfl_sz);
	if (ret < 0)
		goto done;

	/* Detect if profile is coalesced or single KPU profile and load */
	ret = npc_fwdb_detect_load_prfl_img(rvu, prfl_sz, kpu_profile);
	if (ret == 0)
		goto done;

	/* Cleaning up if KPU profile image from fwdata is not valid. */
	if (rvu->kpu_prfl_addr) {
		iounmap(rvu->kpu_prfl_addr);
		rvu->kpu_prfl_addr = NULL;
		rvu->kpu_fwdata_sz = 0;
		rvu->kpu_fwdata = NULL;
	}

done:
	return ret;
}

static void npc_load_kpu_profile(struct rvu *rvu)
{
	struct npc_kpu_profile_adapter *profile = &rvu->kpu;
	const char *kpu_profile = rvu->kpu_pfl_name;
	const struct firmware *fw = NULL;
	bool retry_fwdb = false;

	/* If the user didn't specify a profile customization */
	if (!strncmp(kpu_profile, def_pfl_name, KPU_NAME_LEN))
		goto revert_to_default;
	/* First prepare default KPU, then we'll customize top entries. */
	npc_prepare_default_kpu(profile);

	/* Order of precedence for loading the NPC profile (high to low):
	 * Firmware binary in filesystem.
	 * Firmware database method.
	 * Default KPU profile.
	 */
	if (!request_firmware_direct(&fw, kpu_profile, rvu->dev)) {
		dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n",
			 kpu_profile);
		rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL);
		if (rvu->kpu_fwdata) {
			memcpy(rvu->kpu_fwdata, fw->data, fw->size);
			rvu->kpu_fwdata_sz = fw->size;
		}
		release_firmware(fw);
		retry_fwdb = true;
		goto program_kpu;
	}

load_image_fwdb:
	/* Loading the KPU profile using firmware database */
	if (npc_load_kpu_profile_fwdb(rvu, kpu_profile))
		goto revert_to_default;

program_kpu:
	/* Apply profile customization if firmware was loaded. */
	if (!rvu->kpu_fwdata_sz || npc_apply_custom_kpu(rvu, profile)) {
		/* If the image from the firmware filesystem fails to load
		 * or is invalid, retry with the firmware database method.
		 */
		if (rvu->kpu_fwdata || rvu->kpu_fwdata_sz) {
			/* Loading image from firmware database failed. */
			if (rvu->kpu_prfl_addr) {
				iounmap(rvu->kpu_prfl_addr);
				rvu->kpu_prfl_addr = NULL;
			} else {
				kfree(rvu->kpu_fwdata);
			}
			rvu->kpu_fwdata = NULL;
			rvu->kpu_fwdata_sz = 0;
			if (retry_fwdb) {
				retry_fwdb = false;
				goto load_image_fwdb;
			}
		}

		dev_warn(rvu->dev,
			 "Can't load KPU profile %s. Using default.\n",
			 kpu_profile);
		kfree(rvu->kpu_fwdata);
		rvu->kpu_fwdata = NULL;
		goto revert_to_default;
	}

	dev_info(rvu->dev, "Using custom profile '%s', version %d.%d.%d\n",
		 profile->name, NPC_KPU_VER_MAJ(profile->version),
		 NPC_KPU_VER_MIN(profile->version),
		 NPC_KPU_VER_PATCH(profile->version));

	return;

revert_to_default:
	npc_prepare_default_kpu(profile);
}

static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int num_pkinds, num_kpus, idx;

	/* Disable all KPUs and their entries */
	for (idx = 0; idx < hw->npc_kpus; idx++) {
		rvu_write64(rvu, blkaddr,
			    NPC_AF_KPUX_ENTRY_DISX(idx, 0), ~0ULL);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_KPUX_ENTRY_DISX(idx, 1), ~0ULL);
		rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx), 0x00);
	}

	/* Load and customize KPU profile. */
	npc_load_kpu_profile(rvu);

	/* First program IKPU profile i.e. PKIND configs.
	 * Check HW max count to avoid configuring junk or
	 * writing to unsupported CSR addresses.
	 */
	num_pkinds = rvu->kpu.pkinds;
	num_pkinds = min_t(int, hw->npc_pkinds, num_pkinds);

	for (idx = 0; idx < num_pkinds; idx++)
		npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true);

	/* Program KPU CAM and Action profiles */
	num_kpus = rvu->kpu.kpus;
	num_kpus = min_t(int, hw->npc_kpus, num_kpus);

	for (idx = 0; idx < num_kpus; idx++)
		npc_program_kpu_profile(rvu, blkaddr, idx, &rvu->kpu.kpu[idx]);
}

void npc_mcam_rsrcs_deinit(struct rvu *rvu)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;

	bitmap_free(mcam->bmap);
	bitmap_free(mcam->bmap_reverse);
	kfree(mcam->entry2pfvf_map);
	kfree(mcam->cntr2pfvf_map);
	kfree(mcam->entry2cntr_map);
	kfree(mcam->cntr_refcnt);
	kfree(mcam->entry2target_pffunc);
	kfree(mcam->counters.bmap);
}

int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
{
	int nixlf_count = rvu_get_nixlf_count(rvu);
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int rsvd, err;
	u16 index;
	int cntr;
	u64 cfg;

	/* Actual number of MCAM entries varies by entry size */
	cfg = (rvu_read64(rvu, blkaddr,
			  NPC_AF_INTFX_KEX_CFG(0)) >> 32) & 0x07;
	mcam->total_entries = (mcam->banks / BIT_ULL(cfg)) * mcam->banksize;
	mcam->keysize = cfg;

	/* Number of banks combined per MCAM entry */
	if (cfg == NPC_MCAM_KEY_X4)
		mcam->banks_per_entry = 4;
	else if (cfg == NPC_MCAM_KEY_X2)
		mcam->banks_per_entry = 2;
	else
		mcam->banks_per_entry = 1;
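
	/* Worked example of the arithmetic above (numbers illustrative):
	 * with 4 banks and an X2 key (two banks per logical entry),
	 * total_entries = (4 / 2) * banksize. With an X4 key all four
	 * banks combine and total_entries equals banksize.
	 */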
1862 */ 1863 rsvd = (nixlf_count * RSVD_MCAM_ENTRIES_PER_NIXLF) + 1864 ((rvu->hw->total_pfs - 1) * RSVD_MCAM_ENTRIES_PER_PF); 1865 if (mcam->total_entries <= rsvd) { 1866 dev_warn(rvu->dev, 1867 "Insufficient NPC MCAM size %d for pkt I/O, exiting\n", 1868 mcam->total_entries); 1869 return -ENOMEM; 1870 } 1871 1872 mcam->bmap_entries = mcam->total_entries - rsvd; 1873 mcam->nixlf_offset = mcam->bmap_entries; 1874 mcam->pf_offset = mcam->nixlf_offset + nixlf_count; 1875 1876 /* Allocate bitmaps for managing MCAM entries */ 1877 mcam->bmap = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL); 1878 if (!mcam->bmap) 1879 return -ENOMEM; 1880 1881 mcam->bmap_reverse = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL); 1882 if (!mcam->bmap_reverse) 1883 goto free_bmap; 1884 1885 mcam->bmap_fcnt = mcam->bmap_entries; 1886 1887 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ 1888 mcam->entry2pfvf_map = kcalloc(mcam->bmap_entries, sizeof(u16), 1889 GFP_KERNEL); 1890 1891 if (!mcam->entry2pfvf_map) 1892 goto free_bmap_reverse; 1893 1894 /* Reserve 1/8th of MCAM entries at the bottom for low priority 1895 * allocations and another 1/8th at the top for high priority 1896 * allocations. 1897 */ 1898 mcam->lprio_count = mcam->bmap_entries / 8; 1899 if (mcam->lprio_count > BITS_PER_LONG) 1900 mcam->lprio_count = round_down(mcam->lprio_count, 1901 BITS_PER_LONG); 1902 mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count; 1903 mcam->hprio_count = mcam->lprio_count; 1904 mcam->hprio_end = mcam->hprio_count; 1905 1906 /* Allocate bitmap for managing MCAM counters and memory 1907 * for saving counter to RVU PFFUNC allocation mapping. 1908 */ 1909 err = rvu_alloc_bitmap(&mcam->counters); 1910 if (err) 1911 goto free_entry_map; 1912 1913 mcam->cntr2pfvf_map = kcalloc(mcam->counters.max, sizeof(u16), 1914 GFP_KERNEL); 1915 if (!mcam->cntr2pfvf_map) 1916 goto free_cntr_bmap; 1917 1918 /* Alloc memory for MCAM entry to counter mapping and for tracking 1919 * counter's reference count. 
1920	 */
1921	mcam->entry2cntr_map = kcalloc(mcam->bmap_entries, sizeof(u16),
1922				       GFP_KERNEL);
1923	if (!mcam->entry2cntr_map)
1924		goto free_cntr_map;
1925
1926	mcam->cntr_refcnt = kcalloc(mcam->counters.max, sizeof(u16),
1927				    GFP_KERNEL);
1928	if (!mcam->cntr_refcnt)
1929		goto free_entry_cntr_map;
1930
1931	/* Alloc memory for saving the target device of an mcam rule */
1932	mcam->entry2target_pffunc = kmalloc_array(mcam->total_entries,
1933						  sizeof(u16), GFP_KERNEL);
1934	if (!mcam->entry2target_pffunc)
1935		goto free_cntr_refcnt;
1936
1937	for (index = 0; index < mcam->bmap_entries; index++) {
1938		mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
1939		mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
1940	}
1941
1942	for (cntr = 0; cntr < mcam->counters.max; cntr++)
1943		mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;
1944
1945	mutex_init(&mcam->lock);
1946
1947	return 0;
1948
1949 free_cntr_refcnt:
1950	kfree(mcam->cntr_refcnt);
1951 free_entry_cntr_map:
1952	kfree(mcam->entry2cntr_map);
1953 free_cntr_map:
1954	kfree(mcam->cntr2pfvf_map);
1955 free_cntr_bmap:
1956	kfree(mcam->counters.bmap);
1957 free_entry_map:
1958	kfree(mcam->entry2pfvf_map);
1959 free_bmap_reverse:
1960	bitmap_free(mcam->bmap_reverse);
1961 free_bmap:
1962	bitmap_free(mcam->bmap);
1963
1964	return -ENOMEM;
1965 }
1966
1967 static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
1968 {
1969	struct npc_pkind *pkind = &rvu->hw->pkind;
1970	struct npc_mcam *mcam = &rvu->hw->mcam;
1971	struct rvu_hwinfo *hw = rvu->hw;
1972	u64 npc_const, npc_const1;
1973	u64 npc_const2 = 0;
1974
1975	npc_const = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
1976	npc_const1 = rvu_read64(rvu, blkaddr, NPC_AF_CONST1);
1977	if (npc_const1 & BIT_ULL(63))
1978		npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2);
1979
1980	pkind->rsrc.max = NPC_UNRESERVED_PKIND_COUNT;
1981	hw->npc_pkinds = (npc_const1 >> 12) & 0xFFULL;
1982	hw->npc_kpu_entries = npc_const1 & 0xFFFULL;
1983	hw->npc_kpus = (npc_const >> 8) & 0x1FULL;
1984	hw->npc_intfs = npc_const & 0xFULL;
1985	hw->npc_counters = (npc_const >> 48) & 0xFFFFULL;
1986
1987	mcam->banks = (npc_const >> 44) & 0xFULL;
1988	mcam->banksize = (npc_const >> 28) & 0xFFFFULL;
1989	hw->npc_stat_ena = BIT_ULL(9);
1990	/* Extended set */
1991	if (npc_const2) {
1992		hw->npc_ext_set = true;
1993		/* 96xx supports only match_stats, and its npc_counters
1994		 * count is reflected in the NPC_AF_CONST reg.
1995		 * STAT_SEL and ENA are at bit positions [8:0] and 9.
1996		 * 98xx has both match_stat and the extended set, with the
1997		 * npc_counter count reflected in NPC_AF_CONST2, and
1998		 * STAT_SEL_EXT added at bit positions [14:12].
1999		 * cn10k supports only the extended set, so npc_counters in
2000		 * NPC_AF_CONST is 0 and the count comes from NPC_AF_CONST2.
2001		 * STAT_SEL widens from [8:0] to [11:0] and ENA moves to bit 63.
2002		 */
2003		if (!hw->npc_counters)
2004			hw->npc_stat_ena = BIT_ULL(63);
2005		hw->npc_counters = (npc_const2 >> 16) & 0xFFFFULL;
2006		mcam->banksize = npc_const2 & 0xFFFFULL;
2007	}
2008
2009	mcam->counters.max = hw->npc_counters;
2010 }
2011
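/* Decode sketch for rvu_npc_hw_init() above, using a hypothetical
 * NPC_AF_CONST value of 0x0200801000001002:
 *
 *	npc_counters   = (val >> 48) & 0xFFFF = 512
 *	mcam->banks    = (val >> 44) & 0xF    = 8
 *	mcam->banksize = (val >> 28) & 0xFFFF = 256
 *	npc_kpus       = (val >> 8)  & 0x1F   = 16
 *	npc_intfs      = val & 0xF            = 2
 *
 * On an extended-set chip, banksize and npc_counters would then be
 * overridden from NPC_AF_CONST2 as shown above.
 */
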
2012 static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
2013 {
2014	struct npc_mcam_kex *mkex = rvu->kpu.mkex;
2015	struct npc_mcam *mcam = &rvu->hw->mcam;
2016	struct rvu_hwinfo *hw = rvu->hw;
2017	u64 nibble_ena, rx_kex, tx_kex;
2018	u8 intf;
2019
2020	/* Reserve the last counter for the MCAM RX miss action, which is
2021	 * set to drop the packet. This way we will know how many packets
2022	 * didn't match any MCAM entry.
2023	 */
2024	mcam->counters.max--;
2025	mcam->rx_miss_act_cntr = mcam->counters.max;
2026
2027	rx_kex = mkex->keyx_cfg[NIX_INTF_RX];
2028	tx_kex = mkex->keyx_cfg[NIX_INTF_TX];
2029	nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
2030
2031	nibble_ena = rvu_npc_get_tx_nibble_cfg(rvu, nibble_ena);
2032	if (nibble_ena) {
2033		tx_kex &= ~NPC_PARSE_NIBBLE;
2034		tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
2035		mkex->keyx_cfg[NIX_INTF_TX] = tx_kex;
2036	}
2037
2038	/* Configure RX interfaces */
2039	for (intf = 0; intf < hw->npc_intfs; intf++) {
2040		if (is_npc_intf_tx(intf))
2041			continue;
2042
2043		/* Set RX MCAM search key size. LA..LE (ltype only) + Channel */
2044		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
2045			    rx_kex);
2046
2047		/* If an MCAM lookup doesn't result in a match, drop the
2048		 * received packet, and map this action to a counter to
2049		 * count dropped packets.
2050		 */
2051		rvu_write64(rvu, blkaddr,
2052			    NPC_AF_INTFX_MISS_ACT(intf), NIX_RX_ACTIONOP_DROP);
2053
2054		/* NPC_AF_INTFX_MISS_STAT_ACT[14:12] - counter[11:9]
2055		 * NPC_AF_INTFX_MISS_STAT_ACT[8:0] - counter[8:0]
2056		 */
2057		rvu_write64(rvu, blkaddr,
2058			    NPC_AF_INTFX_MISS_STAT_ACT(intf),
2059			    ((mcam->rx_miss_act_cntr >> 9) << 12) |
2060			    hw->npc_stat_ena | mcam->rx_miss_act_cntr);
2061	}
2062
2063	/* Configure TX interfaces */
2064	for (intf = 0; intf < hw->npc_intfs; intf++) {
2065		if (is_npc_intf_rx(intf))
2066			continue;
2067
2068		/* Extract Ltypes LID_LA to LID_LE */
2069		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
2070			    tx_kex);
2071
2072		/* Set TX miss action to UCAST_DEFAULT, i.e. transmit the
2073		 * packet on the NIX LF SQ's default channel.
2074		 */
2075		rvu_write64(rvu, blkaddr,
2076			    NPC_AF_INTFX_MISS_ACT(intf),
2077			    NIX_TX_ACTIONOP_UCAST_DEFAULT);
2078	}
2079 }
2080
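/* Worked example (counter index hypothetical) of the miss-counter
 * encoding written to NPC_AF_INTFX_MISS_STAT_ACT above: with
 * rx_miss_act_cntr = 0x25F, counter[11:9] = 0x1 lands in register
 * bits [14:12] via (0x25F >> 9) << 12 = 0x1000, and counter[8:0] =
 * 0x05F lands in bits [8:0]; hw->npc_stat_ena is OR'ed in to enable
 * the stat.
 */
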
2081 int rvu_npc_init(struct rvu *rvu)
2082 {
2083	struct npc_kpu_profile_adapter *kpu = &rvu->kpu;
2084	struct npc_pkind *pkind = &rvu->hw->pkind;
2085	struct npc_mcam *mcam = &rvu->hw->mcam;
2086	int blkaddr, entry, bank, err;
2087
2088	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2089	if (blkaddr < 0) {
2090		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
2091		return -ENODEV;
2092	}
2093
2094	rvu_npc_hw_init(rvu, blkaddr);
2095
2096	/* First disable all MCAM entries, to stop traffic towards NIXLFs */
2097	for (bank = 0; bank < mcam->banks; bank++) {
2098		for (entry = 0; entry < mcam->banksize; entry++)
2099			rvu_write64(rvu, blkaddr,
2100				    NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0);
2101	}
2102
2103	err = rvu_alloc_bitmap(&pkind->rsrc);
2104	if (err)
2105		return err;
2106	/* Reserve PKIND#0 for LBKs. The power-on reset value of LBK_CH_PKIND
2107	 * is '0', so there is no need to configure a PKIND for each LBK
2108	 * separately.
2109	 */
2110	rvu_alloc_rsrc(&pkind->rsrc);
2111
2112	/* Allocate mem for pkind to PF and channel mapping info */
2113	pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max,
2114					 sizeof(u32), GFP_KERNEL);
2115	if (!pkind->pfchan_map)
2116		return -ENOMEM;
2117
2118	/* Configure KPU profile */
2119	npc_parser_profile_init(rvu, blkaddr);
2120
2121	/* Config Outer L2 and Outer IPv4 NPC layer info */
2122	rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OL2,
2123		    (kpu->lt_def->pck_ol2.lid << 8) | (kpu->lt_def->pck_ol2.ltype_match << 4) |
2124		    kpu->lt_def->pck_ol2.ltype_mask);
2125	rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4,
2126		    (kpu->lt_def->pck_oip4.lid << 8) | (kpu->lt_def->pck_oip4.ltype_match << 4) |
2127		    kpu->lt_def->pck_oip4.ltype_mask);
2128
2129	/* Config Inner IPv4 NPC layer info */
2130	rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4,
2131		    (kpu->lt_def->pck_iip4.lid << 8) | (kpu->lt_def->pck_iip4.ltype_match << 4) |
2132		    kpu->lt_def->pck_iip4.ltype_mask);
2133
2134	/* Enable the following for RX packets:
2135	 * - Outer IPv4 header checksum validation.
2136	 * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2B].
2137	 * - Detect outer L2 multicast address and set NPC_RESULT_S[L2M].
2138	 * - Inner IPv4 header checksum validation.
2139	 * - Set a non-zero checksum error code value.
2140	 */
2141	rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG,
2142		    rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) |
2143		    ((u64)NPC_EC_OIP4_CSUM << 32) | (NPC_EC_IIP4_CSUM << 24) |
2144		    BIT_ULL(7) | BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1));
2145
2146	rvu_npc_setup_interfaces(rvu, blkaddr);
2147
2148	npc_config_secret_key(rvu, blkaddr);
2149	/* Configure MKEX profile */
2150	npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name);
2151
2152	err = npc_mcam_rsrcs_init(rvu, blkaddr);
2153	if (err)
2154		return err;
2155
2156	err = npc_flow_steering_init(rvu, blkaddr);
2157	if (err) {
2158		dev_err(rvu->dev,
2159			"Incorrect mkex profile loaded, using default mkex\n");
2160		npc_load_mkex_profile(rvu, blkaddr, def_pfl_name);
2161	}
2162
2163	return 0;
2164 }
2165
2166 void rvu_npc_freemem(struct rvu *rvu)
2167 {
2168	struct npc_pkind *pkind = &rvu->hw->pkind;
2169	struct npc_mcam *mcam = &rvu->hw->mcam;
2170
2171	kfree(pkind->rsrc.bmap);
2172	npc_mcam_rsrcs_deinit(rvu);
2173	if (rvu->kpu_prfl_addr)
2174		iounmap(rvu->kpu_prfl_addr);
2175	else
2176		kfree(rvu->kpu_fwdata);
2177	mutex_destroy(&mcam->lock);
2178 }
2179
2180 void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
2181				       int blkaddr, int *alloc_cnt,
2182				       int *enable_cnt)
2183 {
2184	struct npc_mcam *mcam = &rvu->hw->mcam;
2185	int entry;
2186
2187	*alloc_cnt = 0;
2188	*enable_cnt = 0;
2189
2190	for (entry = 0; entry < mcam->bmap_entries; entry++) {
2191		if (mcam->entry2pfvf_map[entry] == pcifunc) {
2192			(*alloc_cnt)++;
2193			if (is_mcam_entry_enabled(rvu, mcam, blkaddr, entry))
2194				(*enable_cnt)++;
2195		}
2196	}
2197 }
2198
2199 void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
2200					 int blkaddr, int *alloc_cnt,
2201					 int *enable_cnt)
2202 {
2203	struct npc_mcam *mcam = &rvu->hw->mcam;
2204	int cntr;
2205
2206	*alloc_cnt = 0;
2207	*enable_cnt = 0;
2208
2209	for (cntr = 0; cntr < mcam->counters.max; cntr++) {
2210		if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
2211			(*alloc_cnt)++;
2212			if (mcam->cntr_refcnt[cntr])
2213				(*enable_cnt)++;
2214		}
2215	}
2216 }
2217
2218 static int npc_mcam_verify_entry(struct npc_mcam *mcam,
2219				 u16 pcifunc, int entry)
2220 {
2221	/* Verify AF installed entries */
2222	if (is_pffunc_af(pcifunc))
2223		return 0;
2224	/* Verify if entry is
valid and if it is indeed 2224 * allocated to the requesting PFFUNC. 2225 */ 2226 if (entry >= mcam->bmap_entries) 2227 return NPC_MCAM_INVALID_REQ; 2228 2229 if (pcifunc != mcam->entry2pfvf_map[entry]) 2230 return NPC_MCAM_PERM_DENIED; 2231 2232 return 0; 2233 } 2234 2235 static int npc_mcam_verify_counter(struct npc_mcam *mcam, 2236 u16 pcifunc, int cntr) 2237 { 2238 /* Verify if counter is valid and if it is indeed 2239 * allocated to the requesting PFFUNC. 2240 */ 2241 if (cntr >= mcam->counters.max) 2242 return NPC_MCAM_INVALID_REQ; 2243 2244 if (pcifunc != mcam->cntr2pfvf_map[cntr]) 2245 return NPC_MCAM_PERM_DENIED; 2246 2247 return 0; 2248 } 2249 2250 static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam, 2251 int blkaddr, u16 entry, u16 cntr) 2252 { 2253 u16 index = entry & (mcam->banksize - 1); 2254 u32 bank = npc_get_bank(mcam, entry); 2255 struct rvu_hwinfo *hw = rvu->hw; 2256 2257 /* Set mapping and increment counter's refcnt */ 2258 mcam->entry2cntr_map[entry] = cntr; 2259 mcam->cntr_refcnt[cntr]++; 2260 /* Enable stats */ 2261 rvu_write64(rvu, blkaddr, 2262 NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), 2263 ((cntr >> 9) << 12) | hw->npc_stat_ena | cntr); 2264 } 2265 2266 static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu, 2267 struct npc_mcam *mcam, 2268 int blkaddr, u16 entry, u16 cntr) 2269 { 2270 u16 index = entry & (mcam->banksize - 1); 2271 u32 bank = npc_get_bank(mcam, entry); 2272 2273 /* Remove mapping and reduce counter's refcnt */ 2274 mcam->entry2cntr_map[entry] = NPC_MCAM_INVALID_MAP; 2275 mcam->cntr_refcnt[cntr]--; 2276 /* Disable stats */ 2277 rvu_write64(rvu, blkaddr, 2278 NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), 0x00); 2279 } 2280 2281 /* Sets MCAM entry in bitmap as used. Update 2282 * reverse bitmap too. Should be called with 2283 * 'mcam->lock' held. 2284 */ 2285 static void npc_mcam_set_bit(struct npc_mcam *mcam, u16 index) 2286 { 2287 u16 entry, rentry; 2288 2289 entry = index; 2290 rentry = mcam->bmap_entries - index - 1; 2291 2292 __set_bit(entry, mcam->bmap); 2293 __set_bit(rentry, mcam->bmap_reverse); 2294 mcam->bmap_fcnt--; 2295 } 2296 2297 /* Sets MCAM entry in bitmap as free. Update 2298 * reverse bitmap too. Should be called with 2299 * 'mcam->lock' held. 
2300	 */
2301 static void npc_mcam_clear_bit(struct npc_mcam *mcam, u16 index)
2302 {
2303	u16 entry, rentry;
2304
2305	entry = index;
2306	rentry = mcam->bmap_entries - index - 1;
2307
2308	__clear_bit(entry, mcam->bmap);
2309	__clear_bit(rentry, mcam->bmap_reverse);
2310	mcam->bmap_fcnt++;
2311 }
2312
2313 static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
2314				      int blkaddr, u16 pcifunc)
2315 {
2316	u16 index, cntr;
2317
2318	/* Scan all MCAM entries and free the ones mapped to 'pcifunc' */
2319	for (index = 0; index < mcam->bmap_entries; index++) {
2320		if (mcam->entry2pfvf_map[index] == pcifunc) {
2321			mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
2322			/* Free the entry in bitmap */
2323			npc_mcam_clear_bit(mcam, index);
2324			/* Disable the entry */
2325			npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
2326
2327			/* Update entry2counter mapping */
2328			cntr = mcam->entry2cntr_map[index];
2329			if (cntr != NPC_MCAM_INVALID_MAP)
2330				npc_unmap_mcam_entry_and_cntr(rvu, mcam,
2331							      blkaddr, index,
2332							      cntr);
2333			mcam->entry2target_pffunc[index] = 0x0;
2334		}
2335	}
2336 }
2337
2338 static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
2339				       u16 pcifunc)
2340 {
2341	u16 cntr;
2342
2343	/* Scan all MCAM counters and free the ones mapped to 'pcifunc' */
2344	for (cntr = 0; cntr < mcam->counters.max; cntr++) {
2345		if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
2346			mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;
2347			mcam->cntr_refcnt[cntr] = 0;
2348			rvu_free_rsrc(&mcam->counters, cntr);
2349			/* This API is expected to be called after freeing
2350			 * MCAM entries, which in turn removes the
2351			 * 'entry to counter' mapping.
2352			 * No need to do it again.
2353			 */
2354		}
2355	}
2356 }
2357
2358 /* Find an area of contiguous free entries of size 'nr'. If none is
2359  * found, return the start of the largest contiguous free area, with
2360  * its size in '*max_area'.
2361  */
2362 static u16 npc_mcam_find_zero_area(unsigned long *map, u16 size, u16 start,
2363				   u16 nr, u16 *max_area)
2364 {
2365	u16 max_area_start = 0;
2366	u16 index, next, end;
2367
2368	*max_area = 0;
2369
2370 again:
2371	index = find_next_zero_bit(map, size, start);
2372	if (index >= size)
2373		return max_area_start;
2374
2375	end = ((index + nr) >= size) ? size : index + nr;
2376	next = find_next_bit(map, end, index);
2377	if (*max_area < (next - index)) {
2378		*max_area = next - index;
2379		max_area_start = index;
2380	}
2381
2382	if (next < end) {
2383		start = next + 1;
2384		goto again;
2385	}
2386
2387	return max_area_start;
2388 }
2389
2390 /* Find the number of free MCAM entries available
2391  * within a range, i.e. between 'start' and 'end'.
2392  */
2393 static u16 npc_mcam_get_free_count(unsigned long *map, u16 start, u16 end)
2394 {
2395	u16 index, next;
2396	u16 fcnt = 0;
2397
2398 again:
2399	if (start >= end)
2400		return fcnt;
2401
2402	index = find_next_zero_bit(map, end, start);
2403	if (index >= end)
2404		return fcnt;
2405
2406	next = find_next_bit(map, end, index);
2407	if (next <= end) {
2408		fcnt += next - index;
2409		start = next + 1;
2410		goto again;
2411	}
2412
2413	fcnt += end - index;
2414	return fcnt;
2415 }
2416
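/* Worked example (bitmap hypothetical) for npc_mcam_find_zero_area()
 * above: with bits {2, 3, 9} set in a 16-bit map, start = 0 and
 * nr = 4, the scan sees a zero run of length 2 at index 0 (cut short
 * by bit 2) and then a zero run of length 4 at index 4, so it returns
 * 4 with *max_area = 4. Callers treat *max_area < nr as 'requested
 * contiguous size not available'.
 */
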
2417 static void
2418 npc_get_mcam_search_range_priority(struct npc_mcam *mcam,
2419				   struct npc_mcam_alloc_entry_req *req,
2420				   u16 *start, u16 *end, bool *reverse)
2421 {
2422	u16 fcnt;
2423
2424	if (req->priority == NPC_MCAM_HIGHER_PRIO)
2425		goto hprio;
2426
2427	/* For a low priority entry allocation
2428	 * - If the reference entry is not in the hprio zone, then
2429	 *   search range: ref_entry to end.
2430	 * - If the reference entry is in the hprio zone and the
2431	 *   request can be accommodated in the non-hprio zone, then
2432	 *   search range: 'start of middle zone' to 'end'.
2433	 * - Else search in reverse, so that fewer hprio zone
2434	 *   entries are allocated.
2435	 */
2436
2437	*reverse = false;
2438	*start = req->ref_entry + 1;
2439	*end = mcam->bmap_entries;
2440
2441	if (req->ref_entry >= mcam->hprio_end)
2442		return;
2443
2444	fcnt = npc_mcam_get_free_count(mcam->bmap,
2445				       mcam->hprio_end, mcam->bmap_entries);
2446	if (fcnt > req->count)
2447		*start = mcam->hprio_end;
2448	else
2449		*reverse = true;
2450	return;
2451
2452 hprio:
2453	/* For a high priority entry allocation, search is always
2454	 * in reverse to preserve hprio zone entries.
2455	 * - If the reference entry is not in the lprio zone, then
2456	 *   search range: 0 to ref_entry.
2457	 * - If the reference entry is in the lprio zone and the
2458	 *   request can be accommodated in the middle zone, then
2459	 *   search range: 'hprio_end' to 'lprio_start'.
2460	 */
2461
2462	*reverse = true;
2463	*start = 0;
2464	*end = req->ref_entry;
2465
2466	if (req->ref_entry <= mcam->lprio_start)
2467		return;
2468
2469	fcnt = npc_mcam_get_free_count(mcam->bmap,
2470				       mcam->hprio_end, mcam->lprio_start);
2471	if (fcnt < req->count)
2472		return;
2473	*start = mcam->hprio_end;
2474	*end = mcam->lprio_start;
2475 }
2476
2477 static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
2478				  struct npc_mcam_alloc_entry_req *req,
2479				  struct npc_mcam_alloc_entry_rsp *rsp)
2480 {
2481	u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
2482	u16 fcnt, hp_fcnt, lp_fcnt;
2483	u16 start, end, index;
2484	int entry, next_start;
2485	bool reverse = false;
2486	unsigned long *bmap;
2487	u16 max_contig;
2488
2489	mutex_lock(&mcam->lock);
2490
2491	/* Check if there are any free entries */
2492	if (!mcam->bmap_fcnt) {
2493		mutex_unlock(&mcam->lock);
2494		return NPC_MCAM_ALLOC_FAILED;
2495	}
2496
2497	/* MCAM entries are divided into high priority, middle and
2498	 * low priority zones. The idea is to avoid allocating the top
2499	 * and bottom most entries as far as possible, to increase the
2500	 * probability of honouring priority allocation requests.
2501	 *
2502	 * Two bitmaps are used for mcam entry management:
2503	 * mcam->bmap for forward search, i.e. '0 to mcam->bmap_entries';
2504	 * mcam->bmap_reverse for reverse search, i.e. 'mcam->bmap_entries to 0'.
2505	 *
2506	 * The reverse bitmap is used to allocate entries
2507	 * - when a higher priority entry is requested
2508	 * - when few free entries are available.
2509	 * Among the available free entries, lower priority ones are
2510	 * always chosen when a 'high vs low' question arises.
2511	 *
2512	 * For a VF, the base MCAM match rule is set by its PF, and all
2513	 * further MCAM rules installed by the VF on its own are
2514	 * concatenated with the base rule set by its PF. Hence PF entries
2515	 * should be at lower priority compared to VF entries; otherwise
2516	 * the base rule is always hit and rules installed by the VF are
2517	 * of no use. Hence if the request is from a PF, allocate low
2518	 * priority entries.
2519	 */
2520	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2521		goto lprio_alloc;
2522
2523	/* Get the search range for a priority allocation request */
2524	if (req->priority) {
2525		npc_get_mcam_search_range_priority(mcam, req,
2526						   &start, &end, &reverse);
2527		goto alloc;
2528	}
2529
2530	/* Find out the search range for a non-priority allocation request.
2531	 *
2532	 * Get MCAM free entry count in middle zone.
2533	 */
2534	lp_fcnt = npc_mcam_get_free_count(mcam->bmap,
2535					  mcam->lprio_start,
2536					  mcam->bmap_entries);
2537	hp_fcnt = npc_mcam_get_free_count(mcam->bmap, 0, mcam->hprio_end);
2538	fcnt = mcam->bmap_fcnt - lp_fcnt - hp_fcnt;
2539
2540	/* Check if the request can be accommodated in the middle zone */
2541	if (fcnt > req->count) {
2542		start = mcam->hprio_end;
2543		end = mcam->lprio_start;
2544	} else if ((fcnt + (hp_fcnt / 2) + (lp_fcnt / 2)) > req->count) {
2545		/* Expand search zone from half of hprio zone to
2546		 * half of lprio zone.
2547		 */
2548		start = mcam->hprio_end / 2;
2549		end = mcam->bmap_entries - (mcam->lprio_count / 2);
2550		reverse = true;
2551	} else {
2552		/* Not enough free entries, search all entries in reverse,
2553		 * so that low priority ones will get used up.
2554		 */
2555 lprio_alloc:
2556		reverse = true;
2557		start = 0;
2558		end = mcam->bmap_entries;
2559		/* Ensure PF requests are always at the bottom. If a PF
2560		 * requests a higher/lower priority entry wrt a reference
2561		 * entry, honour that criteria and search for entries from
2562		 * the bottom, not in the mid zone.
2563		 */
2564		if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
2565		    req->priority == NPC_MCAM_HIGHER_PRIO)
2566			end = req->ref_entry;
2567
2568		if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
2569		    req->priority == NPC_MCAM_LOWER_PRIO)
2570			start = req->ref_entry;
2571	}
2572
2573 alloc:
2574	if (reverse) {
2575		bmap = mcam->bmap_reverse;
2576		start = mcam->bmap_entries - start;
2577		end = mcam->bmap_entries - end;
2578		swap(start, end);
2579	} else {
2580		bmap = mcam->bmap;
2581	}
2582
2583	if (req->contig) {
2584		/* Allocate the requested number of contiguous entries; if
2585		 * unsuccessful, find the max contiguous entries available.
2586		 */
2587		index = npc_mcam_find_zero_area(bmap, end, start,
2588						req->count, &max_contig);
2589		rsp->count = max_contig;
2590		if (reverse)
2591			rsp->entry = mcam->bmap_entries - index - max_contig;
2592		else
2593			rsp->entry = index;
2594	} else {
2595		/* Allocate the requested number of non-contiguous entries;
2596		 * if unsuccessful, allocate as many as possible.
2597		 */
2598		rsp->count = 0;
2599		next_start = start;
2600		for (entry = 0; entry < req->count; entry++) {
2601			index = find_next_zero_bit(bmap, end, next_start);
2602			if (index >= end)
2603				break;
2604
2605			next_start = start + (index - start) + 1;
2606
2607			/* Save the entry's index */
2608			if (reverse)
2609				index = mcam->bmap_entries - index - 1;
2610			entry_list[entry] = index;
2611			rsp->count++;
2612		}
2613	}
2614
2615	/* If allocating the requested number of entries is unsuccessful,
2616	 * expand the search range to the full bitmap length and retry.
2617	 */
2618	if (!req->priority && (rsp->count < req->count) &&
2619	    ((end - start) != mcam->bmap_entries)) {
2620		reverse = true;
2621		start = 0;
2622		end = mcam->bmap_entries;
2623		goto alloc;
2624	}
2625
2626	/* For priority entry allocation requests, if allocation
2627	 * failed, expand the search to the max possible range and retry.
2628	 */
2629	if (req->priority && rsp->count < req->count) {
2630		if (req->priority == NPC_MCAM_LOWER_PRIO &&
2631		    (start != (req->ref_entry + 1))) {
2632			start = req->ref_entry + 1;
2633			end = mcam->bmap_entries;
2634			reverse = false;
2635			goto alloc;
2636		} else if ((req->priority == NPC_MCAM_HIGHER_PRIO) &&
2637			   ((end - start) != req->ref_entry)) {
2638			start = 0;
2639			end = req->ref_entry;
2640			reverse = true;
2641			goto alloc;
2642		}
2643	}
2644
2645	/* Copy MCAM entry indices into the mbox response entry_list.
	 * The requester always expects indices in ascending order, so
2646	 * reverse the list if the reverse bitmap was used for allocation.
2647	 */
2648	if (!req->contig && rsp->count) {
2649		index = 0;
2650		for (entry = rsp->count - 1; entry >= 0; entry--) {
2651			if (reverse)
2652				rsp->entry_list[index++] = entry_list[entry];
2653			else
2654				rsp->entry_list[entry] = entry_list[entry];
2655		}
2656	}
2657
2658	/* Mark the allocated entries as used and set nixlf mapping */
2659	for (entry = 0; entry < rsp->count; entry++) {
2660		index = req->contig ?
2661			(rsp->entry + entry) : rsp->entry_list[entry];
2662		npc_mcam_set_bit(mcam, index);
2663		mcam->entry2pfvf_map[index] = pcifunc;
2664		mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
2665	}
2666
2667	/* Update available free count in mbox response */
2668	rsp->free_count = mcam->bmap_fcnt;
2669
2670	mutex_unlock(&mcam->lock);
2671	return 0;
2672 }
2673
2674 /* Mark the MCAM slot as reserved in the bitmaps */
2675 void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx)
2676 {
2677	struct npc_mcam *mcam = &rvu->hw->mcam;
2678
2679	npc_mcam_set_bit(mcam, entry_idx);
2680 }
2681
2682 int npc_config_cntr_default_entries(struct rvu *rvu, bool enable)
2683 {
2684	struct npc_mcam *mcam = &rvu->hw->mcam;
2685	struct npc_install_flow_rsp rsp;
2686	struct rvu_npc_mcam_rule *rule;
2687	int blkaddr;
2688
2689	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2690	if (blkaddr < 0)
2691		return -EINVAL;
2692
2693	mutex_lock(&mcam->lock);
2694	list_for_each_entry(rule, &mcam->mcam_rules, list) {
2695		if (!is_mcam_entry_enabled(rvu, mcam, blkaddr, rule->entry))
2696			continue;
2697		if (!rule->default_rule)
2698			continue;
2699		if (enable && !rule->has_cntr) { /* Alloc and map new counter */
2700			__rvu_mcam_add_counter_to_rule(rvu, rule->owner,
2701						       rule, &rsp);
2702			if (rsp.counter < 0) {
2703				dev_err(rvu->dev,
2704					"%s: Failed to allocate cntr for default rule (err=%d)\n",
2705					__func__, rsp.counter);
2706				break;
2707			}
2708			npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
2709						    rule->entry, rsp.counter);
2710			/* Reset counter before use */
2711			rvu_write64(rvu, blkaddr,
2712				    NPC_AF_MATCH_STATX(rule->cntr), 0x0);
2713		}
2714
2715		/* Free and unmap counter */
2716		if (!enable && rule->has_cntr)
2717			__rvu_mcam_remove_counter_from_rule(rvu, rule->owner,
2718							    rule);
2719	}
2720	mutex_unlock(&mcam->lock);
2721
2722	return 0;
2723 }
2724
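/* Reverse-search sketch (numbers hypothetical) for
 * npc_mcam_alloc_entries() above: with bmap_entries = 1024, a reverse
 * search over the forward range [256, 1024) becomes, on
 * mcam->bmap_reverse,
 *
 *	start = 1024 - 256 = 768, end = 1024 - 1024 = 0, swap -> [0, 768)
 *
 * and a free bit found at reverse index 'r' maps back to entry
 * 1024 - r - 1, the same mirroring npc_mcam_set_bit() uses to keep
 * the two bitmaps in sync.
 */
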
2725 int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
2726					  struct npc_mcam_alloc_entry_req *req,
2727					  struct npc_mcam_alloc_entry_rsp *rsp)
2728 {
2729	struct npc_mcam *mcam = &rvu->hw->mcam;
2730	u16 pcifunc = req->hdr.pcifunc;
2731	int blkaddr;
2732
2733	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2734	if (blkaddr < 0)
2735		return NPC_MCAM_INVALID_REQ;
2736
2737	rsp->entry = NPC_MCAM_ENTRY_INVALID;
2738	rsp->free_count = 0;
2739
2740	/* If ref_entry is greater than the valid range,
2741	 * clamp it to the max value.
2742	 */
2743	if (req->ref_entry > mcam->bmap_entries)
2744		req->ref_entry = mcam->bmap_entries;
2745
2746	/* ref_entry can't be '0' if the requested priority is high.
2747	 * It can't be the last entry if the requested priority is low.
2748	 */
2749	if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) ||
2750	    ((req->ref_entry == mcam->bmap_entries) &&
2751	     req->priority == NPC_MCAM_LOWER_PRIO))
2752		return NPC_MCAM_INVALID_REQ;
2753
2754	/* Since the list of allocated indices is sent to the requester,
2755	 * the max number of non-contiguous entries per mbox msg is limited.
2756	 */
2757	if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES) {
2758		dev_err(rvu->dev,
2759			"%s: %d non-contiguous MCAM entries requested, more than the max (%d) allowed\n",
2760			__func__, req->count, NPC_MAX_NONCONTIG_ENTRIES);
2761		return NPC_MCAM_INVALID_REQ;
2762	}
2763
2764	/* An alloc request from a PFFUNC with no NIXLF attached is denied */
2765	if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
2766		return NPC_MCAM_ALLOC_DENIED;
2767
2768	return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp);
2769 }
2770
2771 int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
2772					 struct npc_mcam_free_entry_req *req,
2773					 struct msg_rsp *rsp)
2774 {
2775	struct npc_mcam *mcam = &rvu->hw->mcam;
2776	u16 pcifunc = req->hdr.pcifunc;
2777	int blkaddr, rc = 0;
2778	u16 cntr;
2779
2780	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2781	if (blkaddr < 0)
2782		return NPC_MCAM_INVALID_REQ;
2783
2784	/* Ignore a free request from a PFFUNC with no NIXLF attached */
2785	if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
2786		return NPC_MCAM_INVALID_REQ;
2787
2788	mutex_lock(&mcam->lock);
2789
2790	if (req->all)
2791		goto free_all;
2792
2793	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
2794	if (rc)
2795		goto exit;
2796
2797	mcam->entry2pfvf_map[req->entry] = NPC_MCAM_INVALID_MAP;
2798	mcam->entry2target_pffunc[req->entry] = 0x0;
2799	npc_mcam_clear_bit(mcam, req->entry);
2800	npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
2801
2802	/* Update entry2counter mapping */
2803	cntr = mcam->entry2cntr_map[req->entry];
2804	if (cntr != NPC_MCAM_INVALID_MAP)
2805		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
2806					      req->entry, cntr);
2807
2808	goto exit;
2809
2810 free_all:
2811	/* Free up all entries allocated to the requesting PFFUNC */
2812	npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
2813 exit:
2814	mutex_unlock(&mcam->lock);
2815	return rc;
2816 }
2817
2818 int rvu_mbox_handler_npc_mcam_read_entry(struct rvu *rvu,
2819					 struct npc_mcam_read_entry_req *req,
2820					 struct npc_mcam_read_entry_rsp *rsp)
2821 {
2822	struct npc_mcam *mcam = &rvu->hw->mcam;
2823	u16 pcifunc = req->hdr.pcifunc;
2824	int blkaddr, rc;
2825
2826	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2827	if (blkaddr < 0)
2828		return NPC_MCAM_INVALID_REQ;
2829
2830	mutex_lock(&mcam->lock);
2831	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
2832	if (!rc) {
2833		npc_read_mcam_entry(rvu, mcam, blkaddr, req->entry,
2834				    &rsp->entry_data,
2835				    &rsp->intf, &rsp->enable);
2836	}
2837
2838	mutex_unlock(&mcam->lock);
2839	return rc;
2840 }
2841
2842 int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
2843					  struct npc_mcam_write_entry_req *req,
2844					  struct msg_rsp *rsp)
2845 {
2846	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
2847	struct npc_mcam *mcam = &rvu->hw->mcam;
2848	u16 pcifunc = req->hdr.pcifunc;
2849	int blkaddr, rc;
2850	u8 nix_intf;
2851
2852	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2853	if (blkaddr < 0)
2854		return NPC_MCAM_INVALID_REQ;
2855
2856	mutex_lock(&mcam->lock);
2857	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
2858	if (rc)
2859		goto exit;
2860
2861	if (req->set_cntr &&
2862	    npc_mcam_verify_counter(mcam, pcifunc, req->cntr)) {
2863		rc = NPC_MCAM_INVALID_REQ;
2864		goto exit;
2865	}
2866
2867	if (!is_npc_interface_valid(rvu, req->intf)) {
2868		rc = NPC_MCAM_INVALID_REQ;
2869		goto exit;
2870	}
2871
2872	if (is_npc_intf_tx(req->intf))
2873		nix_intf = pfvf->nix_tx_intf;
2874	else
2875		nix_intf = pfvf->nix_rx_intf;
2876
2877	/* For AF installed rules, nix_intf should be set to the target NIX */
2878	if (is_pffunc_af(req->hdr.pcifunc))
2879		nix_intf = req->intf;
2880
2881	npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, nix_intf,
2882			      &req->entry_data, req->enable_entry);
2883
2884	if (req->set_cntr)
2885		npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
2886					    req->entry, req->cntr);
2887
2888	rc = 0;
2889 exit:
2890	mutex_unlock(&mcam->lock);
2891	return rc;
2892 }
2893
2894 int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
2895					struct npc_mcam_ena_dis_entry_req *req,
2896					struct msg_rsp *rsp)
2897 {
2898	struct npc_mcam *mcam = &rvu->hw->mcam;
2899	u16 pcifunc = req->hdr.pcifunc;
2900	int blkaddr, rc;
2901
2902	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2903	if (blkaddr < 0)
2904		return NPC_MCAM_INVALID_REQ;
2905
2906	mutex_lock(&mcam->lock);
2907	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
2908	mutex_unlock(&mcam->lock);
2909	if (rc)
2910		return rc;
2911
2912	npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, true);
2913
2914	return 0;
2915 }
2916
2917 int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
2918					struct npc_mcam_ena_dis_entry_req *req,
2919					struct msg_rsp *rsp)
2920 {
2921	struct npc_mcam *mcam = &rvu->hw->mcam;
2922	u16 pcifunc = req->hdr.pcifunc;
2923	int blkaddr, rc;
2924
2925	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2926	if (blkaddr < 0)
2927		return NPC_MCAM_INVALID_REQ;
2928
2929	mutex_lock(&mcam->lock);
2930	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
2931	mutex_unlock(&mcam->lock);
2932	if (rc)
2933		return rc;
2934
2935	npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
2936
2937	return 0;
2938 }
2939
2940 int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
2941					  struct npc_mcam_shift_entry_req *req,
2942					  struct npc_mcam_shift_entry_rsp *rsp)
2943 {
2944	struct npc_mcam *mcam = &rvu->hw->mcam;
2945	u16 pcifunc = req->hdr.pcifunc;
2946	u16 old_entry, new_entry;
2947	int blkaddr, rc = 0;
2948	u16 index, cntr;
2949
2950	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2951	if (blkaddr < 0)
2952		return NPC_MCAM_INVALID_REQ;
2953
2954	if (req->shift_count > NPC_MCAM_MAX_SHIFTS)
2955		return NPC_MCAM_INVALID_REQ;
2956
2957	mutex_lock(&mcam->lock);
2958	for (index = 0; index < req->shift_count; index++) {
2959		old_entry = req->curr_entry[index];
2960		new_entry = req->new_entry[index];
2961
2962		/* Check that both the old and new entries are valid
2963		 * and belong to this PFFUNC.
2964		 */
2965		rc = npc_mcam_verify_entry(mcam, pcifunc, old_entry);
2966		if (rc)
2967			break;
2968
2969		rc = npc_mcam_verify_entry(mcam, pcifunc, new_entry);
2970		if (rc)
2971			break;
2972
2973		/* new_entry should not have a counter mapped */
2974		if (mcam->entry2cntr_map[new_entry] != NPC_MCAM_INVALID_MAP) {
2975			rc = NPC_MCAM_PERM_DENIED;
2976			break;
2977		}
2978
2979		/* Disable the new_entry */
2980		npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, false);
2981
2982		/* Copy rule from old entry to new entry */
2983		npc_copy_mcam_entry(rvu, mcam, blkaddr, old_entry, new_entry);
2984
2985		/* Copy counter mapping, if any */
2986		cntr = mcam->entry2cntr_map[old_entry];
2987		if (cntr != NPC_MCAM_INVALID_MAP) {
2988			npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
2989						      old_entry, cntr);
2990			npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
2991						    new_entry, cntr);
2992		}
2993
2994		/* Enable new_entry and disable old_entry */
2995		npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, true);
2996		npc_enable_mcam_entry(rvu, mcam, blkaddr, old_entry, false);
2997	}
2998
2999	/* If the shift failed, report the failed index */
3000	if (index != req->shift_count) {
3001		rc = NPC_MCAM_PERM_DENIED;
3002		rsp->failed_entry_idx = index;
3003	}
3004
3005	mutex_unlock(&mcam->lock);
3006	return rc;
3007 }
3008
3009 static int __npc_mcam_alloc_counter(struct rvu *rvu,
3010				    struct npc_mcam_alloc_counter_req *req,
3011				    struct npc_mcam_alloc_counter_rsp *rsp)
3012 {
3013	struct npc_mcam *mcam = &rvu->hw->mcam;
3014	u16 pcifunc = req->hdr.pcifunc;
3015	u16 max_contig, cntr;
3016	int blkaddr, index;
3017
3018	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3019	if (blkaddr < 0)
3020		return NPC_MCAM_INVALID_REQ;
3021
3022	/* If the request is from a PFFUNC with no NIXLF attached, ignore */
3023	if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
3024		return NPC_MCAM_INVALID_REQ;
3025
3026	/* Since the list of allocated counter IDs is sent to the requester,
3027	 * the max number of non-contiguous counters per mbox msg is limited.
3028	 */
3029	if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS)
3030		return NPC_MCAM_INVALID_REQ;
3031
3033	/* Check if unused counters are available or not */
3034	if (!rvu_rsrc_free_count(&mcam->counters))
3035		return NPC_MCAM_ALLOC_FAILED;
3037
3038	rsp->count = 0;
3039
3040	if (req->contig) {
3041		/* Allocate the requested number of contiguous counters; if
3042		 * unsuccessful, find the max contiguous entries available.
3043		 */
3044		index = npc_mcam_find_zero_area(mcam->counters.bmap,
3045						mcam->counters.max, 0,
3046						req->count, &max_contig);
3047		rsp->count = max_contig;
3048		rsp->cntr = index;
3049		for (cntr = index; cntr < (index + max_contig); cntr++) {
3050			__set_bit(cntr, mcam->counters.bmap);
3051			mcam->cntr2pfvf_map[cntr] = pcifunc;
3052		}
3053	} else {
3054		/* Allocate the requested number of non-contiguous counters;
3055		 * if unsuccessful, allocate as many as possible.
		 */
3057		for (cntr = 0; cntr < req->count; cntr++) {
3058			index = rvu_alloc_rsrc(&mcam->counters);
3059			if (index < 0)
3060				break;
3061			rsp->cntr_list[cntr] = index;
3062			rsp->count++;
3063			mcam->cntr2pfvf_map[index] = pcifunc;
3064		}
3065	}
3066
3067	return 0;
3068 }
3069
3070 int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
3071			struct npc_mcam_alloc_counter_req *req,
3072			struct npc_mcam_alloc_counter_rsp *rsp)
3073 {
3074	struct npc_mcam *mcam = &rvu->hw->mcam;
3075	int err;
3076
3077	mutex_lock(&mcam->lock);
3078
3079	err = __npc_mcam_alloc_counter(rvu, req, rsp);
3080
3081	mutex_unlock(&mcam->lock);
3082	return err;
3083 }
3084
3085 static int __npc_mcam_free_counter(struct rvu *rvu,
3086				   struct npc_mcam_oper_counter_req *req,
3087				   struct msg_rsp *rsp)
3088 {
3089	struct npc_mcam *mcam = &rvu->hw->mcam;
3090	u16 index, entry = 0;
3091	int blkaddr, err;
3092
3093	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3094	if (blkaddr < 0)
3095		return NPC_MCAM_INVALID_REQ;
3096
3097	err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
3098	if (err)
3099		return err;
3101
3102	/* Mark counter as free/unused */
3103	mcam->cntr2pfvf_map[req->cntr] = NPC_MCAM_INVALID_MAP;
3104	rvu_free_rsrc(&mcam->counters, req->cntr);
3105
3106	/* Disable all MCAM entry's stats which are using this counter */
3107	while (entry < mcam->bmap_entries) {
3108		if (!mcam->cntr_refcnt[req->cntr])
3109			break;
3110
3111		index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
3112		if (index >= mcam->bmap_entries)
3113			break;
3114		entry = index + 1;
3115		if (mcam->entry2cntr_map[index] != req->cntr)
3116			continue;
3117
3118		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
3119					      index, req->cntr);
3120	}
3121
3122	return 0;
3123 }
3124
3125 int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
3126		struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
3127 {
3128	struct npc_mcam *mcam = &rvu->hw->mcam;
3129	int err;
3130
3131	mutex_lock(&mcam->lock);
3132
3133	err = __npc_mcam_free_counter(rvu, req, rsp);
3134
3135	mutex_unlock(&mcam->lock);
3136
3137	return err;
3138 }
3139
3140 void __rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
3141					 struct rvu_npc_mcam_rule *rule)
3142 {
3143	struct npc_mcam_oper_counter_req free_req = { 0 };
3144	struct msg_rsp free_rsp;
3145
3146	if (!rule->has_cntr)
3147		return;
3148
3149	free_req.hdr.pcifunc = pcifunc;
3150	free_req.cntr = rule->cntr;
3151
3152	__npc_mcam_free_counter(rvu, &free_req, &free_rsp);
3153	rule->has_cntr = false;
3154 }
3155
3156 void __rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
3157				    struct rvu_npc_mcam_rule *rule,
3158				    struct npc_install_flow_rsp *rsp)
3159 {
3160	struct npc_mcam_alloc_counter_req cntr_req = { 0 };
3161	struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
3162	int err;
3163
3164	cntr_req.hdr.pcifunc = pcifunc;
3165	cntr_req.contig = true;
3166	cntr_req.count = 1;
3167
3168	/* We try to allocate a counter to track the stats of this
3169	 * rule. If a counter cannot be allocated, proceed without one,
3170	 * since counters are scarcer than entries.
3171 */ 3172 err = __npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp); 3173 if (!err && cntr_rsp.count) { 3174 rule->cntr = cntr_rsp.cntr; 3175 rule->has_cntr = true; 3176 rsp->counter = rule->cntr; 3177 } else { 3178 rsp->counter = err; 3179 } 3180 } 3181 3182 int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu, 3183 struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp) 3184 { 3185 struct npc_mcam *mcam = &rvu->hw->mcam; 3186 u16 index, entry = 0; 3187 int blkaddr, rc; 3188 3189 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3190 if (blkaddr < 0) 3191 return NPC_MCAM_INVALID_REQ; 3192 3193 mutex_lock(&mcam->lock); 3194 rc = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); 3195 if (rc) 3196 goto exit; 3197 3198 /* Unmap the MCAM entry and counter */ 3199 if (!req->all) { 3200 rc = npc_mcam_verify_entry(mcam, req->hdr.pcifunc, req->entry); 3201 if (rc) 3202 goto exit; 3203 npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, 3204 req->entry, req->cntr); 3205 goto exit; 3206 } 3207 3208 /* Disable all MCAM entry's stats which are using this counter */ 3209 while (entry < mcam->bmap_entries) { 3210 if (!mcam->cntr_refcnt[req->cntr]) 3211 break; 3212 3213 index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry); 3214 if (index >= mcam->bmap_entries) 3215 break; 3216 entry = index + 1; 3217 3218 if (mcam->entry2cntr_map[index] != req->cntr) 3219 continue; 3220 3221 npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, 3222 index, req->cntr); 3223 } 3224 exit: 3225 mutex_unlock(&mcam->lock); 3226 return rc; 3227 } 3228 3229 int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu, 3230 struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp) 3231 { 3232 struct npc_mcam *mcam = &rvu->hw->mcam; 3233 int blkaddr, err; 3234 3235 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3236 if (blkaddr < 0) 3237 return NPC_MCAM_INVALID_REQ; 3238 3239 mutex_lock(&mcam->lock); 3240 err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); 3241 mutex_unlock(&mcam->lock); 3242 if (err) 3243 return err; 3244 3245 rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr), 0x00); 3246 3247 return 0; 3248 } 3249 3250 int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu, 3251 struct npc_mcam_oper_counter_req *req, 3252 struct npc_mcam_oper_counter_rsp *rsp) 3253 { 3254 struct npc_mcam *mcam = &rvu->hw->mcam; 3255 int blkaddr, err; 3256 3257 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3258 if (blkaddr < 0) 3259 return NPC_MCAM_INVALID_REQ; 3260 3261 mutex_lock(&mcam->lock); 3262 err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); 3263 mutex_unlock(&mcam->lock); 3264 if (err) 3265 return err; 3266 3267 rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr)); 3268 rsp->stat &= BIT_ULL(48) - 1; 3269 3270 return 0; 3271 } 3272 3273 int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, 3274 struct npc_mcam_alloc_and_write_entry_req *req, 3275 struct npc_mcam_alloc_and_write_entry_rsp *rsp) 3276 { 3277 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 3278 struct npc_mcam_alloc_counter_req cntr_req; 3279 struct npc_mcam_alloc_counter_rsp cntr_rsp; 3280 struct npc_mcam_alloc_entry_req entry_req; 3281 struct npc_mcam_alloc_entry_rsp entry_rsp; 3282 struct npc_mcam *mcam = &rvu->hw->mcam; 3283 u16 entry = NPC_MCAM_ENTRY_INVALID; 3284 u16 cntr = NPC_MCAM_ENTRY_INVALID; 3285 int blkaddr, rc; 3286 u8 nix_intf; 3287 3288 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3289 if (blkaddr < 0) 3290 return NPC_MCAM_INVALID_REQ; 3291 3292 
	if (!is_npc_interface_valid(rvu, req->intf))
3293		return NPC_MCAM_INVALID_REQ;
3294
3295	/* Try to allocate a MCAM entry */
3296	entry_req.hdr.pcifunc = req->hdr.pcifunc;
3297	entry_req.contig = true;
3298	entry_req.priority = req->priority;
3299	entry_req.ref_entry = req->ref_entry;
3300	entry_req.count = 1;
3301
3302	rc = rvu_mbox_handler_npc_mcam_alloc_entry(rvu,
3303						   &entry_req, &entry_rsp);
3304	if (rc)
3305		return rc;
3306
3307	if (!entry_rsp.count)
3308		return NPC_MCAM_ALLOC_FAILED;
3309
3310	entry = entry_rsp.entry;
3311
3312	if (!req->alloc_cntr)
3313		goto write_entry;
3314
3315	/* Now allocate counter */
3316	cntr_req.hdr.pcifunc = req->hdr.pcifunc;
3317	cntr_req.contig = true;
3318	cntr_req.count = 1;
3319
3320	rc = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
3321	if (rc) {
3322		/* Free allocated MCAM entry */
3323		mutex_lock(&mcam->lock);
3324		mcam->entry2pfvf_map[entry] = NPC_MCAM_INVALID_MAP;
3325		npc_mcam_clear_bit(mcam, entry);
3326		mutex_unlock(&mcam->lock);
3327		return rc;
3328	}
3329
3330	cntr = cntr_rsp.cntr;
3331
3332 write_entry:
3333	mutex_lock(&mcam->lock);
3334
3335	if (is_npc_intf_tx(req->intf))
3336		nix_intf = pfvf->nix_tx_intf;
3337	else
3338		nix_intf = pfvf->nix_rx_intf;
3339
3340	npc_config_mcam_entry(rvu, mcam, blkaddr, entry, nix_intf,
3341			      &req->entry_data, req->enable_entry);
3342
3343	if (req->alloc_cntr)
3344		npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, entry, cntr);
3345	mutex_unlock(&mcam->lock);
3346
3347	rsp->entry = entry;
3348	rsp->cntr = cntr;
3349
3350	return 0;
3351 }
3352
3353 #define GET_KEX_CFG(intf) \
3354	rvu_read64(rvu, BLKADDR_NPC, NPC_AF_INTFX_KEX_CFG(intf))
3355
3356 #define GET_KEX_FLAGS(ld) \
3357	rvu_read64(rvu, BLKADDR_NPC, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld))
3358
3359 #define GET_KEX_LD(intf, lid, lt, ld)	\
3360	rvu_read64(rvu, BLKADDR_NPC,	\
3361		   NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld))
3362
3363 #define GET_KEX_LDFLAGS(intf, ld, fl)	\
3364	rvu_read64(rvu, BLKADDR_NPC,	\
3365		   NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, fl))
3366
3367 int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
3368				     struct npc_get_kex_cfg_rsp *rsp)
3369 {
3370	int lid, lt, ld, fl;
3371
3372	rsp->rx_keyx_cfg = GET_KEX_CFG(NIX_INTF_RX);
3373	rsp->tx_keyx_cfg = GET_KEX_CFG(NIX_INTF_TX);
3374	for (lid = 0; lid < NPC_MAX_LID; lid++) {
3375		for (lt = 0; lt < NPC_MAX_LT; lt++) {
3376			for (ld = 0; ld < NPC_MAX_LD; ld++) {
3377				rsp->intf_lid_lt_ld[NIX_INTF_RX][lid][lt][ld] =
3378					GET_KEX_LD(NIX_INTF_RX, lid, lt, ld);
3379				rsp->intf_lid_lt_ld[NIX_INTF_TX][lid][lt][ld] =
3380					GET_KEX_LD(NIX_INTF_TX, lid, lt, ld);
3381			}
3382		}
3383	}
3384	for (ld = 0; ld < NPC_MAX_LD; ld++)
3385		rsp->kex_ld_flags[ld] = GET_KEX_FLAGS(ld);
3386
3387	for (ld = 0; ld < NPC_MAX_LD; ld++) {
3388		for (fl = 0; fl < NPC_MAX_LFL; fl++) {
3389			rsp->intf_ld_flags[NIX_INTF_RX][ld][fl] =
3390				GET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl);
3391			rsp->intf_ld_flags[NIX_INTF_TX][ld][fl] =
3392				GET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl);
3393		}
3394	}
3395	memcpy(rsp->mkex_pfl_name, rvu->mkex_pfl_name, MKEX_NAME_LEN);
3396	return 0;
3397 }
3398
3399 static int
3400 npc_set_var_len_offset_pkind(struct rvu *rvu, u16 pcifunc, u64 pkind,
3401			     u8 var_len_off, u8 var_len_off_mask, u8 shift_dir)
3402 {
3403	struct npc_kpu_action0 *act0;
3404	u8 shift_count = 0;
3405	int blkaddr;
3406	u64 val;
3407
3408	if (!var_len_off_mask)
3409		return -EINVAL;
3410
3411	if (var_len_off_mask != 0xff) {
3412		if (shift_dir)
3413			shift_count = __ffs(var_len_off_mask);
3414		else
3415			shift_count = (8 - __fls(var_len_off_mask));
3416	}
3417	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc);
3418	if (blkaddr < 0) {
3419		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
3420		return -EINVAL;
3421	}
3422	val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
3423	act0 = (struct npc_kpu_action0 *)&val;
3424	act0->var_len_shift = shift_count;
3425	act0->var_len_right = shift_dir;
3426	act0->var_len_mask = var_len_off_mask;
3427	act0->var_len_offset = var_len_off;
3428	rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);
3429	return 0;
3430 }
3431
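/* Worked example (mask value hypothetical) for the shift computation
 * in npc_set_var_len_offset_pkind() above: for var_len_off_mask = 0x3C
 * (bits 2..5 set), __ffs(0x3C) = 2 and __fls(0x3C) = 5, so shift_count
 * becomes 2 when shift_dir is set and 8 - 5 = 3 otherwise; a mask of
 * 0xff leaves shift_count at 0.
 */
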
3432 int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
3433			   u64 pkind, u8 var_len_off, u8 var_len_off_mask,
3434			   u8 shift_dir)
3435 {
3437	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
3438	int blkaddr, nixlf, rc, intf_mode;
3439	int pf = rvu_get_pf(rvu->pdev, pcifunc);
3440	u64 rxpkind, txpkind;
3441	u8 cgx_id, lmac_id;
3442
3443	/* Use the default pkind to disable EDSA/HIGIG parsing */
3444	rxpkind = rvu_npc_get_pkind(rvu, pf);
3445	txpkind = NPC_TX_DEF_PKIND;
3446	intf_mode = NPC_INTF_MODE_DEF;
3447
3448	if (mode & OTX2_PRIV_FLAGS_CUSTOM) {
3449		if (pkind == NPC_RX_CUSTOM_PRE_L2_PKIND) {
3450			rc = npc_set_var_len_offset_pkind(rvu, pcifunc, pkind,
3451							  var_len_off,
3452							  var_len_off_mask,
3453							  shift_dir);
3454			if (rc)
3455				return rc;
3456		}
3457		rxpkind = pkind;
3458		txpkind = pkind;
3459	}
3460
3461	if (dir & PKIND_RX) {
3462		/* An RX pkind set request is valid only for CGX mapped PFs */
3463		if (!is_cgx_config_permitted(rvu, pcifunc))
3464			return 0;
3465		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
3466
3467		rc = cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
3468				   rxpkind);
3469		if (rc)
3470			return rc;
3471	}
3472
3473	if (dir & PKIND_TX) {
3474		/* A TX pkind set request is valid only if the PCIFUNC has a NIXLF attached */
3475		rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3476		if (rc)
3477			return rc;
3478
3479		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf),
3480			    txpkind);
3481	}
3482
3483	pfvf->intf_mode = intf_mode;
3484	return 0;
3485 }
3486
3487 int rvu_mbox_handler_npc_set_pkind(struct rvu *rvu, struct npc_set_pkind *req,
3488				   struct msg_rsp *rsp)
3489 {
3490	return rvu_npc_set_parse_mode(rvu, req->hdr.pcifunc, req->mode,
3491				      req->dir, req->pkind, req->var_len_off,
3492				      req->var_len_off_mask, req->shift_dir);
3493 }
3494
3495 int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu,
3496					      struct msg_req *req,
3497					      struct npc_mcam_read_base_rule_rsp *rsp)
3498 {
3499	struct npc_mcam *mcam = &rvu->hw->mcam;
3500	int index, blkaddr, nixlf, rc = 0;
3501	u16 pcifunc = req->hdr.pcifunc;
3502	struct rvu_pfvf *pfvf;
3503	u8 intf, enable;
3504
3505	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3506	if (blkaddr < 0)
3507		return NPC_MCAM_INVALID_REQ;
3508
3509	/* Return the channel number in case of a PF */
3510	if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
3511		pfvf = rvu_get_pfvf(rvu, pcifunc);
3512		rsp->entry.kw[0] = pfvf->rx_chan_base;
3513		rsp->entry.kw_mask[0] = 0xFFFULL;
3514		goto out;
3515	}
3516
3517	/* Find the pkt steering rule installed by the PF to this VF */
3518	mutex_lock(&mcam->lock);
3519	for (index = 0; index < mcam->bmap_entries; index++) {
3520		if (mcam->entry2target_pffunc[index] == pcifunc)
3521			goto read_entry;
3522	}
3523
3524	rc = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3525	if (rc < 0) {
3526		mutex_unlock(&mcam->lock);
3527		goto out;
3528	}
3529	/* Read the default ucast entry if there is no pkt steering rule */
3530	index =
npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, 3531 NIXLF_UCAST_ENTRY); 3532 read_entry: 3533 /* Read the mcam entry */ 3534 npc_read_mcam_entry(rvu, mcam, blkaddr, index, &rsp->entry, &intf, 3535 &enable); 3536 mutex_unlock(&mcam->lock); 3537 out: 3538 return rc; 3539 } 3540 3541 int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu, 3542 struct npc_mcam_get_stats_req *req, 3543 struct npc_mcam_get_stats_rsp *rsp) 3544 { 3545 struct npc_mcam *mcam = &rvu->hw->mcam; 3546 u16 index, cntr; 3547 int blkaddr; 3548 u64 regval; 3549 u32 bank; 3550 3551 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3552 if (blkaddr < 0) 3553 return NPC_MCAM_INVALID_REQ; 3554 3555 mutex_lock(&mcam->lock); 3556 3557 index = req->entry & (mcam->banksize - 1); 3558 bank = npc_get_bank(mcam, req->entry); 3559 3560 /* read MCAM entry STAT_ACT register */ 3561 regval = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank)); 3562 3563 if (!(regval & rvu->hw->npc_stat_ena)) { 3564 rsp->stat_ena = 0; 3565 mutex_unlock(&mcam->lock); 3566 return 0; 3567 } 3568 3569 cntr = regval & 0x1FF; 3570 3571 rsp->stat_ena = 1; 3572 rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(cntr)); 3573 rsp->stat &= BIT_ULL(48) - 1; 3574 3575 mutex_unlock(&mcam->lock); 3576 3577 return 0; 3578 } 3579 3580 void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf) 3581 { 3582 struct npc_mcam *mcam = &rvu->hw->mcam; 3583 struct rvu_npc_mcam_rule *rule; 3584 int ucast_idx, blkaddr; 3585 3586 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3587 if (blkaddr < 0) 3588 return; 3589 3590 ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc, 3591 nixlf, NIXLF_UCAST_ENTRY); 3592 3593 npc_enable_mcam_entry(rvu, mcam, blkaddr, ucast_idx, false); 3594 3595 npc_set_mcam_action(rvu, mcam, blkaddr, ucast_idx, 0); 3596 3597 npc_clear_mcam_entry(rvu, mcam, blkaddr, ucast_idx); 3598 3599 mutex_lock(&mcam->lock); 3600 list_for_each_entry(rule, &mcam->mcam_rules, list) { 3601 if (rule->entry == ucast_idx) { 3602 list_del(&rule->list); 3603 kfree(rule); 3604 break; 3605 } 3606 } 3607 mutex_unlock(&mcam->lock); 3608 } 3609
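/* Decode sketch (register value hypothetical) matching
 * rvu_mbox_handler_npc_mcam_entry_stats() above: if
 * NPC_AF_MCAMEX_BANKX_STAT_ACT reads 0x203 and npc_stat_ena is
 * BIT_ULL(9), the enable bit (0x200) is set and
 * cntr = 0x203 & 0x1FF = 3, so the 48-bit match count is then read
 * from NPC_AF_MATCH_STATX(3).
 */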