1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell RVU Admin Function driver 3 * 4 * Copyright (C) 2022 Marvell. 5 * 6 */ 7 8 #include <linux/bitfield.h> 9 #include <linux/module.h> 10 #include <linux/pci.h> 11 #include <linux/firmware.h> 12 #include <linux/stddef.h> 13 #include <linux/debugfs.h> 14 15 #include "rvu_struct.h" 16 #include "rvu_reg.h" 17 #include "rvu.h" 18 #include "npc.h" 19 #include "cgx.h" 20 #include "rvu_npc_fs.h" 21 #include "rvu_npc_hash.h" 22 23 static u64 rvu_npc_wide_extract(const u64 input[], size_t start_bit, 24 size_t width_bits) 25 { 26 const u64 mask = ~(u64)((~(__uint128_t)0) << width_bits); 27 const size_t msb = start_bit + width_bits - 1; 28 const size_t lword = start_bit >> 6; 29 const size_t uword = msb >> 6; 30 size_t lbits; 31 u64 hi, lo; 32 33 if (lword == uword) 34 return (input[lword] >> (start_bit & 63)) & mask; 35 36 lbits = 64 - (start_bit & 63); 37 hi = input[uword]; 38 lo = (input[lword] >> (start_bit & 63)); 39 return ((hi << lbits) | lo) & mask; 40 } 41 42 static void rvu_npc_lshift_key(u64 *key, size_t key_bit_len) 43 { 44 u64 prev_orig_word = 0; 45 u64 cur_orig_word = 0; 46 size_t extra = key_bit_len % 64; 47 size_t max_idx = key_bit_len / 64; 48 size_t i; 49 50 if (extra) 51 max_idx++; 52 53 for (i = 0; i < max_idx; i++) { 54 cur_orig_word = key[i]; 55 key[i] = key[i] << 1; 56 key[i] |= ((prev_orig_word >> 63) & 0x1); 57 prev_orig_word = cur_orig_word; 58 } 59 } 60 61 static u32 rvu_npc_toeplitz_hash(const u64 *data, u64 *key, size_t data_bit_len, 62 size_t key_bit_len) 63 { 64 u32 hash_out = 0; 65 u64 temp_data = 0; 66 int i; 67 68 for (i = data_bit_len - 1; i >= 0; i--) { 69 temp_data = (data[i / 64]); 70 temp_data = temp_data >> (i % 64); 71 temp_data &= 0x1; 72 if (temp_data) 73 hash_out ^= (u32)(rvu_npc_wide_extract(key, key_bit_len - 32, 32)); 74 75 rvu_npc_lshift_key(key, key_bit_len); 76 } 77 78 return hash_out; 79 } 80 81 u32 npc_field_hash_calc(u64 *ldata, struct npc_get_field_hash_info_rsp rsp, 82 u8 
intf, u8 hash_idx) 83 { 84 u64 hash_key[3]; 85 u64 data_padded[2]; 86 u32 field_hash; 87 88 hash_key[0] = rsp.secret_key[1] << 31; 89 hash_key[0] |= rsp.secret_key[2]; 90 hash_key[1] = rsp.secret_key[1] >> 33; 91 hash_key[1] |= rsp.secret_key[0] << 31; 92 hash_key[2] = rsp.secret_key[0] >> 33; 93 94 data_padded[0] = rsp.hash_mask[intf][hash_idx][0] & ldata[0]; 95 data_padded[1] = rsp.hash_mask[intf][hash_idx][1] & ldata[1]; 96 field_hash = rvu_npc_toeplitz_hash(data_padded, hash_key, 128, 159); 97 98 field_hash &= FIELD_GET(GENMASK(63, 32), rsp.hash_ctrl[intf][hash_idx]); 99 field_hash += FIELD_GET(GENMASK(31, 0), rsp.hash_ctrl[intf][hash_idx]); 100 return field_hash; 101 } 102 103 static u64 npc_update_use_hash(struct rvu *rvu, int blkaddr, 104 u8 intf, int lid, int lt, int ld) 105 { 106 u8 hdr, key; 107 u64 cfg; 108 109 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld)); 110 hdr = FIELD_GET(NPC_HDR_OFFSET, cfg); 111 key = FIELD_GET(NPC_KEY_OFFSET, cfg); 112 113 /* Update use_hash(bit-20) to 'true' and 114 * bytesm1(bit-16:19) to '0x3' in KEX_LD_CFG 115 */ 116 cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03, 117 hdr, 0x1, 0x0, key); 118 119 return cfg; 120 } 121 122 static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr, 123 u8 intf) 124 { 125 struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash; 126 int lid, lt, ld, hash_cnt = 0; 127 128 if (is_npc_intf_tx(intf)) 129 return; 130 131 /* Program HASH_CFG */ 132 for (lid = 0; lid < NPC_MAX_LID; lid++) { 133 for (lt = 0; lt < NPC_MAX_LT; lt++) { 134 for (ld = 0; ld < NPC_MAX_LD; ld++) { 135 if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) { 136 u64 cfg; 137 138 if (hash_cnt == NPC_MAX_HASH) 139 return; 140 141 cfg = npc_update_use_hash(rvu, blkaddr, 142 intf, lid, lt, ld); 143 /* Set updated KEX configuration */ 144 SET_KEX_LD(intf, lid, lt, ld, cfg); 145 /* Set HASH configuration */ 146 SET_KEX_LD_HASH(intf, ld, 147 mkex_hash->hash[intf][ld]); 148 SET_KEX_LD_HASH_MASK(intf, ld, 
0, 149 mkex_hash->hash_mask[intf][ld][0]); 150 SET_KEX_LD_HASH_MASK(intf, ld, 1, 151 mkex_hash->hash_mask[intf][ld][1]); 152 SET_KEX_LD_HASH_CTRL(intf, ld, 153 mkex_hash->hash_ctrl[intf][ld]); 154 155 hash_cnt++; 156 } 157 } 158 } 159 } 160 } 161 162 static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr, 163 u8 intf) 164 { 165 struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash; 166 int lid, lt, ld, hash_cnt = 0; 167 168 if (is_npc_intf_rx(intf)) 169 return; 170 171 /* Program HASH_CFG */ 172 for (lid = 0; lid < NPC_MAX_LID; lid++) { 173 for (lt = 0; lt < NPC_MAX_LT; lt++) { 174 for (ld = 0; ld < NPC_MAX_LD; ld++) 175 if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) { 176 u64 cfg; 177 178 if (hash_cnt == NPC_MAX_HASH) 179 return; 180 181 cfg = npc_update_use_hash(rvu, blkaddr, 182 intf, lid, lt, ld); 183 /* Set updated KEX configuration */ 184 SET_KEX_LD(intf, lid, lt, ld, cfg); 185 /* Set HASH configuration */ 186 SET_KEX_LD_HASH(intf, ld, 187 mkex_hash->hash[intf][ld]); 188 SET_KEX_LD_HASH_MASK(intf, ld, 0, 189 mkex_hash->hash_mask[intf][ld][0]); 190 SET_KEX_LD_HASH_MASK(intf, ld, 1, 191 mkex_hash->hash_mask[intf][ld][1]); 192 SET_KEX_LD_HASH_CTRL(intf, ld, 193 mkex_hash->hash_ctrl[intf][ld]); 194 hash_cnt++; 195 } 196 } 197 } 198 } 199 200 void npc_config_secret_key(struct rvu *rvu, int blkaddr) 201 { 202 struct hw_cap *hwcap = &rvu->hw->cap; 203 struct rvu_hwinfo *hw = rvu->hw; 204 u8 intf; 205 206 if (!hwcap->npc_hash_extract) 207 return; 208 209 for (intf = 0; intf < hw->npc_intfs; intf++) { 210 rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf), 211 RVU_NPC_HASH_SECRET_KEY0); 212 rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf), 213 RVU_NPC_HASH_SECRET_KEY1); 214 rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf), 215 RVU_NPC_HASH_SECRET_KEY2); 216 } 217 } 218 219 void npc_program_mkex_hash(struct rvu *rvu, int blkaddr) 220 { 221 struct npc_mcam_kex_hash *mh = rvu->kpu.mkex_hash; 222 struct hw_cap *hwcap = 
&rvu->hw->cap; 223 u8 intf, ld, hdr_offset, byte_len; 224 struct rvu_hwinfo *hw = rvu->hw; 225 u64 cfg; 226 227 /* Check if hardware supports hash extraction */ 228 if (!hwcap->npc_hash_extract) 229 return; 230 231 /* Check if IPv6 source/destination address 232 * should be hash enabled. 233 * Hashing reduces 128bit SIP/DIP fields to 32bit 234 * so that 224 bit X2 key can be used for IPv6 based filters as well, 235 * which in turn results in more number of MCAM entries available for 236 * use. 237 * 238 * Hashing of IPV6 SIP/DIP is enabled in below scenarios 239 * 1. If the silicon variant supports hashing feature 240 * 2. If the number of bytes of IP addr being extracted is 4 bytes ie 241 * 32bit. The assumption here is that if user wants 8bytes of LSB of 242 * IP addr or full 16 bytes then his intention is not to use 32bit 243 * hash. 244 */ 245 for (intf = 0; intf < hw->npc_intfs; intf++) { 246 for (ld = 0; ld < NPC_MAX_LD; ld++) { 247 cfg = rvu_read64(rvu, blkaddr, 248 NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, 249 NPC_LID_LC, 250 NPC_LT_LC_IP6, 251 ld)); 252 hdr_offset = FIELD_GET(NPC_HDR_OFFSET, cfg); 253 byte_len = FIELD_GET(NPC_BYTESM, cfg); 254 /* Hashing of IPv6 source/destination address should be 255 * enabled if, 256 * hdr_offset == 8 (offset of source IPv6 address) or 257 * hdr_offset == 24 (offset of destination IPv6) 258 * address) and the number of byte to be 259 * extracted is 4. As per hardware configuration 260 * byte_len should be == actual byte_len - 1. 261 * Hence byte_len is checked against 3 but nor 4. 
262 */ 263 if ((hdr_offset == 8 || hdr_offset == 24) && byte_len == 3) 264 mh->lid_lt_ld_hash_en[intf][NPC_LID_LC][NPC_LT_LC_IP6][ld] = true; 265 } 266 } 267 268 /* Update hash configuration if the field is hash enabled */ 269 for (intf = 0; intf < hw->npc_intfs; intf++) { 270 npc_program_mkex_hash_rx(rvu, blkaddr, intf); 271 npc_program_mkex_hash_tx(rvu, blkaddr, intf); 272 } 273 } 274 275 void npc_update_field_hash(struct rvu *rvu, u8 intf, 276 struct mcam_entry *entry, 277 int blkaddr, 278 u64 features, 279 struct flow_msg *pkt, 280 struct flow_msg *mask, 281 struct flow_msg *opkt, 282 struct flow_msg *omask) 283 { 284 struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash; 285 struct npc_get_field_hash_info_req req; 286 struct npc_get_field_hash_info_rsp rsp; 287 u64 ldata[2], cfg; 288 u32 field_hash; 289 u8 hash_idx; 290 291 if (!rvu->hw->cap.npc_hash_extract) { 292 dev_dbg(rvu->dev, "%s: Field hash extract feature is not supported\n", __func__); 293 return; 294 } 295 296 req.intf = intf; 297 rvu_mbox_handler_npc_get_field_hash_info(rvu, &req, &rsp); 298 299 for (hash_idx = 0; hash_idx < NPC_MAX_HASH; hash_idx++) { 300 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_CFG(intf, hash_idx)); 301 if ((cfg & BIT_ULL(11)) && (cfg & BIT_ULL(12))) { 302 u8 lid = (cfg & GENMASK_ULL(10, 8)) >> 8; 303 u8 ltype = (cfg & GENMASK_ULL(7, 4)) >> 4; 304 u8 ltype_mask = cfg & GENMASK_ULL(3, 0); 305 306 if (mkex_hash->lid_lt_ld_hash_en[intf][lid][ltype][hash_idx]) { 307 switch (ltype & ltype_mask) { 308 /* If hash extract enabled is supported for IPv6 then 309 * 128 bit IPv6 source and destination addressed 310 * is hashed to 32 bit value. 
311 */ 312 case NPC_LT_LC_IP6: 313 /* ld[0] == hash_idx[0] == Source IPv6 314 * ld[1] == hash_idx[1] == Destination IPv6 315 */ 316 if ((features & BIT_ULL(NPC_SIP_IPV6)) && !hash_idx) { 317 u32 src_ip[IPV6_WORDS]; 318 319 be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS); 320 ldata[1] = (u64)src_ip[0] << 32 | src_ip[1]; 321 ldata[0] = (u64)src_ip[2] << 32 | src_ip[3]; 322 field_hash = npc_field_hash_calc(ldata, 323 rsp, 324 intf, 325 hash_idx); 326 npc_update_entry(rvu, NPC_SIP_IPV6, entry, 327 field_hash, 0, 328 GENMASK(31, 0), 0, intf); 329 memcpy(&opkt->ip6src, &pkt->ip6src, 330 sizeof(pkt->ip6src)); 331 memcpy(&omask->ip6src, &mask->ip6src, 332 sizeof(mask->ip6src)); 333 } else if ((features & BIT_ULL(NPC_DIP_IPV6)) && hash_idx) { 334 u32 dst_ip[IPV6_WORDS]; 335 336 be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS); 337 ldata[1] = (u64)dst_ip[0] << 32 | dst_ip[1]; 338 ldata[0] = (u64)dst_ip[2] << 32 | dst_ip[3]; 339 field_hash = npc_field_hash_calc(ldata, 340 rsp, 341 intf, 342 hash_idx); 343 npc_update_entry(rvu, NPC_DIP_IPV6, entry, 344 field_hash, 0, 345 GENMASK(31, 0), 0, intf); 346 memcpy(&opkt->ip6dst, &pkt->ip6dst, 347 sizeof(pkt->ip6dst)); 348 memcpy(&omask->ip6dst, &mask->ip6dst, 349 sizeof(mask->ip6dst)); 350 } 351 352 break; 353 } 354 } 355 } 356 } 357 } 358 359 int rvu_mbox_handler_npc_get_field_hash_info(struct rvu *rvu, 360 struct npc_get_field_hash_info_req *req, 361 struct npc_get_field_hash_info_rsp *rsp) 362 { 363 u64 *secret_key = rsp->secret_key; 364 u8 intf = req->intf; 365 int i, j, blkaddr; 366 367 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 368 if (blkaddr < 0) { 369 dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); 370 return -EINVAL; 371 } 372 373 secret_key[0] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf)); 374 secret_key[1] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf)); 375 secret_key[2] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf)); 376 377 for (i = 0; i < NPC_MAX_HASH; i++) 
{ 378 for (j = 0; j < NPC_MAX_HASH_MASK; j++) { 379 rsp->hash_mask[NIX_INTF_RX][i][j] = 380 GET_KEX_LD_HASH_MASK(NIX_INTF_RX, i, j); 381 rsp->hash_mask[NIX_INTF_TX][i][j] = 382 GET_KEX_LD_HASH_MASK(NIX_INTF_TX, i, j); 383 } 384 } 385 386 for (i = 0; i < NPC_MAX_INTF; i++) 387 for (j = 0; j < NPC_MAX_HASH; j++) 388 rsp->hash_ctrl[i][j] = GET_KEX_LD_HASH_CTRL(i, j); 389 390 return 0; 391 } 392 393 /** 394 * rvu_npc_exact_mac2u64 - utility function to convert mac address to u64. 395 * @mac_addr: MAC address. 396 * Return: mdata for exact match table. 397 */ 398 static u64 rvu_npc_exact_mac2u64(u8 *mac_addr) 399 { 400 u64 mac = 0; 401 int index; 402 403 for (index = ETH_ALEN - 1; index >= 0; index--) 404 mac |= ((u64)*mac_addr++) << (8 * index); 405 406 return mac; 407 } 408 409 /** 410 * rvu_exact_prepare_mdata - Make mdata for mcam entry 411 * @mac: MAC address 412 * @chan: Channel number. 413 * @ctype: Channel Type. 414 * @mask: LDATA mask. 415 * Return: Meta data 416 */ 417 static u64 rvu_exact_prepare_mdata(u8 *mac, u16 chan, u16 ctype, u64 mask) 418 { 419 u64 ldata = rvu_npc_exact_mac2u64(mac); 420 421 /* Please note that mask is 48bit which excludes chan and ctype. 422 * Increase mask bits if we need to include them as well. 423 */ 424 ldata |= ((u64)chan << 48); 425 ldata |= ((u64)ctype << 60); 426 ldata &= mask; 427 ldata = ldata << 2; 428 429 return ldata; 430 } 431 432 /** 433 * rvu_exact_calculate_hash - calculate hash index to mem table. 434 * @rvu: resource virtualization unit. 435 * @chan: Channel number 436 * @ctype: Channel type. 437 * @mac: MAC address 438 * @mask: HASH mask. 439 * @table_depth: Depth of table. 
440 * Return: Hash value 441 */ 442 static u32 rvu_exact_calculate_hash(struct rvu *rvu, u16 chan, u16 ctype, u8 *mac, 443 u64 mask, u32 table_depth) 444 { 445 struct npc_exact_table *table = rvu->hw->table; 446 u64 hash_key[2]; 447 u64 key_in[2]; 448 u64 ldata; 449 u32 hash; 450 451 key_in[0] = RVU_NPC_HASH_SECRET_KEY0; 452 key_in[1] = RVU_NPC_HASH_SECRET_KEY2; 453 454 hash_key[0] = key_in[0] << 31; 455 hash_key[0] |= key_in[1]; 456 hash_key[1] = key_in[0] >> 33; 457 458 ldata = rvu_exact_prepare_mdata(mac, chan, ctype, mask); 459 460 dev_dbg(rvu->dev, "%s: ldata=0x%llx hash_key0=0x%llx hash_key2=0x%llx\n", __func__, 461 ldata, hash_key[1], hash_key[0]); 462 hash = rvu_npc_toeplitz_hash(&ldata, (u64 *)hash_key, 64, 95); 463 464 hash &= table->mem_table.hash_mask; 465 hash += table->mem_table.hash_offset; 466 dev_dbg(rvu->dev, "%s: hash=%x\n", __func__, hash); 467 468 return hash; 469 } 470 471 /** 472 * rvu_npc_exact_alloc_mem_table_entry - find free entry in 4 way table. 473 * @rvu: resource virtualization unit. 474 * @way: Indicate way to table. 475 * @index: Hash index to 4 way table. 476 * @hash: Hash value. 477 * 478 * Searches 4 way table using hash index. Returns 0 on success. 479 * Return: 0 upon success. 480 */ 481 static int rvu_npc_exact_alloc_mem_table_entry(struct rvu *rvu, u8 *way, 482 u32 *index, unsigned int hash) 483 { 484 struct npc_exact_table *table; 485 int depth, i; 486 487 table = rvu->hw->table; 488 depth = table->mem_table.depth; 489 490 /* Check all the 4 ways for a free slot. 
*/ 491 mutex_lock(&table->lock); 492 for (i = 0; i < table->mem_table.ways; i++) { 493 if (test_bit(hash + i * depth, table->mem_table.bmap)) 494 continue; 495 496 set_bit(hash + i * depth, table->mem_table.bmap); 497 mutex_unlock(&table->lock); 498 499 dev_dbg(rvu->dev, "%s: mem table entry alloc success (way=%d index=%d)\n", 500 __func__, i, hash); 501 502 *way = i; 503 *index = hash; 504 return 0; 505 } 506 mutex_unlock(&table->lock); 507 508 dev_dbg(rvu->dev, "%s: No space in 4 way exact way, weight=%u\n", __func__, 509 bitmap_weight(table->mem_table.bmap, table->mem_table.depth)); 510 return -ENOSPC; 511 } 512 513 /** 514 * rvu_npc_exact_free_id - Free seq id from bitmat. 515 * @rvu: Resource virtualization unit. 516 * @seq_id: Sequence identifier to be freed. 517 */ 518 static void rvu_npc_exact_free_id(struct rvu *rvu, u32 seq_id) 519 { 520 struct npc_exact_table *table; 521 522 table = rvu->hw->table; 523 mutex_lock(&table->lock); 524 clear_bit(seq_id, table->id_bmap); 525 mutex_unlock(&table->lock); 526 dev_dbg(rvu->dev, "%s: freed id %d\n", __func__, seq_id); 527 } 528 529 /** 530 * rvu_npc_exact_alloc_id - Alloc seq id from bitmap. 531 * @rvu: Resource virtualization unit. 532 * @seq_id: Sequence identifier. 533 * Return: True or false. 
534 */ 535 static bool rvu_npc_exact_alloc_id(struct rvu *rvu, u32 *seq_id) 536 { 537 struct npc_exact_table *table; 538 u32 idx; 539 540 table = rvu->hw->table; 541 542 mutex_lock(&table->lock); 543 idx = find_first_zero_bit(table->id_bmap, table->tot_ids); 544 if (idx == table->tot_ids) { 545 mutex_unlock(&table->lock); 546 dev_err(rvu->dev, "%s: No space in id bitmap (%d)\n", 547 __func__, table->tot_ids); 548 549 return false; 550 } 551 552 /* Mark bit map to indicate that slot is used.*/ 553 set_bit(idx, table->id_bmap); 554 mutex_unlock(&table->lock); 555 556 *seq_id = idx; 557 dev_dbg(rvu->dev, "%s: Allocated id (%d)\n", __func__, *seq_id); 558 559 return true; 560 } 561 562 /** 563 * rvu_npc_exact_alloc_cam_table_entry - find free slot in fully associative table. 564 * @rvu: resource virtualization unit. 565 * @index: Index to exact CAM table. 566 * Return: 0 upon success; else error number. 567 */ 568 static int rvu_npc_exact_alloc_cam_table_entry(struct rvu *rvu, int *index) 569 { 570 struct npc_exact_table *table; 571 u32 idx; 572 573 table = rvu->hw->table; 574 575 mutex_lock(&table->lock); 576 idx = find_first_zero_bit(table->cam_table.bmap, table->cam_table.depth); 577 if (idx == table->cam_table.depth) { 578 mutex_unlock(&table->lock); 579 dev_info(rvu->dev, "%s: No space in exact cam table, weight=%u\n", __func__, 580 bitmap_weight(table->cam_table.bmap, table->cam_table.depth)); 581 return -ENOSPC; 582 } 583 584 /* Mark bit map to indicate that slot is used.*/ 585 set_bit(idx, table->cam_table.bmap); 586 mutex_unlock(&table->lock); 587 588 *index = idx; 589 dev_dbg(rvu->dev, "%s: cam table entry alloc success (index=%d)\n", 590 __func__, idx); 591 return 0; 592 } 593 594 /** 595 * rvu_exact_prepare_table_entry - Data for exact match table entry. 596 * @rvu: Resource virtualization unit. 597 * @enable: Enable/Disable entry 598 * @ctype: Software defined channel type. Currently set as 0. 599 * @chan: Channel number. 
600 * @mac_addr: Destination mac address. 601 * Return: mdata for exact match table. 602 */ 603 static u64 rvu_exact_prepare_table_entry(struct rvu *rvu, bool enable, 604 u8 ctype, u16 chan, u8 *mac_addr) 605 606 { 607 u64 ldata = rvu_npc_exact_mac2u64(mac_addr); 608 609 /* Enable or disable */ 610 u64 mdata = FIELD_PREP(GENMASK_ULL(63, 63), enable ? 1 : 0); 611 612 /* Set Ctype */ 613 mdata |= FIELD_PREP(GENMASK_ULL(61, 60), ctype); 614 615 /* Set chan */ 616 mdata |= FIELD_PREP(GENMASK_ULL(59, 48), chan); 617 618 /* MAC address */ 619 mdata |= FIELD_PREP(GENMASK_ULL(47, 0), ldata); 620 621 return mdata; 622 } 623 624 /** 625 * rvu_exact_config_secret_key - Configure secret key. 626 * @rvu: Resource virtualization unit. 627 */ 628 static void rvu_exact_config_secret_key(struct rvu *rvu) 629 { 630 int blkaddr; 631 632 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 633 rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET0(NIX_INTF_RX), 634 RVU_NPC_HASH_SECRET_KEY0); 635 636 rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET1(NIX_INTF_RX), 637 RVU_NPC_HASH_SECRET_KEY1); 638 639 rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET2(NIX_INTF_RX), 640 RVU_NPC_HASH_SECRET_KEY2); 641 } 642 643 /** 644 * rvu_exact_config_search_key - Configure search key 645 * @rvu: Resource virtualization unit. 
646 */ 647 static void rvu_exact_config_search_key(struct rvu *rvu) 648 { 649 int blkaddr; 650 u64 reg_val; 651 652 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 653 654 /* HDR offset */ 655 reg_val = FIELD_PREP(GENMASK_ULL(39, 32), 0); 656 657 /* BYTESM1, number of bytes - 1 */ 658 reg_val |= FIELD_PREP(GENMASK_ULL(18, 16), ETH_ALEN - 1); 659 660 /* Enable LID and set LID to NPC_LID_LA */ 661 reg_val |= FIELD_PREP(GENMASK_ULL(11, 11), 1); 662 reg_val |= FIELD_PREP(GENMASK_ULL(10, 8), NPC_LID_LA); 663 664 /* Clear layer type based extraction */ 665 666 /* Disable LT_EN */ 667 reg_val |= FIELD_PREP(GENMASK_ULL(12, 12), 0); 668 669 /* Set LTYPE_MATCH to 0 */ 670 reg_val |= FIELD_PREP(GENMASK_ULL(7, 4), 0); 671 672 /* Set LTYPE_MASK to 0 */ 673 reg_val |= FIELD_PREP(GENMASK_ULL(3, 0), 0); 674 675 rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_CFG(NIX_INTF_RX), reg_val); 676 } 677 678 /** 679 * rvu_exact_config_result_ctrl - Set exact table hash control 680 * @rvu: Resource virtualization unit. 681 * @depth: Depth of Exact match table. 682 * 683 * Sets mask and offset for hash for mem table. 684 */ 685 static void rvu_exact_config_result_ctrl(struct rvu *rvu, uint32_t depth) 686 { 687 int blkaddr; 688 u64 reg = 0; 689 690 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 691 692 /* Set mask. Note that depth is a power of 2 */ 693 rvu->hw->table->mem_table.hash_mask = (depth - 1); 694 reg |= FIELD_PREP(GENMASK_ULL(42, 32), (depth - 1)); 695 696 /* Set offset as 0 */ 697 rvu->hw->table->mem_table.hash_offset = 0; 698 reg |= FIELD_PREP(GENMASK_ULL(10, 0), 0); 699 700 /* Set reg for RX */ 701 rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_RESULT_CTL(NIX_INTF_RX), reg); 702 /* Store hash mask and offset for s/w algorithm */ 703 } 704 705 /** 706 * rvu_exact_config_table_mask - Set exact table mask. 707 * @rvu: Resource virtualization unit. 
708 */ 709 static void rvu_exact_config_table_mask(struct rvu *rvu) 710 { 711 int blkaddr; 712 u64 mask = 0; 713 714 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 715 716 /* Don't use Ctype */ 717 mask |= FIELD_PREP(GENMASK_ULL(61, 60), 0); 718 719 /* Set chan */ 720 mask |= GENMASK_ULL(59, 48); 721 722 /* Full ldata */ 723 mask |= GENMASK_ULL(47, 0); 724 725 /* Store mask for s/w hash calcualtion */ 726 rvu->hw->table->mem_table.mask = mask; 727 728 /* Set mask for RX.*/ 729 rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_MASK(NIX_INTF_RX), mask); 730 } 731 732 /** 733 * rvu_npc_exact_get_max_entries - Get total number of entries in table. 734 * @rvu: resource virtualization unit. 735 * Return: Maximum table entries possible. 736 */ 737 u32 rvu_npc_exact_get_max_entries(struct rvu *rvu) 738 { 739 struct npc_exact_table *table; 740 741 table = rvu->hw->table; 742 return table->tot_ids; 743 } 744 745 /** 746 * rvu_npc_exact_has_match_table - Checks support for exact match. 747 * @rvu: resource virtualization unit. 748 * Return: True if exact match table is supported/enabled. 749 */ 750 bool rvu_npc_exact_has_match_table(struct rvu *rvu) 751 { 752 return rvu->hw->cap.npc_exact_match_enabled; 753 } 754 755 /** 756 * __rvu_npc_exact_find_entry_by_seq_id - find entry by id 757 * @rvu: resource virtualization unit. 758 * @seq_id: Sequence identifier. 759 * 760 * Caller should acquire the lock. 761 * Return: Pointer to table entry. 
762 */ 763 static struct npc_exact_table_entry * 764 __rvu_npc_exact_find_entry_by_seq_id(struct rvu *rvu, u32 seq_id) 765 { 766 struct npc_exact_table *table = rvu->hw->table; 767 struct npc_exact_table_entry *entry = NULL; 768 struct list_head *lhead; 769 770 lhead = &table->lhead_gbl; 771 772 /* traverse to find the matching entry */ 773 list_for_each_entry(entry, lhead, glist) { 774 if (entry->seq_id != seq_id) 775 continue; 776 777 return entry; 778 } 779 780 return NULL; 781 } 782 783 /** 784 * rvu_npc_exact_add_to_list - Add entry to list 785 * @rvu: resource virtualization unit. 786 * @opc_type: OPCODE to select MEM/CAM table. 787 * @ways: MEM table ways. 788 * @index: Index in MEM/CAM table. 789 * @cgx_id: CGX identifier. 790 * @lmac_id: LMAC identifier. 791 * @mac_addr: MAC address. 792 * @chan: Channel number. 793 * @ctype: Channel Type. 794 * @seq_id: Sequence identifier 795 * @cmd: True if function is called by ethtool cmd 796 * @mcam_idx: NPC mcam index of DMAC entry in NPC mcam. 797 * @pcifunc: pci function 798 * Return: 0 upon success. 
799 */ 800 static int rvu_npc_exact_add_to_list(struct rvu *rvu, enum npc_exact_opc_type opc_type, u8 ways, 801 u32 index, u8 cgx_id, u8 lmac_id, u8 *mac_addr, u16 chan, 802 u8 ctype, u32 *seq_id, bool cmd, u32 mcam_idx, u16 pcifunc) 803 { 804 struct npc_exact_table_entry *entry, *tmp, *iter; 805 struct npc_exact_table *table = rvu->hw->table; 806 struct list_head *lhead, *pprev; 807 808 WARN_ON(ways >= NPC_EXACT_TBL_MAX_WAYS); 809 810 if (!rvu_npc_exact_alloc_id(rvu, seq_id)) { 811 dev_err(rvu->dev, "%s: Generate seq id failed\n", __func__); 812 return -EFAULT; 813 } 814 815 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 816 if (!entry) { 817 rvu_npc_exact_free_id(rvu, *seq_id); 818 dev_err(rvu->dev, "%s: Memory allocation failed\n", __func__); 819 return -ENOMEM; 820 } 821 822 mutex_lock(&table->lock); 823 switch (opc_type) { 824 case NPC_EXACT_OPC_CAM: 825 lhead = &table->lhead_cam_tbl_entry; 826 table->cam_tbl_entry_cnt++; 827 break; 828 829 case NPC_EXACT_OPC_MEM: 830 lhead = &table->lhead_mem_tbl_entry[ways]; 831 table->mem_tbl_entry_cnt++; 832 break; 833 834 default: 835 mutex_unlock(&table->lock); 836 kfree(entry); 837 rvu_npc_exact_free_id(rvu, *seq_id); 838 839 dev_err(rvu->dev, "%s: Unknown opc type%d\n", __func__, opc_type); 840 return -EINVAL; 841 } 842 843 /* Add to global list */ 844 INIT_LIST_HEAD(&entry->glist); 845 list_add_tail(&entry->glist, &table->lhead_gbl); 846 INIT_LIST_HEAD(&entry->list); 847 entry->index = index; 848 entry->ways = ways; 849 entry->opc_type = opc_type; 850 851 entry->pcifunc = pcifunc; 852 853 ether_addr_copy(entry->mac, mac_addr); 854 entry->chan = chan; 855 entry->ctype = ctype; 856 entry->cgx_id = cgx_id; 857 entry->lmac_id = lmac_id; 858 859 entry->seq_id = *seq_id; 860 861 entry->mcam_idx = mcam_idx; 862 entry->cmd = cmd; 863 864 pprev = lhead; 865 866 /* Insert entry in ascending order of index */ 867 list_for_each_entry_safe(iter, tmp, lhead, list) { 868 if (index < iter->index) 869 break; 870 871 pprev = &iter->list; 
872 } 873 874 /* Add to each table list */ 875 list_add(&entry->list, pprev); 876 mutex_unlock(&table->lock); 877 return 0; 878 } 879 880 /** 881 * rvu_npc_exact_mem_table_write - Wrapper for register write 882 * @rvu: resource virtualization unit. 883 * @blkaddr: Block address 884 * @ways: ways for MEM table. 885 * @index: Index in MEM 886 * @mdata: Meta data to be written to register. 887 */ 888 static void rvu_npc_exact_mem_table_write(struct rvu *rvu, int blkaddr, u8 ways, 889 u32 index, u64 mdata) 890 { 891 rvu_write64(rvu, blkaddr, NPC_AF_EXACT_MEM_ENTRY(ways, index), mdata); 892 } 893 894 /** 895 * rvu_npc_exact_cam_table_write - Wrapper for register write 896 * @rvu: resource virtualization unit. 897 * @blkaddr: Block address 898 * @index: Index in MEM 899 * @mdata: Meta data to be written to register. 900 */ 901 static void rvu_npc_exact_cam_table_write(struct rvu *rvu, int blkaddr, 902 u32 index, u64 mdata) 903 { 904 rvu_write64(rvu, blkaddr, NPC_AF_EXACT_CAM_ENTRY(index), mdata); 905 } 906 907 /** 908 * rvu_npc_exact_dealloc_table_entry - dealloc table entry 909 * @rvu: resource virtualization unit. 910 * @opc_type: OPCODE for selection of table(MEM or CAM) 911 * @ways: ways if opc_type is MEM table. 912 * @index: Index of MEM or CAM table. 913 * Return: 0 upon success. 
914 */ 915 static int rvu_npc_exact_dealloc_table_entry(struct rvu *rvu, enum npc_exact_opc_type opc_type, 916 u8 ways, u32 index) 917 { 918 int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 919 struct npc_exact_table *table; 920 u8 null_dmac[6] = { 0 }; 921 int depth; 922 923 /* Prepare entry with all fields set to zero */ 924 u64 null_mdata = rvu_exact_prepare_table_entry(rvu, false, 0, 0, null_dmac); 925 926 table = rvu->hw->table; 927 depth = table->mem_table.depth; 928 929 mutex_lock(&table->lock); 930 931 switch (opc_type) { 932 case NPC_EXACT_OPC_CAM: 933 934 /* Check whether entry is used already */ 935 if (!test_bit(index, table->cam_table.bmap)) { 936 mutex_unlock(&table->lock); 937 dev_err(rvu->dev, "%s: Trying to free an unused entry ways=%d index=%d\n", 938 __func__, ways, index); 939 return -EINVAL; 940 } 941 942 rvu_npc_exact_cam_table_write(rvu, blkaddr, index, null_mdata); 943 clear_bit(index, table->cam_table.bmap); 944 break; 945 946 case NPC_EXACT_OPC_MEM: 947 948 /* Check whether entry is used already */ 949 if (!test_bit(index + ways * depth, table->mem_table.bmap)) { 950 mutex_unlock(&table->lock); 951 dev_err(rvu->dev, "%s: Trying to free an unused entry index=%d\n", 952 __func__, index); 953 return -EINVAL; 954 } 955 956 rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, null_mdata); 957 clear_bit(index + ways * depth, table->mem_table.bmap); 958 break; 959 960 default: 961 mutex_unlock(&table->lock); 962 dev_err(rvu->dev, "%s: invalid opc type %d", __func__, opc_type); 963 return -ENOSPC; 964 } 965 966 mutex_unlock(&table->lock); 967 968 dev_dbg(rvu->dev, "%s: Successfully deleted entry (index=%d, ways=%d opc_type=%d\n", 969 __func__, index, ways, opc_type); 970 971 return 0; 972 } 973 974 /** 975 * rvu_npc_exact_alloc_table_entry - Allociate an entry 976 * @rvu: resource virtualization unit. 977 * @mac: MAC address. 978 * @chan: Channel number. 979 * @ctype: Channel Type. 980 * @index: Index of MEM table or CAM table. 
981 * @ways: Ways. Only valid for MEM table. 982 * @opc_type: OPCODE to select table (MEM or CAM) 983 * 984 * Try allocating a slot from MEM table. If all 4 ways 985 * slot are full for a hash index, check availability in 986 * 32-entry CAM table for allocation. 987 * Return: 0 upon success. 988 */ 989 static int rvu_npc_exact_alloc_table_entry(struct rvu *rvu, char *mac, u16 chan, u8 ctype, 990 u32 *index, u8 *ways, enum npc_exact_opc_type *opc_type) 991 { 992 struct npc_exact_table *table; 993 unsigned int hash; 994 int err; 995 996 table = rvu->hw->table; 997 998 /* Check in 4-ways mem entry for free slote */ 999 hash = rvu_exact_calculate_hash(rvu, chan, ctype, mac, table->mem_table.mask, 1000 table->mem_table.depth); 1001 err = rvu_npc_exact_alloc_mem_table_entry(rvu, ways, index, hash); 1002 if (!err) { 1003 *opc_type = NPC_EXACT_OPC_MEM; 1004 dev_dbg(rvu->dev, "%s: inserted in 4 ways hash table ways=%d, index=%d\n", 1005 __func__, *ways, *index); 1006 return 0; 1007 } 1008 1009 dev_dbg(rvu->dev, "%s: failed to insert in 4 ways hash table\n", __func__); 1010 1011 /* wayss is 0 for cam table */ 1012 *ways = 0; 1013 err = rvu_npc_exact_alloc_cam_table_entry(rvu, index); 1014 if (!err) { 1015 *opc_type = NPC_EXACT_OPC_CAM; 1016 dev_dbg(rvu->dev, "%s: inserted in fully associative hash table index=%u\n", 1017 __func__, *index); 1018 return 0; 1019 } 1020 1021 dev_err(rvu->dev, "%s: failed to insert in fully associative hash table\n", __func__); 1022 return -ENOSPC; 1023 } 1024 1025 /** 1026 * rvu_npc_exact_save_drop_rule_chan_and_mask - Save drop rules info in data base. 1027 * @rvu: resource virtualization unit. 1028 * @drop_mcam_idx: Drop rule index in NPC mcam. 1029 * @chan_val: Channel value. 1030 * @chan_mask: Channel Mask. 1031 * @pcifunc: pcifunc of interface. 1032 * Return: True upon success. 
1033 */ 1034 static bool rvu_npc_exact_save_drop_rule_chan_and_mask(struct rvu *rvu, int drop_mcam_idx, 1035 u64 chan_val, u64 chan_mask, u16 pcifunc) 1036 { 1037 struct npc_exact_table *table; 1038 int i; 1039 1040 table = rvu->hw->table; 1041 1042 for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) { 1043 if (!table->drop_rule_map[i].valid) 1044 break; 1045 1046 if (table->drop_rule_map[i].chan_val != (u16)chan_val) 1047 continue; 1048 1049 if (table->drop_rule_map[i].chan_mask != (u16)chan_mask) 1050 continue; 1051 1052 return false; 1053 } 1054 1055 if (i == NPC_MCAM_DROP_RULE_MAX) 1056 return false; 1057 1058 table->drop_rule_map[i].drop_rule_idx = drop_mcam_idx; 1059 table->drop_rule_map[i].chan_val = (u16)chan_val; 1060 table->drop_rule_map[i].chan_mask = (u16)chan_mask; 1061 table->drop_rule_map[i].pcifunc = pcifunc; 1062 table->drop_rule_map[i].valid = true; 1063 return true; 1064 } 1065 1066 /** 1067 * rvu_npc_exact_calc_drop_rule_chan_and_mask - Calculate Channel number and mask. 1068 * @rvu: resource virtualization unit. 1069 * @intf_type: Interface type (SDK, LBK or CGX) 1070 * @cgx_id: CGX identifier. 1071 * @lmac_id: LAMC identifier. 1072 * @val: Channel number. 1073 * @mask: Channel mask. 1074 * Return: True upon success. 1075 */ 1076 static bool rvu_npc_exact_calc_drop_rule_chan_and_mask(struct rvu *rvu, u8 intf_type, 1077 u8 cgx_id, u8 lmac_id, 1078 u64 *val, u64 *mask) 1079 { 1080 u16 chan_val, chan_mask; 1081 1082 /* No support for SDP and LBK */ 1083 if (intf_type != NIX_INTF_TYPE_CGX) 1084 return false; 1085 1086 chan_val = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0); 1087 chan_mask = 0xfff; 1088 1089 if (val) 1090 *val = chan_val; 1091 1092 if (mask) 1093 *mask = chan_mask; 1094 1095 return true; 1096 } 1097 1098 /** 1099 * rvu_npc_exact_drop_rule_to_pcifunc - Retrieve pcifunc 1100 * @rvu: resource virtualization unit. 1101 * @drop_rule_idx: Drop rule index in NPC mcam. 
 *
 * Debugfs (exact_drop_cnt) entry displays pcifunc for interface
 * by retrieving the pcifunc value from data base.
 * Return: pcifunc of the interface owning the drop rule, or
 * (u16)-1 (0xFFFF) if the rule index is not found in the map.
 */
u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx)
{
	struct npc_exact_table *table;
	int i;

	table = rvu->hw->table;

	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
		/* Map is filled front-to-back; first invalid slot ends the scan */
		if (!table->drop_rule_map[i].valid)
			break;

		if (table->drop_rule_map[i].drop_rule_idx != drop_rule_idx)
			continue;

		return table->drop_rule_map[i].pcifunc;
	}

	dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
		__func__, drop_rule_idx);
	/* Returned as u16: -1 wraps to 0xFFFF sentinel */
	return -1;
}

/**
 * rvu_npc_exact_get_drop_rule_info - Get drop rule information.
 * @rvu: resource virtualization unit.
 * @intf_type: Interface type (CGX, SDP or LBK)
 * @cgx_id: CGX identifier.
 * @lmac_id: LMAC identifier.
 * @drop_mcam_idx: NPC mcam drop rule index.
 * @val: Channel value.
 * @mask: Channel mask.
 * @pcifunc: pcifunc of interface corresponding to the drop rule.
 * Return: True upon success.
1140 */ 1141 static bool rvu_npc_exact_get_drop_rule_info(struct rvu *rvu, u8 intf_type, u8 cgx_id, 1142 u8 lmac_id, u32 *drop_mcam_idx, u64 *val, 1143 u64 *mask, u16 *pcifunc) 1144 { 1145 struct npc_exact_table *table; 1146 u64 chan_val, chan_mask; 1147 bool rc; 1148 int i; 1149 1150 table = rvu->hw->table; 1151 1152 if (intf_type != NIX_INTF_TYPE_CGX) { 1153 dev_err(rvu->dev, "%s: No drop rule for LBK/SDP mode\n", __func__); 1154 return false; 1155 } 1156 1157 rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, intf_type, cgx_id, 1158 lmac_id, &chan_val, &chan_mask); 1159 if (!rc) 1160 return false; 1161 1162 for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) { 1163 if (!table->drop_rule_map[i].valid) 1164 break; 1165 1166 if (table->drop_rule_map[i].chan_val != (u16)chan_val) 1167 continue; 1168 1169 if (val) 1170 *val = table->drop_rule_map[i].chan_val; 1171 if (mask) 1172 *mask = table->drop_rule_map[i].chan_mask; 1173 if (pcifunc) 1174 *pcifunc = table->drop_rule_map[i].pcifunc; 1175 1176 *drop_mcam_idx = i; 1177 return true; 1178 } 1179 1180 if (i == NPC_MCAM_DROP_RULE_MAX) { 1181 dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n", 1182 __func__, *drop_mcam_idx); 1183 return false; 1184 } 1185 1186 dev_err(rvu->dev, "%s: Could not retrieve for cgx=%d, lmac=%d\n", 1187 __func__, cgx_id, lmac_id); 1188 return false; 1189 } 1190 1191 /** 1192 * __rvu_npc_exact_cmd_rules_cnt_update - Update number dmac rules against a drop rule. 1193 * @rvu: resource virtualization unit. 1194 * @drop_mcam_idx: NPC mcam drop rule index. 1195 * @val: +1 or -1. 1196 * @enable_or_disable_cam: If no exact match rules against a drop rule, disable it. 1197 * 1198 * when first exact match entry against a drop rule is added, enable_or_disable_cam 1199 * is set to true. When last exact match entry against a drop rule is deleted, 1200 * enable_or_disable_cam is set to true. 
 * Return: Number of rules
 */
static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_idx,
						int val, bool *enable_or_disable_cam)
{
	struct npc_exact_table *table;
	u16 *cnt, old_cnt;
	bool promisc;

	table = rvu->hw->table;
	promisc = table->promisc_mode[drop_mcam_idx];

	cnt = &table->cnt_cmd_rules[drop_mcam_idx];
	old_cnt = *cnt;

	*cnt += val;

	/* Caller may pass NULL when it does not care about cam state */
	if (!enable_or_disable_cam)
		goto done;

	*enable_or_disable_cam = false;

	/* In promisc mode the drop rule stays disabled regardless of count */
	if (promisc)
		goto done;

	/* If all rules are deleted and not already in promisc mode;
	 * disable cam
	 */
	if (!*cnt && val < 0) {
		*enable_or_disable_cam = true;
		goto done;
	}

	/* If rule got added and not already in promisc mode; enable cam */
	if (!old_cnt && val > 0) {
		*enable_or_disable_cam = true;
		goto done;
	}

done:
	return *cnt;
}

/**
 * rvu_npc_exact_del_table_entry_by_id - Delete and free table entry.
 * @rvu: resource virtualization unit.
 * @seq_id: Sequence identifier of the entry.
 *
 * Deletes entry from linked lists and free up slot in HW MEM or CAM
 * table.
 * Return: 0 upon success.
 */
static int rvu_npc_exact_del_table_entry_by_id(struct rvu *rvu, u32 seq_id)
{
	struct npc_exact_table_entry *entry = NULL;
	struct npc_exact_table *table;
	bool disable_cam = false;
	u32 drop_mcam_idx = -1;
	int *cnt;
	bool rc;

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Lookup for entry which needs to be updated */
	entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, seq_id);
	if (!entry) {
		dev_dbg(rvu->dev, "%s: failed to find entry for id=%d\n", __func__, seq_id);
		mutex_unlock(&table->lock);
		return -ENODATA;
	}

	/* Pick the counter matching the table the entry lives in */
	cnt = (entry->opc_type == NPC_EXACT_OPC_CAM) ? &table->cam_tbl_entry_cnt :
						       &table->mem_tbl_entry_cnt;

	/* delete from lists */
	list_del_init(&entry->list);
	list_del_init(&entry->glist);

	(*cnt)--;

	rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, entry->cgx_id,
					      entry->lmac_id, &drop_mcam_idx, NULL, NULL, NULL);
	if (!rc) {
		/* NOTE(review): at this point the entry is already unlinked
		 * and the counter decremented, but the entry memory, its HW
		 * slot and its seq id are not released on this path —
		 * looks like a leak; confirm intended.
		 */
		dev_dbg(rvu->dev, "%s: failed to retrieve drop info for id=0x%x\n",
			__func__, seq_id);
		mutex_unlock(&table->lock);
		return -ENODATA;
	}

	/* Only ethtool-originated entries are counted against the drop rule */
	if (entry->cmd)
		__rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, -1, &disable_cam);

	/* No dmac filter rules; disable drop on hit rule */
	if (disable_cam) {
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
		dev_dbg(rvu->dev, "%s: Disabling mcam idx %d\n",
			__func__, drop_mcam_idx);
	}

	mutex_unlock(&table->lock);

	/* Release the HW slot (MEM way or CAM entry) and the seq id */
	rvu_npc_exact_dealloc_table_entry(rvu, entry->opc_type, entry->ways, entry->index);

	rvu_npc_exact_free_id(rvu, seq_id);

	dev_dbg(rvu->dev, "%s: delete entry success for id=0x%x, mca=%pM\n",
		__func__, seq_id, entry->mac);
	kfree(entry);

	return 0;
}

/**
 * rvu_npc_exact_add_table_entry - Adds a table entry
 * @rvu: resource virtualization unit.
 * @cgx_id: cgx identifier.
 * @lmac_id: lmac identifier.
 * @mac: MAC address.
 * @chan: Channel number.
 * @ctype: Channel Type.
 * @seq_id: Sequence number.
 * @cmd: Whether it is invoked by ethtool cmd.
 * @mcam_idx: NPC mcam index corresponding to MAC
 * @pcifunc: PCI func.
 *
 * Creates a new exact match table entry in either CAM or
 * MEM table.
 * Return: 0 upon success.
 */
static int rvu_npc_exact_add_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id, u8 *mac,
					 u16 chan, u8 ctype, u32 *seq_id, bool cmd,
					 u32 mcam_idx, u16 pcifunc)
{
	int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	enum npc_exact_opc_type opc_type;
	bool enable_cam = false;
	u32 drop_mcam_idx;
	u32 index;
	u64 mdata;
	bool rc;
	int err;
	u8 ways;

	/* Channel type is deliberately forced to 0; the @ctype argument
	 * is effectively ignored by this implementation.
	 */
	ctype = 0;

	err = rvu_npc_exact_alloc_table_entry(rvu, mac, chan, ctype, &index, &ways, &opc_type);
	if (err) {
		dev_err(rvu->dev, "%s: Could not alloc in exact match table\n", __func__);
		return err;
	}

	/* Write mdata to table */
	mdata = rvu_exact_prepare_table_entry(rvu, true, ctype, chan, mac);

	if (opc_type == NPC_EXACT_OPC_CAM)
		rvu_npc_exact_cam_table_write(rvu, blkaddr, index, mdata);
	else
		rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, mdata);

	/* Insert entry to linked list */
	err = rvu_npc_exact_add_to_list(rvu, opc_type, ways, index, cgx_id, lmac_id,
					mac, chan, ctype, seq_id, cmd, mcam_idx, pcifunc);
	if (err) {
		rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
		dev_err(rvu->dev, "%s: could not add to exact match table\n", __func__);
		return err;
	}

	rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
					      &drop_mcam_idx, NULL, NULL, NULL);
	if (!rc) {
		/* NOTE(review): the entry was just linked into the lists by
		 * rvu_npc_exact_add_to_list() but only the HW slot is
		 * released here — the list node appears to stay behind;
		 * confirm intended.
		 */
		rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
		dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
			__func__, cgx_id, lmac_id);
		return -EINVAL;
	}

	if (cmd)
		__rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 1, &enable_cam);

	/* First command rule; enable drop on hit rule */
	if (enable_cam) {
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, true);
		dev_dbg(rvu->dev, "%s: Enabling mcam idx %d\n",
			__func__, drop_mcam_idx);
	}

	dev_dbg(rvu->dev,
		"%s: Successfully added entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n",
		__func__, index, mac, ways, opc_type);

	return 0;
}

/**
 * rvu_npc_exact_update_table_entry - Update exact match table.
 * @rvu: resource virtualization unit.
 * @cgx_id: CGX identifier.
 * @lmac_id: LMAC identifier.
 * @old_mac: Existing MAC address entry.
 * @new_mac: New MAC address entry.
 * @seq_id: Sequence identifier of the entry.
 *
 * Updates MAC address of an entry. If entry is in MEM table, new
 * hash value may not match with old one.
 * Return: 0 upon success.
 */
static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id,
					    u8 *old_mac, u8 *new_mac, u32 *seq_id)
{
	int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	struct npc_exact_table_entry *entry;
	struct npc_exact_table *table;
	u32 hash_index;
	u64 mdata;

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Lookup for entry which needs to be updated */
	entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, *seq_id);
	if (!entry) {
		mutex_unlock(&table->lock);
		dev_dbg(rvu->dev,
			"%s: failed to find entry for cgx_id=%d lmac_id=%d old_mac=%pM\n",
			__func__, cgx_id, lmac_id, old_mac);
		return -ENODATA;
	}

	/* If entry is in mem table and new hash index is different than old
	 * hash index, we cannot update the entry. Fail in these scenarios.
	 */
	if (entry->opc_type == NPC_EXACT_OPC_MEM) {
		hash_index = rvu_exact_calculate_hash(rvu, entry->chan, entry->ctype,
						      new_mac, table->mem_table.mask,
						      table->mem_table.depth);
		if (hash_index != entry->index) {
			dev_dbg(rvu->dev,
				"%s: Update failed due to index mismatch(new=0x%x, old=%x)\n",
				__func__, hash_index, entry->index);
			mutex_unlock(&table->lock);
			return -EINVAL;
		}
	}

	/* Rewrite the HW slot in place with the new MAC */
	mdata = rvu_exact_prepare_table_entry(rvu, true, entry->ctype, entry->chan, new_mac);

	if (entry->opc_type == NPC_EXACT_OPC_MEM)
		rvu_npc_exact_mem_table_write(rvu, blkaddr, entry->ways, entry->index, mdata);
	else
		rvu_npc_exact_cam_table_write(rvu, blkaddr, entry->index, mdata);

	/* Update entry fields */
	ether_addr_copy(entry->mac, new_mac);
	*seq_id = entry->seq_id;

	dev_dbg(rvu->dev,
		"%s: Successfully updated entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n",
		__func__, entry->index, entry->mac, entry->ways, entry->opc_type);

	dev_dbg(rvu->dev, "%s: Successfully updated entry (old mac=%pM new_mac=%pM\n",
		__func__, old_mac, new_mac);

	mutex_unlock(&table->lock);
	return 0;
}

/**
 * rvu_npc_exact_promisc_disable - Disable promiscuous mode.
 * @rvu: resource virtualization unit.
 * @pcifunc: pcifunc
 *
 * Drop rule is against each PF. We don't support DMAC filter for
 * VF.
1478 * Return: 0 upon success 1479 */ 1480 1481 int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc) 1482 { 1483 struct npc_exact_table *table; 1484 int pf = rvu_get_pf(pcifunc); 1485 u8 cgx_id, lmac_id; 1486 u32 drop_mcam_idx; 1487 bool *promisc; 1488 bool rc; 1489 1490 table = rvu->hw->table; 1491 1492 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 1493 rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id, 1494 &drop_mcam_idx, NULL, NULL, NULL); 1495 if (!rc) { 1496 dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n", 1497 __func__, cgx_id, lmac_id); 1498 return -EINVAL; 1499 } 1500 1501 mutex_lock(&table->lock); 1502 promisc = &table->promisc_mode[drop_mcam_idx]; 1503 1504 if (!*promisc) { 1505 mutex_unlock(&table->lock); 1506 dev_dbg(rvu->dev, "%s: Err Already promisc mode disabled (cgx=%d lmac=%d)\n", 1507 __func__, cgx_id, lmac_id); 1508 return LMAC_AF_ERR_INVALID_PARAM; 1509 } 1510 *promisc = false; 1511 mutex_unlock(&table->lock); 1512 1513 /* Enable drop rule */ 1514 rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, 1515 true); 1516 1517 dev_dbg(rvu->dev, "%s: disabled promisc mode (cgx=%d lmac=%d)\n", 1518 __func__, cgx_id, lmac_id); 1519 return 0; 1520 } 1521 1522 /** 1523 * rvu_npc_exact_promisc_enable - Enable promiscuous mode. 1524 * @rvu: resource virtualization unit. 1525 * @pcifunc: pcifunc. 
 * Return: 0 upon success
 */
int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
{
	struct npc_exact_table *table;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;
	u32 drop_mcam_idx;
	bool *promisc;
	bool rc;

	table = rvu->hw->table;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
					      &drop_mcam_idx, NULL, NULL, NULL);
	if (!rc) {
		dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
			__func__, cgx_id, lmac_id);
		return -EINVAL;
	}

	mutex_lock(&table->lock);
	promisc = &table->promisc_mode[drop_mcam_idx];

	/* Enabling promisc twice is an error */
	if (*promisc) {
		mutex_unlock(&table->lock);
		dev_dbg(rvu->dev, "%s: Already in promisc mode (cgx=%d lmac=%d)\n",
			__func__, cgx_id, lmac_id);
		return LMAC_AF_ERR_INVALID_PARAM;
	}
	*promisc = true;
	mutex_unlock(&table->lock);

	/* disable drop rule so all traffic is accepted */
	rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX,
					   false);

	dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d)\n",
		__func__, cgx_id, lmac_id);
	return 0;
}

/**
 * rvu_npc_exact_mac_addr_reset - Delete PF mac address.
 * @rvu: resource virtualization unit.
 * @req: Reset request
 * @rsp: Reset response.
 * Return: 0 upon success
 */
int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
				 struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u32 seq_id = req->index;
	struct rvu_pfvf *pfvf;
	/* NOTE(review): cgx_id/lmac_id are computed but never used below */
	u8 cgx_id, lmac_id;
	int rc;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	rc = rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
	if (rc) {
		/* TODO: how to handle this error case ? */
		dev_err(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__, pfvf->mac_addr, pf);
		return 0;
	}

	dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d success (seq_id=%u)\n",
		__func__, pfvf->mac_addr, pf, seq_id);
	return 0;
}

/**
 * rvu_npc_exact_mac_addr_update - Update mac address field with new value.
 * @rvu: resource virtualization unit.
 * @req: Update request.
 * @rsp: Update response.
 * Return: 0 upon success
 */
int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
				  struct cgx_mac_addr_update_req *req,
				  struct cgx_mac_addr_update_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct npc_exact_table_entry *entry;
	struct npc_exact_table *table;
	struct rvu_pfvf *pfvf;
	u32 seq_id, mcam_idx;
	u8 old_mac[ETH_ALEN];
	u8 cgx_id, lmac_id;
	int rc;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	dev_dbg(rvu->dev, "%s: Update request for seq_id=%d, mac=%pM\n",
		__func__, req->index, req->mac_addr);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Lookup for entry which needs to be updated */
	entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, req->index);
	if (!entry) {
		dev_err(rvu->dev, "%s: failed to find entry for id=0x%x\n", __func__, req->index);
		mutex_unlock(&table->lock);
		return LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED;
	}
	/* Snapshot entry fields, then drop the lock before the update call */
	ether_addr_copy(old_mac, entry->mac);
	seq_id = entry->seq_id;
	mcam_idx = entry->mcam_idx;
	mutex_unlock(&table->lock);

	/* Fast path: rewrite the slot in place if the hash still matches */
	rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, old_mac,
					      req->mac_addr, &seq_id);
	if (!rc) {
		rsp->index = seq_id;
		dev_dbg(rvu->dev, "%s mac:%pM (pfvf:%pM default:%pM) update to PF=%d success\n",
			__func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf);
		ether_addr_copy(pfvf->mac_addr, req->mac_addr);
		return 0;
	}

	/* Try deleting and adding it again */
	rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
	if (rc) {
		/* This could be a new entry */
		dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__,
			pfvf->mac_addr, pf);
	}

	rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
					   pfvf->rx_chan_base, 0, &seq_id, true,
					   mcam_idx, req->hdr.pcifunc);
	if (rc) {
		dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n", __func__,
			req->mac_addr, pf);
		return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
	}

	rsp->index = seq_id;
	dev_dbg(rvu->dev,
		"%s MAC (new:%pM, old=%pM default:%pM) del and add to PF=%d success (seq_id=%u)\n",
		__func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf, seq_id);

	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
	return 0;
}

/**
 * rvu_npc_exact_mac_addr_add - Adds MAC address to exact match table.
 * @rvu: resource virtualization unit.
 * @req: Add request.
 * @rsp: Add response.
1688 * Return: 0 upon success 1689 */ 1690 int rvu_npc_exact_mac_addr_add(struct rvu *rvu, 1691 struct cgx_mac_addr_add_req *req, 1692 struct cgx_mac_addr_add_rsp *rsp) 1693 { 1694 int pf = rvu_get_pf(req->hdr.pcifunc); 1695 struct rvu_pfvf *pfvf; 1696 u8 cgx_id, lmac_id; 1697 int rc = 0; 1698 u32 seq_id; 1699 1700 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 1701 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 1702 1703 rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr, 1704 pfvf->rx_chan_base, 0, &seq_id, 1705 true, -1, req->hdr.pcifunc); 1706 1707 if (!rc) { 1708 rsp->index = seq_id; 1709 dev_dbg(rvu->dev, "%s MAC (%pM) add to PF=%d success (seq_id=%u)\n", 1710 __func__, req->mac_addr, pf, seq_id); 1711 return 0; 1712 } 1713 1714 dev_err(rvu->dev, "%s MAC (%pM) add to PF=%d failed\n", __func__, 1715 req->mac_addr, pf); 1716 return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED; 1717 } 1718 1719 /** 1720 * rvu_npc_exact_mac_addr_del - Delete DMAC filter 1721 * @rvu: resource virtualization unit. 1722 * @req: Delete request. 1723 * @rsp: Delete response. 1724 * Return: 0 upon success 1725 */ 1726 int rvu_npc_exact_mac_addr_del(struct rvu *rvu, 1727 struct cgx_mac_addr_del_req *req, 1728 struct msg_rsp *rsp) 1729 { 1730 int pf = rvu_get_pf(req->hdr.pcifunc); 1731 int rc; 1732 1733 rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index); 1734 if (!rc) { 1735 dev_dbg(rvu->dev, "%s del to PF=%d success (seq_id=%u)\n", 1736 __func__, pf, req->index); 1737 return 0; 1738 } 1739 1740 dev_err(rvu->dev, "%s del to PF=%d failed (seq_id=%u)\n", 1741 __func__, pf, req->index); 1742 return LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED; 1743 } 1744 1745 /** 1746 * rvu_npc_exact_mac_addr_set - Add PF mac address to dmac filter. 1747 * @rvu: resource virtualization unit. 1748 * @req: Set request. 1749 * @rsp: Set response. 
 * Return: 0 upon success
 */
int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
			       struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u32 seq_id = req->index;
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	u32 mcam_idx = -1;
	int rc, nixlf;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	/* NOTE(review): direct indexing into rvu->pf[], unlike the
	 * rvu_get_pfvf() helper used by the other handlers — confirm
	 * equivalent for a PF pcifunc.
	 */
	pfvf = &rvu->pf[pf];

	/* If table does not have an entry; both update entry and del table entry API
	 * below fails. Those are not failure conditions.
	 */
	rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, pfvf->mac_addr,
					      req->mac_addr, &seq_id);
	if (!rc) {
		rsp->index = seq_id;
		ether_addr_copy(pfvf->mac_addr, req->mac_addr);
		ether_addr_copy(rsp->mac_addr, req->mac_addr);
		dev_dbg(rvu->dev, "%s MAC (%pM) update to PF=%d success\n",
			__func__, req->mac_addr, pf);
		return 0;
	}

	/* Try deleting and adding it again */
	rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
	if (rc) {
		dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n",
			__func__, pfvf->mac_addr, pf);
	}

	/* find mcam entry if exist */
	rc = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, NULL);
	if (!rc) {
		mcam_idx = npc_get_nixlf_mcam_index(&rvu->hw->mcam, req->hdr.pcifunc,
						    nixlf, NIXLF_UCAST_ENTRY);
	}

	rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
					   pfvf->rx_chan_base, 0, &seq_id,
					   true, mcam_idx, req->hdr.pcifunc);
	if (rc) {
		dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n",
			__func__, req->mac_addr, pf);
		return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
	}

	rsp->index = seq_id;
	ether_addr_copy(rsp->mac_addr, req->mac_addr);
	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
	dev_dbg(rvu->dev,
		"%s MAC (%pM) del and add to PF=%d success (seq_id=%u)\n",
		__func__, req->mac_addr, pf, seq_id);
	return 0;
}

/**
 * rvu_npc_exact_can_disable_feature - Check if feature can be disabled.
 * @rvu: resource virtualization unit.
 * Return: True if exact match feature is supported.
 */
bool rvu_npc_exact_can_disable_feature(struct rvu *rvu)
{
	struct npc_exact_table *table = rvu->hw->table;
	bool empty;

	if (!rvu->hw->cap.npc_exact_match_enabled)
		return false;

	/* Feature can only be disabled when no entries are installed */
	mutex_lock(&table->lock);
	empty = list_empty(&table->lhead_gbl);
	mutex_unlock(&table->lock);

	return empty;
}

/**
 * rvu_npc_exact_disable_feature - Disable feature.
 * @rvu: resource virtualization unit.
 */
void rvu_npc_exact_disable_feature(struct rvu *rvu)
{
	rvu->hw->cap.npc_exact_match_enabled = false;
}

/**
 * rvu_npc_exact_reset - Delete and free all entry which match pcifunc.
 * @rvu: resource virtualization unit.
 * @pcifunc: PCI func to match.
 */
void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc)
{
	struct npc_exact_table *table = rvu->hw->table;
	struct npc_exact_table_entry *tmp, *iter;
	u32 seq_id;

	mutex_lock(&table->lock);
	list_for_each_entry_safe(iter, tmp, &table->lhead_gbl, glist) {
		if (pcifunc != iter->pcifunc)
			continue;

		seq_id = iter->seq_id;
		dev_dbg(rvu->dev, "%s: resetting pcifun=%d seq_id=%u\n", __func__,
			pcifunc, seq_id);

		/* Lock is dropped because the delete helper takes it itself.
		 * NOTE(review): while unlocked, a concurrent delete could free
		 * 'tmp' (the saved next node) — confirm callers serialize this.
		 */
		mutex_unlock(&table->lock);
		rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
		mutex_lock(&table->lock);
	}
	mutex_unlock(&table->lock);
}

/**
 * rvu_npc_exact_init - initialize exact match table
 * @rvu: resource virtualization unit.
 *
 * Initialize HW and SW resources to manage 4way-2K table and fully
 * associative 32-entry mcam table.
 * Return: 0 upon success.
1875 */ 1876 int rvu_npc_exact_init(struct rvu *rvu) 1877 { 1878 u64 bcast_mcast_val, bcast_mcast_mask; 1879 struct npc_exact_table *table; 1880 u64 exact_val, exact_mask; 1881 u64 chan_val, chan_mask; 1882 u8 cgx_id, lmac_id; 1883 u32 *drop_mcam_idx; 1884 u16 max_lmac_cnt; 1885 u64 npc_const3; 1886 int table_size; 1887 int blkaddr; 1888 u16 pcifunc; 1889 int err, i; 1890 u64 cfg; 1891 bool rc; 1892 1893 /* Read NPC_AF_CONST3 and check for have exact 1894 * match functionality is present 1895 */ 1896 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 1897 if (blkaddr < 0) { 1898 dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); 1899 return -EINVAL; 1900 } 1901 1902 /* Check exact match feature is supported */ 1903 npc_const3 = rvu_read64(rvu, blkaddr, NPC_AF_CONST3); 1904 if (!(npc_const3 & BIT_ULL(62))) 1905 return 0; 1906 1907 /* Check if kex profile has enabled EXACT match nibble */ 1908 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX)); 1909 if (!(cfg & NPC_EXACT_NIBBLE_HIT)) 1910 return 0; 1911 1912 /* Set capability to true */ 1913 rvu->hw->cap.npc_exact_match_enabled = true; 1914 1915 table = kzalloc(sizeof(*table), GFP_KERNEL); 1916 if (!table) 1917 return -ENOMEM; 1918 1919 dev_dbg(rvu->dev, "%s: Memory allocation for table success\n", __func__); 1920 rvu->hw->table = table; 1921 1922 /* Read table size, ways and depth */ 1923 table->mem_table.ways = FIELD_GET(GENMASK_ULL(19, 16), npc_const3); 1924 table->mem_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3); 1925 table->cam_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3); 1926 1927 dev_dbg(rvu->dev, "%s: NPC exact match 4way_2k table(ways=%d, depth=%d)\n", 1928 __func__, table->mem_table.ways, table->cam_table.depth); 1929 1930 /* Check if depth of table is not a sequre of 2 1931 * TODO: why _builtin_popcount() is not working ? 
1932 */ 1933 if ((table->mem_table.depth & (table->mem_table.depth - 1)) != 0) { 1934 dev_err(rvu->dev, 1935 "%s: NPC exact match 4way_2k table depth(%d) is not square of 2\n", 1936 __func__, table->mem_table.depth); 1937 return -EINVAL; 1938 } 1939 1940 table_size = table->mem_table.depth * table->mem_table.ways; 1941 1942 /* Allocate bitmap for 4way 2K table */ 1943 table->mem_table.bmap = devm_bitmap_zalloc(rvu->dev, table_size, 1944 GFP_KERNEL); 1945 if (!table->mem_table.bmap) 1946 return -ENOMEM; 1947 1948 dev_dbg(rvu->dev, "%s: Allocated bitmap for 4way 2K entry table\n", __func__); 1949 1950 /* Allocate bitmap for 32 entry mcam */ 1951 table->cam_table.bmap = devm_bitmap_zalloc(rvu->dev, 32, GFP_KERNEL); 1952 1953 if (!table->cam_table.bmap) 1954 return -ENOMEM; 1955 1956 dev_dbg(rvu->dev, "%s: Allocated bitmap for 32 entry cam\n", __func__); 1957 1958 table->tot_ids = table_size + table->cam_table.depth; 1959 table->id_bmap = devm_bitmap_zalloc(rvu->dev, table->tot_ids, 1960 GFP_KERNEL); 1961 1962 if (!table->id_bmap) 1963 return -ENOMEM; 1964 1965 dev_dbg(rvu->dev, "%s: Allocated bitmap for id map (total=%d)\n", 1966 __func__, table->tot_ids); 1967 1968 /* Initialize list heads for npc_exact_table entries. 1969 * This entry is used by debugfs to show entries in 1970 * exact match table. 1971 */ 1972 for (i = 0; i < NPC_EXACT_TBL_MAX_WAYS; i++) 1973 INIT_LIST_HEAD(&table->lhead_mem_tbl_entry[i]); 1974 1975 INIT_LIST_HEAD(&table->lhead_cam_tbl_entry); 1976 INIT_LIST_HEAD(&table->lhead_gbl); 1977 1978 mutex_init(&table->lock); 1979 1980 rvu_exact_config_secret_key(rvu); 1981 rvu_exact_config_search_key(rvu); 1982 1983 rvu_exact_config_table_mask(rvu); 1984 rvu_exact_config_result_ctrl(rvu, table->mem_table.depth); 1985 1986 /* - No drop rule for LBK 1987 * - Drop rules for SDP and each LMAC. 
1988 */ 1989 exact_val = !NPC_EXACT_RESULT_HIT; 1990 exact_mask = NPC_EXACT_RESULT_HIT; 1991 1992 /* nibble - 3 2 1 0 1993 * L3B L3M L2B L2M 1994 */ 1995 bcast_mcast_val = 0b0000; 1996 bcast_mcast_mask = 0b0011; 1997 1998 /* Install SDP drop rule */ 1999 drop_mcam_idx = &table->num_drop_rules; 2000 2001 max_lmac_cnt = rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx + 2002 PF_CGXMAP_BASE; 2003 2004 for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) { 2005 if (rvu->pf2cgxlmac_map[i] == 0xFF) 2006 continue; 2007 2008 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[i], &cgx_id, &lmac_id); 2009 2010 rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, NIX_INTF_TYPE_CGX, cgx_id, 2011 lmac_id, &chan_val, &chan_mask); 2012 if (!rc) { 2013 dev_err(rvu->dev, 2014 "%s: failed, info chan_val=0x%llx chan_mask=0x%llx rule_id=%d\n", 2015 __func__, chan_val, chan_mask, *drop_mcam_idx); 2016 return -EINVAL; 2017 } 2018 2019 /* Filter rules are only for PF */ 2020 pcifunc = RVU_PFFUNC(i, 0); 2021 2022 dev_dbg(rvu->dev, 2023 "%s:Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx\n", 2024 __func__, cgx_id, lmac_id, chan_val, chan_mask); 2025 2026 rc = rvu_npc_exact_save_drop_rule_chan_and_mask(rvu, table->num_drop_rules, 2027 chan_val, chan_mask, pcifunc); 2028 if (!rc) { 2029 dev_err(rvu->dev, 2030 "%s: failed to set drop info for cgx=%d, lmac=%d, chan=%llx\n", 2031 __func__, cgx_id, lmac_id, chan_val); 2032 return -EINVAL; 2033 } 2034 2035 err = npc_install_mcam_drop_rule(rvu, *drop_mcam_idx, 2036 &table->counter_idx[*drop_mcam_idx], 2037 chan_val, chan_mask, 2038 exact_val, exact_mask, 2039 bcast_mcast_val, bcast_mcast_mask); 2040 if (err) { 2041 dev_err(rvu->dev, 2042 "failed to configure drop rule (cgx=%d lmac=%d)\n", 2043 cgx_id, lmac_id); 2044 return err; 2045 } 2046 2047 (*drop_mcam_idx)++; 2048 } 2049 2050 dev_info(rvu->dev, "initialized exact match table successfully\n"); 2051 return 0; 2052 } 2053