// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <linux/bitfield.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "rvu_npc_fs.h"
#include "rvu_npc_hash.h"

static const char * const npc_flow_names[] = {
	[NPC_DMAC]	= "dmac",
	[NPC_SMAC]	= "smac",
	[NPC_ETYPE]	= "ether type",
	[NPC_VLAN_ETYPE_CTAG] = "vlan ether type ctag",
	[NPC_VLAN_ETYPE_STAG] = "vlan ether type stag",
	[NPC_OUTER_VID]	= "outer vlan id",
	[NPC_INNER_VID]	= "inner vlan id",
	[NPC_TOS]	= "tos",
	[NPC_IPFRAG_IPV4] = "fragmented IPv4 header ",
	[NPC_SIP_IPV4]	= "ipv4 source ip",
	[NPC_DIP_IPV4]	= "ipv4 destination ip",
	[NPC_IPFRAG_IPV6] = "fragmented IPv6 header ",
	[NPC_SIP_IPV6]	= "ipv6 source ip",
	[NPC_DIP_IPV6]	= "ipv6 destination ip",
	[NPC_IPPROTO_TCP] = "ip proto tcp",
	[NPC_IPPROTO_UDP] = "ip proto udp",
	[NPC_IPPROTO_SCTP] = "ip proto sctp",
	[NPC_IPPROTO_ICMP] = "ip proto icmp",
	[NPC_IPPROTO_ICMP6] = "ip proto icmp6",
	[NPC_IPPROTO_AH] = "ip proto AH",
	[NPC_IPPROTO_ESP] = "ip proto ESP",
	[NPC_SPORT_TCP]	= "tcp source port",
	[NPC_DPORT_TCP]	= "tcp destination port",
	[NPC_SPORT_UDP]	= "udp source port",
	[NPC_DPORT_UDP]	= "udp destination port",
	[NPC_SPORT_SCTP] = "sctp source port",
	[NPC_DPORT_SCTP] = "sctp destination port",
	[NPC_LXMB]	= "Mcast/Bcast header ",
	[NPC_IPSEC_SPI]	= "SPI ",
	[NPC_MPLS1_LBTCBOS] = "lse depth 1 label tc bos",
	[NPC_MPLS1_TTL]	= "lse depth 1 ttl",
	[NPC_MPLS2_LBTCBOS] = "lse depth 2 label tc bos",
	[NPC_MPLS2_TTL]	= "lse depth 2 ttl",
	[NPC_MPLS3_LBTCBOS] = "lse depth 3 label tc bos",
	[NPC_MPLS3_TTL]	= "lse depth 3 ttl",
	[NPC_MPLS4_LBTCBOS] = "lse depth 4 label tc bos",
	[NPC_MPLS4_TTL]	= "lse depth 4 ttl",
	[NPC_UNKNOWN]	= "unknown",
};

bool npc_is_feature_supported(struct rvu *rvu, u64 features, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u64 mcam_features;
	u64 unsupported;

	mcam_features = is_npc_intf_tx(intf) ? mcam->tx_features : mcam->rx_features;
	unsupported = (mcam_features ^ features) & ~mcam_features;

	/* Return false if at least one of the input flows is not extracted */
	return !unsupported;
}

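/* Worked example (illustrative values, not from any real mkex profile):
 * if the profile only extracts DMAC and ETYPE, mcam_features has just those
 * two bits set. A request for DMAC|ETYPE|SMAC then gives
 * unsupported = (mcam_features ^ features) & ~mcam_features = BIT_ULL(NPC_SMAC),
 * so the function returns false.
 */
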
const char *npc_get_field_name(u8 hdr)
{
	if (hdr >= ARRAY_SIZE(npc_flow_names))
		return npc_flow_names[NPC_UNKNOWN];

	return npc_flow_names[hdr];
}

/* Compute keyword masks and figure out the number of keywords a field
 * spans in the key.
 */
static void npc_set_kw_masks(struct npc_mcam *mcam, u8 type,
			     u8 nr_bits, int start_kwi, int offset, u8 intf)
{
	struct npc_key_field *field = &mcam->rx_key_fields[type];
	u8 bits_in_kw;
	int max_kwi;

	if (mcam->banks_per_entry == 1)
		max_kwi = 1; /* NPC_MCAM_KEY_X1 */
	else if (mcam->banks_per_entry == 2)
		max_kwi = 3; /* NPC_MCAM_KEY_X2 */
	else
		max_kwi = 6; /* NPC_MCAM_KEY_X4 */

	if (is_npc_intf_tx(intf))
		field = &mcam->tx_key_fields[type];

	if (offset + nr_bits <= 64) {
		/* one KW only */
		if (start_kwi > max_kwi)
			return;
		field->kw_mask[start_kwi] |= GENMASK_ULL(nr_bits - 1, 0)
					     << offset;
		field->nr_kws = 1;
	} else if (offset + nr_bits > 64 &&
		   offset + nr_bits <= 128) {
		/* two KWs */
		if (start_kwi + 1 > max_kwi)
			return;
		/* first KW mask */
		bits_in_kw = 64 - offset;
		field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
					     << offset;
		/* second KW mask i.e. mask for rest of bits */
		bits_in_kw = nr_bits + offset - 64;
		field->kw_mask[start_kwi + 1] |= GENMASK_ULL(bits_in_kw - 1, 0);
		field->nr_kws = 2;
	} else {
		/* three KWs */
		if (start_kwi + 2 > max_kwi)
			return;
		/* first KW mask */
		bits_in_kw = 64 - offset;
		field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
					     << offset;
		/* second KW mask */
		field->kw_mask[start_kwi + 1] = ~0ULL;
		/* third KW mask i.e. mask for rest of bits */
		bits_in_kw = nr_bits + offset - 128;
		field->kw_mask[start_kwi + 2] |= GENMASK_ULL(bits_in_kw - 1, 0);
		field->nr_kws = 3;
	}
}

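/* Worked example (hypothetical field, assuming NPC_MCAM_KEY_X2 so
 * max_kwi == 3): a 16-bit field at key byte 7 gives start_kwi = 0 and
 * offset = 56. Since offset + nr_bits == 72 falls in the two-KW case,
 * kw_mask[0] gets bits [63:56], kw_mask[1] gets bits [7:0], and
 * nr_kws = 2.
 */
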
/* Helper function to figure out whether the field exists in the key */
static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct npc_key_field *input;

	input = &mcam->rx_key_fields[type];
	if (is_npc_intf_tx(intf))
		input = &mcam->tx_key_fields[type];

	return input->nr_kws > 0;
}

static bool npc_is_same(struct npc_key_field *input,
			struct npc_key_field *field)
{
	return memcmp(&input->layer_mdata, &field->layer_mdata,
		      sizeof(struct npc_layer_mdata)) == 0;
}

static void npc_set_layer_mdata(struct npc_mcam *mcam, enum key_fields type,
				u64 cfg, u8 lid, u8 lt, u8 intf)
{
	struct npc_key_field *input = &mcam->rx_key_fields[type];

	if (is_npc_intf_tx(intf))
		input = &mcam->tx_key_fields[type];

	input->layer_mdata.hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
	input->layer_mdata.key = FIELD_GET(NPC_KEY_OFFSET, cfg);
	input->layer_mdata.len = FIELD_GET(NPC_BYTESM, cfg) + 1;
	input->layer_mdata.ltype = lt;
	input->layer_mdata.lid = lid;
}

static bool npc_check_overlap_fields(struct npc_key_field *input1,
				     struct npc_key_field *input2)
{
	int kwi;

	/* Fields with the same layer id but different ltypes are mutually
	 * exclusive, hence they can safely overlap.
	 */
	if (input1->layer_mdata.lid == input2->layer_mdata.lid &&
	    input1->layer_mdata.ltype != input2->layer_mdata.ltype)
		return false;

	for (kwi = 0; kwi < NPC_MAX_KWS_IN_KEY; kwi++) {
		if (input1->kw_mask[kwi] & input2->kw_mask[kwi])
			return true;
	}

	return false;
}

/* Helper function to check whether a given field overlaps with any other
 * field in the key. Due to limitations on the key size and the key
 * extraction profile in use, higher layers can overwrite a lower layer's
 * header fields, hence overlap needs to be checked.
 */
static bool npc_check_overlap(struct rvu *rvu, int blkaddr,
			      enum key_fields type, u8 start_lid, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct npc_key_field *dummy, *input;
	int start_kwi, offset;
	u8 nr_bits, lid, lt, ld;
	u64 cfg;

	dummy = &mcam->rx_key_fields[NPC_UNKNOWN];
	input = &mcam->rx_key_fields[type];

	if (is_npc_intf_tx(intf)) {
		dummy = &mcam->tx_key_fields[NPC_UNKNOWN];
		input = &mcam->tx_key_fields[type];
	}

	for (lid = start_lid; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++) {
				cfg = rvu_read64(rvu, blkaddr,
						 NPC_AF_INTFX_LIDX_LTX_LDX_CFG
						 (intf, lid, lt, ld));
				if (!FIELD_GET(NPC_LDATA_EN, cfg))
					continue;
				memset(dummy, 0, sizeof(struct npc_key_field));
				npc_set_layer_mdata(mcam, NPC_UNKNOWN, cfg,
						    lid, lt, intf);
				/* exclude input */
				if (npc_is_same(input, dummy))
					continue;
				start_kwi = dummy->layer_mdata.key / 8;
				offset = (dummy->layer_mdata.key * 8) % 64;
				nr_bits = dummy->layer_mdata.len * 8;
				/* form KW masks */
				npc_set_kw_masks(mcam, NPC_UNKNOWN, nr_bits,
						 start_kwi, offset, intf);
				/* check if any of the input field's bits fall
				 * within any other field's bits
				 */
				if (npc_check_overlap_fields(dummy, input))
					return true;
			}
		}
	}

	return false;
}

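/* Illustrative scenario: if the profile extracts an LC field and an LD
 * field into the same key bits, their kw_masks intersect and the overlap
 * is reported. Two LB entries with different ltypes (e.g. CTAG vs
 * STAG_QINQ) sharing key bits are fine, since a packet can parse as only
 * one LB ltype at a time.
 */
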
static bool npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
			    u8 intf)
{
	if (!npc_is_field_present(rvu, type, intf) ||
	    npc_check_overlap(rvu, blkaddr, type, 0, intf))
		return false;
	return true;
}

static void npc_scan_exact_result(struct npc_mcam *mcam, u8 bit_number,
				  u8 key_nibble, u8 intf)
{
	u8 offset = (key_nibble * 4) % 64; /* offset within key word */
	u8 kwi = (key_nibble * 4) / 64; /* which word in key */
	u8 nr_bits = 4; /* bits in a nibble */
	u8 type;

	switch (bit_number) {
	case 40 ... 43:
		type = NPC_EXACT_RESULT;
		break;

	default:
		return;
	}
	npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
}

static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
				  u8 key_nibble, u8 intf)
{
	u8 offset = (key_nibble * 4) % 64; /* offset within key word */
	u8 kwi = (key_nibble * 4) / 64; /* which word in key */
	u8 nr_bits = 4; /* bits in a nibble */
	u8 type;

	switch (bit_number) {
	case 0 ... 2:
		type = NPC_CHAN;
		break;
	case 3:
		type = NPC_ERRLEV;
		break;
	case 4 ... 5:
		type = NPC_ERRCODE;
		break;
	case 6:
		type = NPC_LXMB;
		break;
	/* check for LTYPE only as of now */
	case 9:
		type = NPC_LA;
		break;
	case 12:
		type = NPC_LB;
		break;
	case 15:
		type = NPC_LC;
		break;
	case 18:
		type = NPC_LD;
		break;
	case 21:
		type = NPC_LE;
		break;
	case 24:
		type = NPC_LF;
		break;
	case 27:
		type = NPC_LG;
		break;
	case 30:
		type = NPC_LH;
		break;
	default:
		return;
	}

	npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
}

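/* Example nibble placement (hypothetical KEX config): if only parse-result
 * bits 0..2 (channel) and bit 9 (LA ltype) are enabled, key_nibble runs
 * 0..3, so NPC_CHAN occupies kwi 0 at offsets 0, 4 and 8, and the LA
 * ltype nibble lands at kwi 0, offset 12.
 */
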
static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct npc_key_field *key_fields;
	/* Ether type can come from three layers
	 * (ethernet, single tagged, double tagged)
	 */
	struct npc_key_field *etype_ether;
	struct npc_key_field *etype_tag1;
	struct npc_key_field *etype_tag2;
	/* Outer VLAN TCI can come from two layers
	 * (single tagged, double tagged)
	 */
	struct npc_key_field *vlan_tag1;
	struct npc_key_field *vlan_tag2;
	/* Inner VLAN TCI for double tagged frames */
	struct npc_key_field *vlan_tag3;
	u64 *features;
	u8 start_lid;
	int i;

	key_fields = mcam->rx_key_fields;
	features = &mcam->rx_features;

	if (is_npc_intf_tx(intf)) {
		key_fields = mcam->tx_key_fields;
		features = &mcam->tx_features;
	}

	/* Handle header fields which can come from multiple layers like
	 * etype, outer vlan tci. These fields should have the same position
	 * in the key across layers; otherwise installing a single MCAM rule
	 * would need more than one entry, which complicates MCAM space
	 * management.
	 */
	etype_ether = &key_fields[NPC_ETYPE_ETHER];
	etype_tag1 = &key_fields[NPC_ETYPE_TAG1];
	etype_tag2 = &key_fields[NPC_ETYPE_TAG2];
	vlan_tag1 = &key_fields[NPC_VLAN_TAG1];
	vlan_tag2 = &key_fields[NPC_VLAN_TAG2];
	vlan_tag3 = &key_fields[NPC_VLAN_TAG3];

	/* if the programmed key profile does not extract Ethertype at all */
	if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws) {
		dev_err(rvu->dev, "mkex: Ethertype is not extracted.\n");
		goto vlan_tci;
	}

	/* if the programmed key profile extracts Ethertype from one layer */
	if (etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
		key_fields[NPC_ETYPE] = *etype_ether;
	if (!etype_ether->nr_kws && etype_tag1->nr_kws && !etype_tag2->nr_kws)
		key_fields[NPC_ETYPE] = *etype_tag1;
	if (!etype_ether->nr_kws && !etype_tag1->nr_kws && etype_tag2->nr_kws)
		key_fields[NPC_ETYPE] = *etype_tag2;

	/* if the programmed key profile extracts Ethertype from multiple layers */
	if (etype_ether->nr_kws && etype_tag1->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
			if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i]) {
				dev_err(rvu->dev, "mkex: Etype pos is different for untagged and tagged pkts.\n");
				goto vlan_tci;
			}
		}
		key_fields[NPC_ETYPE] = *etype_tag1;
	}
	if (etype_ether->nr_kws && etype_tag2->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
			if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i]) {
				dev_err(rvu->dev, "mkex: Etype pos is different for untagged and double tagged pkts.\n");
				goto vlan_tci;
			}
		}
		key_fields[NPC_ETYPE] = *etype_tag2;
	}
	if (etype_tag1->nr_kws && etype_tag2->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
			if (etype_tag1->kw_mask[i] !=
			    etype_tag2->kw_mask[i]) {
				dev_err(rvu->dev, "mkex: Etype pos is different for tagged and double tagged pkts.\n");
				goto vlan_tci;
			}
		}
		key_fields[NPC_ETYPE] = *etype_tag2;
	}

	/* check that no higher layer overwrites Ethertype */
	start_lid = key_fields[NPC_ETYPE].layer_mdata.lid + 1;
	if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf)) {
		dev_err(rvu->dev, "mkex: Ethertype is overwritten by higher layers.\n");
		goto vlan_tci;
	}
	*features |= BIT_ULL(NPC_ETYPE);
vlan_tci:
	/* if the key profile does not extract outer vlan tci at all */
	if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws) {
		dev_err(rvu->dev, "mkex: Outer vlan tci is not extracted.\n");
		goto done;
	}

	/* if the key profile extracts outer vlan tci from one layer */
	if (vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
		key_fields[NPC_OUTER_VID] = *vlan_tag1;
	if (!vlan_tag1->nr_kws && vlan_tag2->nr_kws)
		key_fields[NPC_OUTER_VID] = *vlan_tag2;

	/* if the key profile extracts outer vlan tci from multiple layers */
	if (vlan_tag1->nr_kws && vlan_tag2->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
			if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i]) {
				dev_err(rvu->dev, "mkex: Outer vlan tci pos is different for tagged and double tagged pkts.\n");
				goto done;
			}
		}
		key_fields[NPC_OUTER_VID] = *vlan_tag2;
	}
	/* check that no higher layer overwrites outer vlan tci */
	start_lid = key_fields[NPC_OUTER_VID].layer_mdata.lid + 1;
	if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf)) {
		dev_err(rvu->dev, "mkex: Outer vlan tci is overwritten by higher layers.\n");
		goto done;
	}
	*features |= BIT_ULL(NPC_OUTER_VID);

	/* If the key profile extracts inner vlan tci */
	if (vlan_tag3->nr_kws) {
		key_fields[NPC_INNER_VID] = *vlan_tag3;
		*features |= BIT_ULL(NPC_INNER_VID);
	}
done:
	return;
}

static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
			   u8 lt, u64 cfg, u8 intf)
{
	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u8 hdr, key, nr_bytes, bit_offset;
	u8 la_ltype, la_start;
	/* starting KW index and starting bit position */
	int start_kwi, offset;

	nr_bytes = FIELD_GET(NPC_BYTESM, cfg) + 1;
	hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
	key = FIELD_GET(NPC_KEY_OFFSET, cfg);

	/* For Tx, Layer A has NIX_INST_HDR_S (8 bytes) preceding
	 * the ethernet header.
	 */
	if (is_npc_intf_tx(intf)) {
		la_ltype = NPC_LT_LA_IH_NIX_ETHER;
		la_start = 8;
	} else {
		la_ltype = NPC_LT_LA_ETHER;
		la_start = 0;
	}

#define NPC_SCAN_HDR(name, hlid, hlt, hstart, hlen)			       \
do {									       \
	start_kwi = key / 8;						       \
	offset = (key * 8) % 64;					       \
	if (lid == (hlid) && lt == (hlt)) {				       \
		if ((hstart) >= hdr &&					       \
		    ((hstart) + (hlen)) <= (hdr + nr_bytes)) {		       \
			bit_offset = (hdr + nr_bytes - (hstart) - (hlen)) * 8; \
			npc_set_layer_mdata(mcam, (name), cfg, lid, lt, intf); \
			offset += bit_offset;				       \
			start_kwi += offset / 64;			       \
			offset %= 64;					       \
			npc_set_kw_masks(mcam, (name), (hlen) * 8,	       \
					 start_kwi, offset, intf);	       \
		}							       \
	}								       \
} while (0)

	/* List LID, LTYPE, start offset from the layer and length (in bytes)
	 * of the packet header fields below.
	 * Example: Source IP is 4 bytes and starts at the 12th byte of the
	 * IP header.
	 */
	NPC_SCAN_HDR(NPC_TOS, NPC_LID_LC, NPC_LT_LC_IP, 1, 1);
	NPC_SCAN_HDR(NPC_IPFRAG_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 6, 1);
	NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
	NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
	NPC_SCAN_HDR(NPC_IPFRAG_IPV6, NPC_LID_LC, NPC_LT_LC_IP6_EXT, 6, 1);
	if (rvu->hw->cap.npc_hash_extract) {
		if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][0])
			NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 4);
		else
			NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);

		if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][1])
			NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 4);
		else
			NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
	} else {
		NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
		NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
	}

	NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2);
	NPC_SCAN_HDR(NPC_DPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 2, 2);
	NPC_SCAN_HDR(NPC_SPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 0, 2);
	NPC_SCAN_HDR(NPC_DPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 2, 2);
	NPC_SCAN_HDR(NPC_SPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 0, 2);
	NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
	NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
	NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
	NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
	NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
	NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
	NPC_SCAN_HDR(NPC_VLAN_TAG3, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 6, 2);
	NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);

	NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LD, NPC_LT_LD_AH, 4, 4);
	NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LE, NPC_LT_LE_ESP, 0, 4);
	NPC_SCAN_HDR(NPC_MPLS1_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 0, 3);
	NPC_SCAN_HDR(NPC_MPLS1_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 3, 1);
	NPC_SCAN_HDR(NPC_MPLS2_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 4, 3);
	NPC_SCAN_HDR(NPC_MPLS2_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 7, 1);
	NPC_SCAN_HDR(NPC_MPLS3_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 8, 3);
	NPC_SCAN_HDR(NPC_MPLS3_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 11, 1);
	NPC_SCAN_HDR(NPC_MPLS4_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 12, 3);
	NPC_SCAN_HDR(NPC_MPLS4_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 15, 1);

	/* SMAC follows the DMAC (which is 6 bytes) */
	NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start + 6, 6);
	/* PF_FUNC is 2 bytes at the 0th byte of NPC_LT_LA_IH_NIX_ETHER */
	NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2);
}

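/* NPC_SCAN_HDR worked example (hypothetical LD config): suppose the
 * profile extracts 8 bytes of the IPv4 header starting at header byte 10
 * (hdr = 10, nr_bytes = 8) into key byte 16. For NPC_SIP_IPV4 (hstart 12,
 * hlen 4): 12 >= 10 and 12 + 4 <= 18, so the field is covered;
 * bit_offset = (10 + 8 - 12 - 4) * 8 = 16, start_kwi = 2, offset = 16,
 * i.e. the source IP occupies bits [47:16] of KW2.
 */
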
static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u64 *features = &mcam->rx_features;
	u64 tcp_udp_sctp;
	int hdr;

	if (is_npc_intf_tx(intf))
		features = &mcam->tx_features;

	for (hdr = NPC_DMAC; hdr < NPC_HEADER_FIELDS_MAX; hdr++) {
		if (npc_check_field(rvu, blkaddr, hdr, intf))
			*features |= BIT_ULL(hdr);
	}

	tcp_udp_sctp = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) |
		       BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
		       BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP);

	/* for tcp/udp/sctp the corresponding layer type should be in the key */
	if (*features & tcp_udp_sctp) {
		if (!npc_check_field(rvu, blkaddr, NPC_LD, intf))
			*features &= ~tcp_udp_sctp;
		else
			*features |= BIT_ULL(NPC_IPPROTO_TCP) |
				     BIT_ULL(NPC_IPPROTO_UDP) |
				     BIT_ULL(NPC_IPPROTO_SCTP);
	}

	/* for AH/ICMP/ICMPv6, check if the corresponding layer type is present in the key */
	if (npc_check_field(rvu, blkaddr, NPC_LD, intf)) {
		*features |= BIT_ULL(NPC_IPPROTO_AH);
		*features |= BIT_ULL(NPC_IPPROTO_ICMP);
		*features |= BIT_ULL(NPC_IPPROTO_ICMP6);
	}

	/* for ESP, check if the corresponding layer type is present in the key */
	if (npc_check_field(rvu, blkaddr, NPC_LE, intf))
		*features |= BIT_ULL(NPC_IPPROTO_ESP);

	/* for vlan the corresponding layer type should be in the key */
	if (*features & BIT_ULL(NPC_OUTER_VID))
		if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
			*features &= ~BIT_ULL(NPC_OUTER_VID);

	/* Set the SPI flag only if AH/ESP and IPSEC_SPI are in the key */
	if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) &&
	    (*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH))))
		*features |= BIT_ULL(NPC_IPSEC_SPI);

	/* for vlan ethertypes the corresponding layer type should be in the key */
	if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
		*features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG) |
			     BIT_ULL(NPC_VLAN_ETYPE_STAG);

	/* for L2M/L2B/L3M/L3B, check if the type is present in the key */
	if (npc_check_field(rvu, blkaddr, NPC_LXMB, intf))
		*features |= BIT_ULL(NPC_LXMB);

	for (hdr = NPC_MPLS1_LBTCBOS; hdr <= NPC_MPLS4_TTL; hdr++) {
		if (npc_check_field(rvu, blkaddr, hdr, intf))
			*features |= BIT_ULL(hdr);
	}
}

/* Scan the key extraction profile and record how fields of our interest
 * fill the key structure. Also verify that Channel and DMAC exist in
 * the key and are not overwritten by other header fields.
 */
static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u8 lid, lt, ld, bitnr;
	u64 cfg, masked_cfg;
	u8 key_nibble = 0;

	/* Scan and note how the parse result is going to be in the key.
	 * A bit set in PARSE_NIBBLE_ENA corresponds to a nibble from
	 * the parse result in the key. The enabled nibbles from the parse
	 * result will be concatenated in the key.
	 */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf));
	masked_cfg = cfg & NPC_PARSE_NIBBLE;
	for_each_set_bit(bitnr, (unsigned long *)&masked_cfg, 31) {
		npc_scan_parse_result(mcam, bitnr, key_nibble, intf);
		key_nibble++;
	}

	/* Ignore exact match bits for mcam entries except the first rule,
	 * which is drop on hit. This first rule is configured explicitly by
	 * the exact match code.
	 */
	masked_cfg = cfg & NPC_EXACT_NIBBLE;
	bitnr = NPC_EXACT_NIBBLE_START;
	for_each_set_bit_from(bitnr, (unsigned long *)&masked_cfg, NPC_EXACT_NIBBLE_END + 1) {
		npc_scan_exact_result(mcam, bitnr, key_nibble, intf);
		key_nibble++;
	}

	/* Scan and note how layer data is going to be in the key */
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++) {
				cfg = rvu_read64(rvu, blkaddr,
						 NPC_AF_INTFX_LIDX_LTX_LDX_CFG
						 (intf, lid, lt, ld));
				if (!FIELD_GET(NPC_LDATA_EN, cfg))
					continue;
				npc_scan_ldata(rvu, blkaddr, lid, lt, cfg,
					       intf);
			}
		}
	}

	return 0;
}

static int npc_scan_verify_kex(struct rvu *rvu, int blkaddr)
{
	int err;

	err = npc_scan_kex(rvu, blkaddr, NIX_INTF_RX);
	if (err)
		return err;

	err = npc_scan_kex(rvu, blkaddr, NIX_INTF_TX);
	if (err)
		return err;

	/* Channel is mandatory */
	if (!npc_is_field_present(rvu, NPC_CHAN, NIX_INTF_RX)) {
		dev_err(rvu->dev, "Channel not present in Key\n");
		return -EINVAL;
	}
	/* check that none of the fields overwrite channel */
	if (npc_check_overlap(rvu, blkaddr, NPC_CHAN, 0, NIX_INTF_RX)) {
		dev_err(rvu->dev, "Channel cannot be overwritten\n");
		return -EINVAL;
	}

	npc_set_features(rvu, blkaddr, NIX_INTF_TX);
	npc_set_features(rvu, blkaddr, NIX_INTF_RX);
	npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_TX);
	npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_RX);

	return 0;
}

int npc_flow_steering_init(struct rvu *rvu, int blkaddr)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;

	INIT_LIST_HEAD(&mcam->mcam_rules);

	return npc_scan_verify_kex(rvu, blkaddr);
}

static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u64 *mcam_features = &mcam->rx_features;
	u64 unsupported;
	u8 bit;

	if (is_npc_intf_tx(intf))
		mcam_features = &mcam->tx_features;

	unsupported = (*mcam_features ^ features) & ~(*mcam_features);
	if (unsupported) {
		dev_warn(rvu->dev, "Unsupported flow(s):\n");
		for_each_set_bit(bit, (unsigned long *)&unsupported, 64)
			dev_warn(rvu->dev, "%s ", npc_get_field_name(bit));
		return -EOPNOTSUPP;
	}

	return 0;
}

/* npc_update_entry - Based on the masks generated during
 * the key scanning, updates the given entry with value and
 * masks for the field of interest. Maximum 16 bytes of a packet
 * header can be extracted by HW, hence lo and hi are sufficient.
 * When the field is 8 bytes or less, hi must be 0 for both value
 * and mask.
 *
 * If an exact match on the value is required then the mask should
 * be all 1's. Any bit that is 0 in the mask makes the corresponding
 * value bit a don't care.
 */
void npc_update_entry(struct rvu *rvu, enum key_fields type,
		      struct mcam_entry *entry, u64 val_lo,
		      u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct mcam_entry dummy = { {0} };
	struct npc_key_field *field;
	u64 kw1, kw2, kw3;
	u8 shift;
	int i;

	field = &mcam->rx_key_fields[type];
	if (is_npc_intf_tx(intf))
		field = &mcam->tx_key_fields[type];

	if (!field->nr_kws)
		return;

	for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
		if (!field->kw_mask[i])
			continue;
		/* place key value in kw[x] */
		shift = __ffs64(field->kw_mask[i]);
		/* update entry value */
		kw1 = (val_lo << shift) & field->kw_mask[i];
		dummy.kw[i] = kw1;
		/* update entry mask */
		kw1 = (mask_lo << shift) & field->kw_mask[i];
		dummy.kw_mask[i] = kw1;

		if (field->nr_kws == 1)
			break;
		/* place remaining bits of key value in kw[x + 1] */
		if (field->nr_kws == 2) {
			/* update entry value */
			kw2 = shift ? val_lo >> (64 - shift) : 0;
			kw2 |= (val_hi << shift);
			kw2 &= field->kw_mask[i + 1];
			dummy.kw[i + 1] = kw2;
			/* update entry mask */
			kw2 = shift ? mask_lo >> (64 - shift) : 0;
			kw2 |= (mask_hi << shift);
			kw2 &= field->kw_mask[i + 1];
			dummy.kw_mask[i + 1] = kw2;
			break;
		}
		/* place remaining bits of key value in kw[x + 1], kw[x + 2] */
		if (field->nr_kws == 3) {
			/* update entry value */
			kw2 = shift ? val_lo >> (64 - shift) : 0;
			kw2 |= (val_hi << shift);
			kw2 &= field->kw_mask[i + 1];
			kw3 = shift ? val_hi >> (64 - shift) : 0;
			kw3 &= field->kw_mask[i + 2];
			dummy.kw[i + 1] = kw2;
			dummy.kw[i + 2] = kw3;
			/* update entry mask */
			kw2 = shift ? mask_lo >> (64 - shift) : 0;
			kw2 |= (mask_hi << shift);
			kw2 &= field->kw_mask[i + 1];
			kw3 = shift ? mask_hi >> (64 - shift) : 0;
			kw3 &= field->kw_mask[i + 2];
			dummy.kw_mask[i + 1] = kw2;
			dummy.kw_mask[i + 2] = kw3;
			break;
		}
	}
	/* dummy is ready with values and masks for the given key field;
	 * now clear and update the input entry with those
	 */
	for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
		if (!field->kw_mask[i])
			continue;
		entry->kw[i] &= ~field->kw_mask[i];
		entry->kw_mask[i] &= ~field->kw_mask[i];

		entry->kw[i] |= dummy.kw[i];
		entry->kw_mask[i] |= dummy.kw_mask[i];
	}
}

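/* Placement example (hypothetical mask): for a field with nr_kws = 1 and
 * kw_mask[2] = GENMASK_ULL(47, 16), shift = __ffs64(kw_mask[2]) = 16, so a
 * 32-bit value v is written as (v << 16) & kw_mask[2] into kw[2], and the
 * caller-supplied mask is placed the same way into the entry's kw_mask[2].
 */
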
static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry,
				 u64 features, struct flow_msg *pkt,
				 struct flow_msg *mask,
				 struct rvu_npc_mcam_rule *output, u8 intf)
{
	u32 src_ip[IPV6_WORDS], src_ip_mask[IPV6_WORDS];
	u32 dst_ip[IPV6_WORDS], dst_ip_mask[IPV6_WORDS];
	struct flow_msg *opkt = &output->packet;
	struct flow_msg *omask = &output->mask;
	u64 mask_lo, mask_hi;
	u64 val_lo, val_hi;

	/* For an ipv6 address fe80::2c68:63ff:fe5e:2d0a the packet
	 * values to be programmed in MCAM should be as below:
	 * val_high: 0xfe80000000000000
	 * val_low: 0x2c6863fffe5e2d0a
	 */
	if (features & BIT_ULL(NPC_SIP_IPV6)) {
		be32_to_cpu_array(src_ip_mask, mask->ip6src, IPV6_WORDS);
		be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);

		mask_hi = (u64)src_ip_mask[0] << 32 | src_ip_mask[1];
		mask_lo = (u64)src_ip_mask[2] << 32 | src_ip_mask[3];
		val_hi = (u64)src_ip[0] << 32 | src_ip[1];
		val_lo = (u64)src_ip[2] << 32 | src_ip[3];

		npc_update_entry(rvu, NPC_SIP_IPV6, entry, val_lo, val_hi,
				 mask_lo, mask_hi, intf);
		memcpy(opkt->ip6src, pkt->ip6src, sizeof(opkt->ip6src));
		memcpy(omask->ip6src, mask->ip6src, sizeof(omask->ip6src));
	}
	if (features & BIT_ULL(NPC_DIP_IPV6)) {
		be32_to_cpu_array(dst_ip_mask, mask->ip6dst, IPV6_WORDS);
		be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);

		mask_hi = (u64)dst_ip_mask[0] << 32 | dst_ip_mask[1];
		mask_lo = (u64)dst_ip_mask[2] << 32 | dst_ip_mask[3];
		val_hi = (u64)dst_ip[0] << 32 | dst_ip[1];
		val_lo = (u64)dst_ip[2] << 32 | dst_ip[3];

		npc_update_entry(rvu, NPC_DIP_IPV6, entry, val_lo, val_hi,
				 mask_lo, mask_hi, intf);
		memcpy(opkt->ip6dst, pkt->ip6dst, sizeof(opkt->ip6dst));
		memcpy(omask->ip6dst, mask->ip6dst, sizeof(omask->ip6dst));
	}
}

static void npc_update_vlan_features(struct rvu *rvu, struct mcam_entry *entry,
				     u64 features, u8 intf)
{
	bool ctag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_CTAG));
	bool stag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_STAG));
	bool vid = !!(features & BIT_ULL(NPC_OUTER_VID));

	/* If only a VLAN id is given then always match the outer VLAN id */
	if (vid && !ctag && !stag) {
		npc_update_entry(rvu, NPC_LB, entry,
				 NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0,
				 NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf);
		return;
	}
	if (ctag)
		npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_CTAG, 0,
				 ~0ULL, 0, intf);
	if (stag)
		npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_STAG_QINQ, 0,
				 ~0ULL, 0, intf);
}

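/* Note on the VID-only case above: the entry value is the OR of the two LB
 * ltypes and the entry mask is their AND, so only the ltype bits common to
 * CTAG and STAG_QINQ are compared and a packet carrying either tag type
 * matches the rule.
 */
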
static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
			    u64 features, struct flow_msg *pkt,
			    struct flow_msg *mask,
			    struct rvu_npc_mcam_rule *output, u8 intf,
			    int blkaddr)
{
	u64 dmac_mask = ether_addr_to_u64(mask->dmac);
	u64 smac_mask = ether_addr_to_u64(mask->smac);
	u64 dmac_val = ether_addr_to_u64(pkt->dmac);
	u64 smac_val = ether_addr_to_u64(pkt->smac);
	struct flow_msg *opkt = &output->packet;
	struct flow_msg *omask = &output->mask;

	if (!features)
		return;

	/* For tcp/udp/sctp LTYPE should be present in entry */
	if (features & BIT_ULL(NPC_IPPROTO_TCP))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP,
				 0, ~0ULL, 0, intf);
	if (features & BIT_ULL(NPC_IPPROTO_UDP))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP,
				 0, ~0ULL, 0, intf);
	if (features & BIT_ULL(NPC_IPPROTO_SCTP))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP,
				 0, ~0ULL, 0, intf);
	if (features & BIT_ULL(NPC_IPPROTO_ICMP))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP,
				 0, ~0ULL, 0, intf);
	if (features & BIT_ULL(NPC_IPPROTO_ICMP6))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP6,
				 0, ~0ULL, 0, intf);

	/* For AH, LTYPE should be present in entry */
	if (features & BIT_ULL(NPC_IPPROTO_AH))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_AH,
				 0, ~0ULL, 0, intf);
	/* For ESP, LTYPE should be present in entry */
	if (features & BIT_ULL(NPC_IPPROTO_ESP))
		npc_update_entry(rvu, NPC_LE, entry, NPC_LT_LE_ESP,
				 0, ~0ULL, 0, intf);

	if (features & BIT_ULL(NPC_LXMB)) {
		output->lxmb = is_broadcast_ether_addr(pkt->dmac) ? 2 : 1;
		npc_update_entry(rvu, NPC_LXMB, entry, output->lxmb, 0,
				 output->lxmb, 0, intf);
	}

#define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi)	      \
do {									      \
	if (features & BIT_ULL((field))) {				      \
		npc_update_entry(rvu, (field), entry, (val_lo), (val_hi),    \
				 (mask_lo), (mask_hi), intf);		      \
		memcpy(&opkt->member, &pkt->member, sizeof(pkt->member));     \
		memcpy(&omask->member, &mask->member, sizeof(mask->member));  \
	}								      \
} while (0)

	NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0);

	NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0);
	NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0,
		       ntohs(mask->etype), 0);
	NPC_WRITE_FLOW(NPC_TOS, tos, pkt->tos, 0, mask->tos, 0);
	NPC_WRITE_FLOW(NPC_IPFRAG_IPV4, ip_flag, pkt->ip_flag, 0,
		       mask->ip_flag, 0);
	NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0,
		       ntohl(mask->ip4src), 0);
	NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0,
		       ntohl(mask->ip4dst), 0);
	NPC_WRITE_FLOW(NPC_SPORT_TCP, sport, ntohs(pkt->sport), 0,
		       ntohs(mask->sport), 0);
	NPC_WRITE_FLOW(NPC_SPORT_UDP, sport, ntohs(pkt->sport), 0,
		       ntohs(mask->sport), 0);
	NPC_WRITE_FLOW(NPC_DPORT_TCP, dport, ntohs(pkt->dport), 0,
		       ntohs(mask->dport), 0);
	NPC_WRITE_FLOW(NPC_DPORT_UDP, dport, ntohs(pkt->dport), 0,
		       ntohs(mask->dport), 0);
	NPC_WRITE_FLOW(NPC_SPORT_SCTP, sport, ntohs(pkt->sport), 0,
		       ntohs(mask->sport), 0);
	NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0,
		       ntohs(mask->dport), 0);

	NPC_WRITE_FLOW(NPC_IPSEC_SPI, spi, ntohl(pkt->spi), 0,
		       ntohl(mask->spi), 0);

	NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0,
		       ntohs(mask->vlan_tci), 0);
	NPC_WRITE_FLOW(NPC_INNER_VID, vlan_itci, ntohs(pkt->vlan_itci), 0,
		       ntohs(mask->vlan_itci), 0);

	NPC_WRITE_FLOW(NPC_MPLS1_LBTCBOS, mpls_lse,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
				 pkt->mpls_lse[0]), 0,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
				 mask->mpls_lse[0]), 0);
	NPC_WRITE_FLOW(NPC_MPLS1_TTL, mpls_lse,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
				 pkt->mpls_lse[0]), 0,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
				 mask->mpls_lse[0]), 0);
	NPC_WRITE_FLOW(NPC_MPLS2_LBTCBOS, mpls_lse,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
				 pkt->mpls_lse[1]), 0,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
				 mask->mpls_lse[1]), 0);
	NPC_WRITE_FLOW(NPC_MPLS2_TTL, mpls_lse,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
				 pkt->mpls_lse[1]), 0,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
				 mask->mpls_lse[1]), 0);
	NPC_WRITE_FLOW(NPC_MPLS3_LBTCBOS, mpls_lse,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
				 pkt->mpls_lse[2]), 0,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
				 mask->mpls_lse[2]), 0);
	NPC_WRITE_FLOW(NPC_MPLS3_TTL, mpls_lse,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
				 pkt->mpls_lse[2]), 0,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
				 mask->mpls_lse[2]), 0);
	NPC_WRITE_FLOW(NPC_MPLS4_LBTCBOS, mpls_lse,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
				 pkt->mpls_lse[3]), 0,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
				 mask->mpls_lse[3]), 0);
	NPC_WRITE_FLOW(NPC_MPLS4_TTL, mpls_lse,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
				 pkt->mpls_lse[3]), 0,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
				 mask->mpls_lse[3]), 0);

	NPC_WRITE_FLOW(NPC_IPFRAG_IPV6, next_header, pkt->next_header, 0,
		       mask->next_header, 0);
	npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf);
	npc_update_vlan_features(rvu, entry, features, intf);

	npc_update_field_hash(rvu, intf, entry, blkaddr, features,
			      pkt, mask, opkt, omask);
}

static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam, u16 entry)
{
	struct rvu_npc_mcam_rule *iter;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		if (iter->entry == entry) {
			mutex_unlock(&mcam->lock);
			return iter;
		}
	}
	mutex_unlock(&mcam->lock);

	return NULL;
}

static void rvu_mcam_add_rule(struct npc_mcam *mcam,
			      struct rvu_npc_mcam_rule *rule)
{
	struct list_head *head = &mcam->mcam_rules;
	struct rvu_npc_mcam_rule *iter;

	mutex_lock(&mcam->lock);
	/* keep the rule list sorted by entry index */
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		if (iter->entry > rule->entry)
			break;
		head = &iter->list;
	}

	list_add(&rule->list, head);
	mutex_unlock(&mcam->lock);
}

static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
					      struct rvu_npc_mcam_rule *rule)
{
	struct npc_mcam_oper_counter_req free_req = { 0 };
	struct msg_rsp free_rsp;

	if (!rule->has_cntr)
		return;

	free_req.hdr.pcifunc = pcifunc;
	free_req.cntr = rule->cntr;

	rvu_mbox_handler_npc_mcam_free_counter(rvu, &free_req, &free_rsp);
	rule->has_cntr = false;
}

static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
					 struct rvu_npc_mcam_rule *rule,
					 struct npc_install_flow_rsp *rsp)
{
	struct npc_mcam_alloc_counter_req cntr_req = { 0 };
	struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
	int err;

	cntr_req.hdr.pcifunc = pcifunc;
	cntr_req.contig = true;
	cntr_req.count = 1;

	/* we try to allocate a counter to track the stats of this
	 * rule. If the counter could not be allocated then proceed
	 * without one, because counters are scarcer than entries.
	 */
	err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req,
						      &cntr_rsp);
	if (!err && cntr_rsp.count) {
		rule->cntr = cntr_rsp.cntr;
		rule->has_cntr = true;
		rsp->counter = rule->cntr;
	} else {
		rsp->counter = err;
	}
}

static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct mcam_entry *entry,
				struct npc_install_flow_req *req,
				u16 target, bool pf_set_vfs_mac)
{
	struct rvu_switch *rswitch = &rvu->rswitch;
	struct nix_rx_action action;

	if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac)
		req->chan_mask = 0x0; /* Do not care about channel */

	npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, req->chan_mask,
			 0, NIX_INTF_RX);

	*(u64 *)&action = 0x00;
	action.pf_func = target;
	action.op = req->op;
	action.index = req->index;
	action.match_id = req->match_id;
	action.flow_key_alg = req->flow_key_alg;

	if (req->op == NIX_RX_ACTION_DEFAULT) {
		if (pfvf->def_ucast_rule) {
			action = pfvf->def_ucast_rule->rx_action;
		} else {
			/* For profiles which do not extract DMAC, the default
			 * unicast entry is unused. Hence modify the action for
			 * requests which use the same action as the default
			 * unicast entry.
			 */
			*(u64 *)&action = 0;
			action.pf_func = target;
			action.op = NIX_RX_ACTIONOP_UCAST;
		}
	}

	entry->action = *(u64 *)&action;

	/* VTAG0 starts at the 0th byte of LID_B.
	 * VTAG1 starts at the 4th byte of LID_B.
	 */
	entry->vtag_action = FIELD_PREP(RX_VTAG0_VALID_BIT, req->vtag0_valid) |
			     FIELD_PREP(RX_VTAG0_TYPE_MASK, req->vtag0_type) |
			     FIELD_PREP(RX_VTAG0_LID_MASK, NPC_LID_LB) |
			     FIELD_PREP(RX_VTAG0_RELPTR_MASK, 0) |
			     FIELD_PREP(RX_VTAG1_VALID_BIT, req->vtag1_valid) |
			     FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) |
			     FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) |
			     FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4);
}

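/* RX vtag layout note: with LID_B pointing at the first VLAN tag, relptr 0
 * selects the outer TCI and relptr 4 the inner one, since the outer
 * TPID + TCI of a double tagged frame occupy the first 4 bytes.
 */
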
static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct mcam_entry *entry,
				struct npc_install_flow_req *req, u16 target)
{
	struct nix_tx_action action;
	u64 mask = ~0ULL;

	/* If AF is installing then do not care about
	 * PF_FUNC in the Send Descriptor
	 */
	if (is_pffunc_af(req->hdr.pcifunc))
		mask = 0;

	npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target),
			 0, mask, 0, NIX_INTF_TX);

	*(u64 *)&action = 0x00;
	action.op = req->op;
	action.index = req->index;
	action.match_id = req->match_id;

	entry->action = *(u64 *)&action;

	/* On TX both VTAGs are placed relative to LID_A:
	 * VTAG0 at relptr 20 and VTAG1 at relptr 24.
	 */
	entry->vtag_action = FIELD_PREP(TX_VTAG0_DEF_MASK, req->vtag0_def) |
			     FIELD_PREP(TX_VTAG0_OP_MASK, req->vtag0_op) |
			     FIELD_PREP(TX_VTAG0_LID_MASK, NPC_LID_LA) |
			     FIELD_PREP(TX_VTAG0_RELPTR_MASK, 20) |
			     FIELD_PREP(TX_VTAG1_DEF_MASK, req->vtag1_def) |
			     FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) |
			     FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) |
			     FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24);
}

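/* TX vtag layout note (inferred from la_start in npc_scan_ldata, not
 * hardware-spec confirmed): on TX, LID_A begins with the 8-byte
 * NIX_INST_HDR_S followed by the 12 bytes of DMAC + SMAC, so relptr 20
 * (8 + 12) appears to be the Ethertype position where VTAG0 is inserted,
 * with relptr 24 sitting 4 bytes further for VTAG1.
 */
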
static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
			    int nixlf, struct rvu_pfvf *pfvf,
			    struct npc_install_flow_req *req,
			    struct npc_install_flow_rsp *rsp, bool enable,
			    bool pf_set_vfs_mac)
{
	struct rvu_npc_mcam_rule *def_ucast_rule = pfvf->def_ucast_rule;
	u64 features, installed_features, missing_features = 0;
	struct npc_mcam_write_entry_req write_req = { 0 };
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule dummy = { 0 };
	struct rvu_npc_mcam_rule *rule;
	u16 owner = req->hdr.pcifunc;
	struct msg_rsp write_rsp;
	struct mcam_entry *entry;
	bool new = false;
	u16 entry_index;
	int err;

	installed_features = req->features;
	features = req->features;
	entry = &write_req.entry_data;
	entry_index = req->entry;

	npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
			req->intf, blkaddr);

	if (is_npc_intf_rx(req->intf))
		npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
	else
		npc_update_tx_entry(rvu, pfvf, entry, req, target);

	/* Default unicast rules do not exist for TX */
	if (is_npc_intf_tx(req->intf))
		goto find_rule;

	if (req->default_rule) {
		entry_index = npc_get_nixlf_mcam_index(mcam, target, nixlf,
						       NIXLF_UCAST_ENTRY);
		enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, entry_index);
	}

	/* update mcam entry with default unicast rule attributes */
	if (def_ucast_rule && (req->default_rule && req->append)) {
		missing_features = (def_ucast_rule->features ^ features) &
				   def_ucast_rule->features;
		if (missing_features)
			npc_update_flow(rvu, entry, missing_features,
					&def_ucast_rule->packet,
					&def_ucast_rule->mask,
					&dummy, req->intf,
					blkaddr);
		installed_features = req->features | missing_features;
	}

find_rule:
	rule = rvu_mcam_find_rule(mcam, entry_index);
	if (!rule) {
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;
		new = true;
	}

	/* allocate new counter if rule has no counter */
	if (!req->default_rule && req->set_cntr && !rule->has_cntr)
		rvu_mcam_add_counter_to_rule(rvu, owner, rule, rsp);

	/* if user wants to delete an existing counter for a rule then
	 * free the counter
	 */
	if (!req->set_cntr && rule->has_cntr)
		rvu_mcam_remove_counter_from_rule(rvu, owner, rule);

	write_req.hdr.pcifunc = owner;

	/* AF owns the default rules so change the owner just to relax
	 * the checks in rvu_mbox_handler_npc_mcam_write_entry
	 */
	if (req->default_rule)
		write_req.hdr.pcifunc = 0;

	write_req.entry = entry_index;
	write_req.intf = req->intf;
	write_req.enable_entry = (u8)enable;
	/* if counter is available then clear and use it */
	if (req->set_cntr && rule->has_cntr) {
		rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), req->cntr_val);
		write_req.set_cntr = 1;
		write_req.cntr = rule->cntr;
	}

	/* update rule */
	memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet));
	memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask));
	rule->entry = entry_index;
	memcpy(&rule->rx_action, &entry->action, sizeof(struct nix_rx_action));
	if (is_npc_intf_tx(req->intf))
		memcpy(&rule->tx_action, &entry->action,
		       sizeof(struct nix_tx_action));
	rule->vtag_action = entry->vtag_action;
	rule->features = installed_features;
	rule->default_rule = req->default_rule;
	rule->owner = owner;
	rule->enable = enable;
	rule->chan_mask = write_req.entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
	rule->chan = write_req.entry_data.kw[0] & NPC_KEX_CHAN_MASK;
	rule->chan &= rule->chan_mask;
	rule->lxmb = dummy.lxmb;
	if (is_npc_intf_tx(req->intf))
		rule->intf = pfvf->nix_tx_intf;
	else
		rule->intf = pfvf->nix_rx_intf;

	if (new)
		rvu_mcam_add_rule(mcam, rule);
	if (req->default_rule)
		pfvf->def_ucast_rule = rule;

	/* write to mcam entry registers */
	err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
						    &write_rsp);
	if (err) {
		rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
		if (new) {
			list_del(&rule->list);
			kfree(rule);
		}
		return err;
	}

	/* VF's MAC address is being changed via PF */
	if (pf_set_vfs_mac) {
		ether_addr_copy(pfvf->default_mac, req->packet.dmac);
		ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
		set_bit(PF_SET_VF_MAC, &pfvf->flags);
	}

	if (test_bit(PF_SET_VF_CFG, &pfvf->flags) &&
	    req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7)
		rule->vfvlan_cfg = true;

	if (is_npc_intf_rx(req->intf) && req->match_id &&
	    (req->op == NIX_RX_ACTIONOP_UCAST || req->op == NIX_RX_ACTIONOP_RSS))
		return rvu_nix_setup_ratelimit_aggr(rvu, req->hdr.pcifunc,
						    req->index, req->match_id);

	return 0;
}

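/* Append example: if the default unicast rule matches DMAC and a request
 * carrying only NPC_OUTER_VID is installed with default_rule and append
 * set, missing_features evaluates to BIT_ULL(NPC_DMAC), so the DMAC match
 * from the default rule is merged in and the entry still only steers the
 * PF/VF's own traffic.
 */
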
int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
				      struct npc_install_flow_req *req,
				      struct npc_install_flow_rsp *rsp)
{
	bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
	struct rvu_switch *rswitch = &rvu->rswitch;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;
	bool pf_set_vfs_mac = false;
	bool enable = true;
	u16 target;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0) {
		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
		return NPC_MCAM_INVALID_REQ;
	}

	if (!is_npc_interface_valid(rvu, req->intf))
		return NPC_FLOW_INTF_INVALID;

	/* If DMAC is not extracted in MKEX, rules installed by AF
	 * can rely on the L2MB bit set by the hardware protocol checker
	 * for broadcast and multicast addresses.
	 */
	if (npc_check_field(rvu, blkaddr, NPC_DMAC, req->intf))
		goto process_flow;

	if (is_pffunc_af(req->hdr.pcifunc) &&
	    req->features & BIT_ULL(NPC_DMAC)) {
		if (is_unicast_ether_addr(req->packet.dmac)) {
			dev_warn(rvu->dev,
				 "%s: mkex profile does not support ucast flow\n",
				 __func__);
			return NPC_FLOW_NOT_SUPPORTED;
		}

		if (!npc_is_field_present(rvu, NPC_LXMB, req->intf)) {
			dev_warn(rvu->dev,
				 "%s: mkex profile does not support bcast/mcast flow\n",
				 __func__);
			return NPC_FLOW_NOT_SUPPORTED;
		}

		/* Modify the feature to use LXMB instead of DMAC */
		req->features &= ~BIT_ULL(NPC_DMAC);
		req->features |= BIT_ULL(NPC_LXMB);
	}

process_flow:
	if (from_vf && req->default_rule)
		return NPC_FLOW_VF_PERM_DENIED;

	/* Each PF/VF's info is maintained in struct rvu_pfvf.
	 * The rvu_pfvf for the target PF/VF needs to be retrieved,
	 * hence modify pcifunc accordingly.
	 */

	/* AF installing for a PF/VF */
	if (!req->hdr.pcifunc)
		target = req->vf;
	/* PF installing for its VF */
	else if (!from_vf && req->vf) {
		target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf;
		pf_set_vfs_mac = req->default_rule &&
				 (req->features & BIT_ULL(NPC_DMAC));
	}
	/* msg received from PF/VF */
	else
		target = req->hdr.pcifunc;

	/* ignore chan_mask in case pf func is not AF, revisit later */
	if (!is_pffunc_af(req->hdr.pcifunc))
		req->chan_mask = 0xFFF;

	err = npc_check_unsupported_flows(rvu, req->features, req->intf);
	if (err)
		return NPC_FLOW_NOT_SUPPORTED;

	pfvf = rvu_get_pfvf(rvu, target);

	/* PF installing for its VF */
	if (req->hdr.pcifunc && !from_vf && req->vf)
		set_bit(PF_SET_VF_CFG, &pfvf->flags);

	/* update req destination mac addr */
	if ((req->features & BIT_ULL(NPC_DMAC)) && is_npc_intf_rx(req->intf) &&
	    is_zero_ether_addr(req->packet.dmac)) {
		ether_addr_copy(req->packet.dmac, pfvf->mac_addr);
		eth_broadcast_addr((u8 *)&req->mask.dmac);
	}

	/* Proceed whether a NIXLF is attached or not for TX rules */
	err = nix_get_nixlf(rvu, target, &nixlf, NULL);
	if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac)
		return NPC_FLOW_NO_NIXLF;

	/* don't enable the rule when the nixlf is not attached or initialized */
	if (!(is_nixlf_attached(rvu, target) &&
	      test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
		enable = false;

	/* Packets reaching NPC in the Tx path imply that a
	 * NIXLF is properly set up and transmitting.
	 * Hence rules can be enabled for Tx.
	 */
	if (is_npc_intf_tx(req->intf))
		enable = true;

	/* Do not allow requests from uninitialized VFs */
	if (from_vf && !enable)
		return NPC_FLOW_VF_NOT_INIT;

	/* PF sets the VF mac & the VF NIXLF is not attached, update the mac addr */
	if (pf_set_vfs_mac && !enable) {
		ether_addr_copy(pfvf->default_mac, req->packet.dmac);
		ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
		set_bit(PF_SET_VF_MAC, &pfvf->flags);
		return 0;
	}

	mutex_lock(&rswitch->switch_lock);
	err = npc_install_flow(rvu, blkaddr, target, nixlf, pfvf,
			       req, rsp, enable, pf_set_vfs_mac);
	mutex_unlock(&rswitch->switch_lock);

	return err;
}

static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule,
			   u16 pcifunc)
{
	struct npc_mcam_ena_dis_entry_req dis_req = { 0 };
	struct msg_rsp dis_rsp;

	if (rule->default_rule)
		return 0;

	if (rule->has_cntr)
		rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule);

	dis_req.hdr.pcifunc = pcifunc;
	dis_req.entry = rule->entry;

	list_del(&rule->list);
	kfree(rule);

	return rvu_mbox_handler_npc_mcam_dis_entry(rvu, &dis_req, &dis_rsp);
}

int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
				     struct npc_delete_flow_req *req,
				     struct npc_delete_flow_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule *iter, *tmp;
	u16 pcifunc = req->hdr.pcifunc;
	struct list_head del_list;
	int blkaddr;

	INIT_LIST_HEAD(&del_list);

	mutex_lock(&mcam->lock);
	list_for_each_entry_safe(iter, tmp, &mcam->mcam_rules, list) {
		if (iter->owner == pcifunc) {
			/* All rules */
			if (req->all) {
				list_move_tail(&iter->list, &del_list);
			/* Range of rules */
			} else if (req->end && iter->entry >= req->start &&
				   iter->entry <= req->end) {
				list_move_tail(&iter->list, &del_list);
			/* single rule */
			} else if (req->entry == iter->entry) {
				blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
				if (blkaddr >= 0)
					rsp->cntr_val = rvu_read64(rvu, blkaddr,
								   NPC_AF_MATCH_STATX(iter->cntr));
				list_move_tail(&iter->list, &del_list);
				break;
			}
		}
	}
	mutex_unlock(&mcam->lock);

	list_for_each_entry_safe(iter, tmp, &del_list, list) {
		u16 entry = iter->entry;

		/* clear the mcam entry's target pcifunc */
		mcam->entry2target_pffunc[entry] = 0x0;
		if (npc_delete_flow(rvu, iter, pcifunc))
			dev_err(rvu->dev, "rule deletion failed for entry:%u\n",
				entry);
	}

	return 0;
}

static int npc_update_dmac_value(struct rvu *rvu, int npcblkaddr,
				 struct rvu_npc_mcam_rule *rule,
				 struct rvu_pfvf *pfvf)
{
	struct npc_mcam_write_entry_req write_req = { 0 };
	struct mcam_entry *entry = &write_req.entry_data;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct msg_rsp rsp;
	u8 intf, enable;
	int err;

	ether_addr_copy(rule->packet.dmac, pfvf->mac_addr);

	npc_read_mcam_entry(rvu, mcam, npcblkaddr, rule->entry,
			    entry, &intf, &enable);

	npc_update_entry(rvu, NPC_DMAC, entry,
			 ether_addr_to_u64(pfvf->mac_addr), 0,
			 0xffffffffffffull, 0, intf);

	write_req.hdr.pcifunc = rule->owner;
	write_req.entry = rule->entry;
	write_req.intf = pfvf->nix_rx_intf;

	/* rvu_mbox_handler_npc_mcam_write_entry() takes mcam->lock itself,
	 * so drop the lock held by our caller around the call.
	 */
	mutex_unlock(&mcam->lock);
	err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req, &rsp);
	mutex_lock(&mcam->lock);

	return err;
}

void npc_mcam_enable_flows(struct rvu *rvu, u16 target)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, target);
	struct rvu_npc_mcam_rule *def_ucast_rule;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule *rule;
	int blkaddr, bank, index;
	u64 def_action;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	def_ucast_rule = pfvf->def_ucast_rule;

	mutex_lock(&mcam->lock);
	list_for_each_entry(rule, &mcam->mcam_rules, list) {
		if (is_npc_intf_rx(rule->intf) &&
		    rule->rx_action.pf_func == target && !rule->enable) {
			if (rule->default_rule) {
				npc_enable_mcam_entry(rvu, mcam, blkaddr,
						      rule->entry, true);
				rule->enable = true;
				continue;
			}

			if (rule->vfvlan_cfg)
				npc_update_dmac_value(rvu, blkaddr, rule, pfvf);

			if (rule->rx_action.op == NIX_RX_ACTION_DEFAULT) {
				if (!def_ucast_rule)
					continue;
				/* Use default unicast entry action */
				rule->rx_action = def_ucast_rule->rx_action;
				def_action = *(u64 *)&def_ucast_rule->rx_action;
				bank = npc_get_bank(mcam, rule->entry);
				rvu_write64(rvu, blkaddr,
					    NPC_AF_MCAMEX_BANKX_ACTION
					    (rule->entry, bank), def_action);
			}

			npc_enable_mcam_entry(rvu, mcam, blkaddr,
					      rule->entry, true);
			rule->enable = true;
		}
	}

	/* Enable MCAM entries installed by PF with target as VF pcifunc */
	for (index = 0; index < mcam->bmap_entries; index++) {
		if (mcam->entry2target_pffunc[index] == target)
			npc_enable_mcam_entry(rvu, mcam, blkaddr,
					      index, true);
	}
	mutex_unlock(&mcam->lock);
}

void npc_mcam_disable_flows(struct rvu *rvu, u16 target)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr, index;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	mutex_lock(&mcam->lock);
	/* Disable MCAM entries installed by PF with target as VF pcifunc */
	for (index = 0; index < mcam->bmap_entries; index++) {
		if (mcam->entry2target_pffunc[index] == target)
			npc_enable_mcam_entry(rvu, mcam, blkaddr,
					      index, false);
	}
	mutex_unlock(&mcam->lock);
}

/* Single drop on non hit rule starting from the 0th index. This is an
 * extension to the RPM mac filter to support more rules.
 */
int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
			       u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask,
			       u64 bcast_mcast_val, u64 bcast_mcast_mask)
{
	struct npc_mcam_alloc_counter_req cntr_req = { 0 };
	struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
	struct npc_mcam_write_entry_req req = { 0 };
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule *rule;
	struct msg_rsp rsp;
	bool enabled;
	int blkaddr;
	int err;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0) {
		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
		return -ENODEV;
	}

	/* Bail out if there is no exact match support */
	if (!rvu_npc_exact_has_match_table(rvu)) {
		dev_info(rvu->dev, "%s: No support for exact match feature\n", __func__);
		return -EINVAL;
	}

	/* If the 0th entry is already used, return an error */
	enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_idx);
	if (enabled) {
		dev_err(rvu->dev, "%s: failed to add single drop on non hit rule at index %d\n",
			__func__, mcam_idx);
		return -EINVAL;
	}

	/* Add this entry to the mcam rules list */
	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	/* Disable the rule by default. It is enabled when the first dmac
	 * filter is installed.
	 */
	rule->enable = false;
	rule->chan = chan_val;
	rule->chan_mask = chan_mask;
	rule->entry = mcam_idx;
	rvu_mcam_add_rule(mcam, rule);

	/* Reserve slot 0 */
	npc_mcam_rsrcs_reserve(rvu, blkaddr, mcam_idx);

	/* Allocate a counter for this single drop on non hit rule */
	cntr_req.hdr.pcifunc = 0; /* AF request */
	cntr_req.contig = true;
	cntr_req.count = 1;
	err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to allocate counter for drop rule (err=%d)\n",
			__func__, err);
		return -EFAULT;
	}
	*counter_idx = cntr_rsp.cntr;

	/* Fill in the fields for this mcam entry */
	npc_update_entry(rvu, NPC_EXACT_RESULT, &req.entry_data, exact_val, 0,
			 exact_mask, 0, NIX_INTF_RX);
	npc_update_entry(rvu, NPC_CHAN, &req.entry_data, chan_val, 0,
			 chan_mask, 0, NIX_INTF_RX);
	npc_update_entry(rvu, NPC_LXMB, &req.entry_data, bcast_mcast_val, 0,
			 bcast_mcast_mask, 0, NIX_INTF_RX);

	req.intf = NIX_INTF_RX;
	req.set_cntr = true;
	req.cntr = cntr_rsp.cntr;
	req.entry = mcam_idx;

	err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &req, &rsp);
	if (err) {
		dev_err(rvu->dev, "%s: Installation of single drop on non hit rule at %d failed\n",
			__func__, mcam_idx);
		return err;
	}

	dev_info(rvu->dev, "%s: Installed single drop on non hit rule at %d, cntr=%d\n",
		 __func__, mcam_idx, req.cntr);

	/* disable the entry at Bank 0, index 0 */
	npc_enable_mcam_entry(rvu, mcam, blkaddr, mcam_idx, false);

	return 0;
}

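/* Hypothetical usage sketch (argument values illustrative only): the exact
 * match setup code could reserve MCAM index 0 as the drop-on-miss entry
 * with something like
 *
 *	u16 cntr;
 *	npc_install_mcam_drop_rule(rvu, 0, &cntr, chan, chan_mask,
 *				   0, exact_mask, 0, 0);
 *
 * leaving the rule disabled until the first DMAC filter is installed.
 */
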
int rvu_mbox_handler_npc_get_field_status(struct rvu *rvu,
					  struct npc_get_field_status_req *req,
					  struct npc_get_field_status_rsp *rsp)
{
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	if (!is_npc_interface_valid(rvu, req->intf))
		return NPC_FLOW_INTF_INVALID;

	if (npc_check_field(rvu, blkaddr, req->field, req->intf))
		rsp->enable = 1;

	return 0;
}