// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <linux/bitfield.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"

#define NPC_BYTESM	GENMASK_ULL(19, 16)
#define NPC_HDR_OFFSET	GENMASK_ULL(15, 8)
#define NPC_KEY_OFFSET	GENMASK_ULL(5, 0)
#define NPC_LDATA_EN	BIT_ULL(7)

static const char * const npc_flow_names[] = {
	[NPC_DMAC]	= "dmac",
	[NPC_SMAC]	= "smac",
	[NPC_ETYPE]	= "ether type",
	[NPC_OUTER_VID]	= "outer vlan id",
	[NPC_TOS]	= "tos",
	[NPC_SIP_IPV4]	= "ipv4 source ip",
	[NPC_DIP_IPV4]	= "ipv4 destination ip",
	[NPC_SIP_IPV6]	= "ipv6 source ip",
	[NPC_DIP_IPV6]	= "ipv6 destination ip",
	[NPC_IPPROTO_TCP] = "ip proto tcp",
	[NPC_IPPROTO_UDP] = "ip proto udp",
	[NPC_IPPROTO_SCTP] = "ip proto sctp",
	[NPC_IPPROTO_ICMP] = "ip proto icmp",
	[NPC_IPPROTO_ICMP6] = "ip proto icmp6",
	[NPC_IPPROTO_AH] = "ip proto AH",
	[NPC_IPPROTO_ESP] = "ip proto ESP",
	[NPC_SPORT_TCP]	= "tcp source port",
	[NPC_DPORT_TCP]	= "tcp destination port",
	[NPC_SPORT_UDP]	= "udp source port",
	[NPC_DPORT_UDP]	= "udp destination port",
	[NPC_SPORT_SCTP] = "sctp source port",
	[NPC_DPORT_SCTP] = "sctp destination port",
	[NPC_UNKNOWN]	= "unknown",
};

const char *npc_get_field_name(u8 hdr)
{
	if (hdr >= ARRAY_SIZE(npc_flow_names))
		return npc_flow_names[NPC_UNKNOWN];

	return npc_flow_names[hdr];
}

/* Compute keyword masks and figure out the number of keywords a field
 * spans in the key.
 */
static void npc_set_kw_masks(struct npc_mcam *mcam, u8 type,
			     u8 nr_bits, int start_kwi, int offset, u8 intf)
{
	struct npc_key_field *field = &mcam->rx_key_fields[type];
	u8 bits_in_kw;
	int max_kwi;

	if (mcam->banks_per_entry == 1)
		max_kwi = 1; /* NPC_MCAM_KEY_X1 */
	else if (mcam->banks_per_entry == 2)
		max_kwi = 3; /* NPC_MCAM_KEY_X2 */
	else
		max_kwi = 6; /* NPC_MCAM_KEY_X4 */

	if (is_npc_intf_tx(intf))
		field = &mcam->tx_key_fields[type];

	if (offset + nr_bits <= 64) {
		/* one KW only */
		if (start_kwi > max_kwi)
			return;
		field->kw_mask[start_kwi] |= GENMASK_ULL(nr_bits - 1, 0)
					     << offset;
		field->nr_kws = 1;
	} else if (offset + nr_bits > 64 &&
		   offset + nr_bits <= 128) {
		/* two KWs */
		if (start_kwi + 1 > max_kwi)
			return;
		/* first KW mask */
		bits_in_kw = 64 - offset;
		field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
					     << offset;
		/* second KW mask i.e. mask for rest of bits */
		bits_in_kw = nr_bits + offset - 64;
		field->kw_mask[start_kwi + 1] |= GENMASK_ULL(bits_in_kw - 1, 0);
		field->nr_kws = 2;
	} else {
		/* three KWs */
		if (start_kwi + 2 > max_kwi)
			return;
		/* first KW mask */
		bits_in_kw = 64 - offset;
		field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
					     << offset;
		/* second KW mask */
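		/* A field spanning three keywords completely covers the
		 * middle 64-bit keyword, hence the all-ones mask.
		 */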
		field->kw_mask[start_kwi + 1] = ~0ULL;
		/* third KW mask i.e. mask for rest of bits */
		bits_in_kw = nr_bits + offset - 128;
		field->kw_mask[start_kwi + 2] |= GENMASK_ULL(bits_in_kw - 1, 0);
		field->nr_kws = 3;
	}
}
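
/* Example: a 4-byte field at key byte offset 12 gives start_kwi = 12 / 8 = 1
 * and offset = (12 * 8) % 64 = 32. Since offset + nr_bits = 64 it fits in a
 * single keyword, with kw_mask[1] |= GENMASK_ULL(31, 0) << 32.
 */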

/* Helper function to figure out whether field exists in the key */
static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct npc_key_field *input;

	input = &mcam->rx_key_fields[type];
	if (is_npc_intf_tx(intf))
		input = &mcam->tx_key_fields[type];

	return input->nr_kws > 0;
}

static bool npc_is_same(struct npc_key_field *input,
			struct npc_key_field *field)
{
	int ret;

	ret = memcmp(&input->layer_mdata, &field->layer_mdata,
		     sizeof(struct npc_layer_mdata));
	return ret == 0;
}

static void npc_set_layer_mdata(struct npc_mcam *mcam, enum key_fields type,
				u64 cfg, u8 lid, u8 lt, u8 intf)
{
	struct npc_key_field *input = &mcam->rx_key_fields[type];

	if (is_npc_intf_tx(intf))
		input = &mcam->tx_key_fields[type];

	input->layer_mdata.hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
	input->layer_mdata.key = FIELD_GET(NPC_KEY_OFFSET, cfg);
	input->layer_mdata.len = FIELD_GET(NPC_BYTESM, cfg) + 1;
	input->layer_mdata.ltype = lt;
	input->layer_mdata.lid = lid;
}

static bool npc_check_overlap_fields(struct npc_key_field *input1,
				     struct npc_key_field *input2)
{
	int kwi;

	/* Fields with the same layer id and different ltypes are mutually
	 * exclusive, hence they are allowed to overlap.
	 */
	if (input1->layer_mdata.lid == input2->layer_mdata.lid &&
	    input1->layer_mdata.ltype != input2->layer_mdata.ltype)
		return false;

	for (kwi = 0; kwi < NPC_MAX_KWS_IN_KEY; kwi++) {
		if (input1->kw_mask[kwi] & input2->kw_mask[kwi])
			return true;
	}

	return false;
}

/* Helper function to check whether a given field overlaps with any other field
 * in the key. Due to limitations on key size and the key extraction profile in
 * use, higher layers can overwrite a lower layer's header fields. Hence overlap
 * needs to be checked.
 */
static bool npc_check_overlap(struct rvu *rvu, int blkaddr,
			      enum key_fields type, u8 start_lid, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct npc_key_field *dummy, *input;
	int start_kwi, offset;
	u8 nr_bits, lid, lt, ld;
	u64 cfg;

	dummy = &mcam->rx_key_fields[NPC_UNKNOWN];
	input = &mcam->rx_key_fields[type];

	if (is_npc_intf_tx(intf)) {
		dummy = &mcam->tx_key_fields[NPC_UNKNOWN];
		input = &mcam->tx_key_fields[type];
	}

	for (lid = start_lid; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++) {
				cfg = rvu_read64(rvu, blkaddr,
						 NPC_AF_INTFX_LIDX_LTX_LDX_CFG
						 (intf, lid, lt, ld));
				if (!FIELD_GET(NPC_LDATA_EN, cfg))
					continue;
				memset(dummy, 0, sizeof(struct npc_key_field));
				npc_set_layer_mdata(mcam, NPC_UNKNOWN, cfg,
						    lid, lt, intf);
				/* exclude input */
				if (npc_is_same(input, dummy))
					continue;
				start_kwi = dummy->layer_mdata.key / 8;
				offset = (dummy->layer_mdata.key * 8) % 64;
				nr_bits = dummy->layer_mdata.len * 8;
				/* form KW masks */
				npc_set_kw_masks(mcam, NPC_UNKNOWN, nr_bits,
						 start_kwi, offset, intf);
				/* check if any input field bits fall within
				 * any other field's bits
				 */
				if (npc_check_overlap_fields(dummy, input))
					return true;
			}
		}
	}

	return false;
}

static bool npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
			    u8 intf)
{
	if (!npc_is_field_present(rvu, type, intf) ||
	    npc_check_overlap(rvu, blkaddr, type, 0, intf))
		return false;
	return true;
}
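
/* Each bit set in an interface's NPC_PARSE_NIBBLE mask contributes one 4-bit
 * nibble of the parse result to the key. Map a set bit's position to the
 * field that nibble carries (channel, error info, per-layer ltype).
 */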

static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
				  u8 key_nibble, u8 intf)
{
	u8 offset = (key_nibble * 4) % 64; /* offset within key word */
	u8 kwi = (key_nibble * 4) / 64; /* which word in key */
	u8 nr_bits = 4; /* bits in a nibble */
	u8 type;

	switch (bit_number) {
	case 0 ... 2:
		type = NPC_CHAN;
		break;
	case 3:
		type = NPC_ERRLEV;
		break;
	case 4 ... 5:
		type = NPC_ERRCODE;
		break;
	case 6:
		type = NPC_LXMB;
		break;
	/* check for LTYPE only as of now */
	case 9:
		type = NPC_LA;
		break;
	case 12:
		type = NPC_LB;
		break;
	case 15:
		type = NPC_LC;
		break;
	case 18:
		type = NPC_LD;
		break;
	case 21:
		type = NPC_LE;
		break;
	case 24:
		type = NPC_LF;
		break;
	case 27:
		type = NPC_LG;
		break;
	case 30:
		type = NPC_LH;
		break;
	default:
		return;
	}
	npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
}

static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct npc_key_field *key_fields;
	/* Ether type can come from three layers
	 * (ethernet, single tagged, double tagged)
	 */
	struct npc_key_field *etype_ether;
	struct npc_key_field *etype_tag1;
	struct npc_key_field *etype_tag2;
	/* Outer VLAN TCI can come from two layers
	 * (single tagged, double tagged)
	 */
	struct npc_key_field *vlan_tag1;
	struct npc_key_field *vlan_tag2;
	u64 *features;
	u8 start_lid;
	int i;

	key_fields = mcam->rx_key_fields;
	features = &mcam->rx_features;

	if (is_npc_intf_tx(intf)) {
		key_fields = mcam->tx_key_fields;
		features = &mcam->tx_features;
	}

	/* Handle header fields which can come from multiple layers like
	 * etype, outer vlan tci. These fields should have the same position
	 * in the key; otherwise installing a single MCAM rule would need
	 * more than one entry, which complicates MCAM space management.
	 */
	etype_ether = &key_fields[NPC_ETYPE_ETHER];
	etype_tag1 = &key_fields[NPC_ETYPE_TAG1];
	etype_tag2 = &key_fields[NPC_ETYPE_TAG2];
	vlan_tag1 = &key_fields[NPC_VLAN_TAG1];
	vlan_tag2 = &key_fields[NPC_VLAN_TAG2];

	/* if the programmed key profile does not extract Ethertype at all */
	if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
		goto vlan_tci;

	/* if the programmed key profile extracts Ethertype from one layer */
	if (etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
		key_fields[NPC_ETYPE] = *etype_ether;
	if (!etype_ether->nr_kws && etype_tag1->nr_kws && !etype_tag2->nr_kws)
		key_fields[NPC_ETYPE] = *etype_tag1;
	if (!etype_ether->nr_kws && !etype_tag1->nr_kws && etype_tag2->nr_kws)
		key_fields[NPC_ETYPE] = *etype_tag2;

	/* if the programmed key profile extracts Ethertype from multiple layers */
	if (etype_ether->nr_kws && etype_tag1->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
			if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i])
				goto vlan_tci;
		}
		key_fields[NPC_ETYPE] = *etype_tag1;
	}
	if (etype_ether->nr_kws && etype_tag2->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
			if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i])
				goto vlan_tci;
		}
		key_fields[NPC_ETYPE] = *etype_tag2;
	}
	if (etype_tag1->nr_kws && etype_tag2->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
			if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i])
				goto vlan_tci;
		}
		key_fields[NPC_ETYPE] = *etype_tag2;
	}

	/* check that none of the higher layers overwrite Ethertype */
	start_lid = key_fields[NPC_ETYPE].layer_mdata.lid + 1;
	if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf))
		goto vlan_tci;
	*features |= BIT_ULL(NPC_ETYPE);
vlan_tci:
	/* if the key profile does not extract outer vlan tci at all */
	if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
		goto done;

	/* if the key profile extracts outer vlan tci from one layer */
	if (vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
		key_fields[NPC_OUTER_VID] = *vlan_tag1;
	if (!vlan_tag1->nr_kws && vlan_tag2->nr_kws)
		key_fields[NPC_OUTER_VID] = *vlan_tag2;

	/* if the key profile extracts outer vlan tci from multiple layers */
	if (vlan_tag1->nr_kws && vlan_tag2->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
			if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i])
				goto done;
		}
		key_fields[NPC_OUTER_VID] = *vlan_tag2;
	}
	/* check that none of the higher layers overwrite outer vlan tci */
	start_lid = key_fields[NPC_OUTER_VID].layer_mdata.lid + 1;
	if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf))
		goto done;
	*features |= BIT_ULL(NPC_OUTER_VID);
done:
	return;
}

static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
			   u8 lt, u64 cfg, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u8 hdr, key, nr_bytes, bit_offset;
	u8 la_ltype, la_start;
	/* starting KW index and starting bit position */
	int start_kwi, offset;

	nr_bytes = FIELD_GET(NPC_BYTESM, cfg) + 1;
	hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
	key = FIELD_GET(NPC_KEY_OFFSET, cfg);
	start_kwi = key / 8;
	offset = (key * 8) % 64;

	/* For Tx, Layer A has NIX_INST_HDR_S (8 bytes) preceding
	 * the ethernet header.
	 */
	if (is_npc_intf_tx(intf)) {
		la_ltype = NPC_LT_LA_IH_NIX_ETHER;
		la_start = 8;
	} else {
		la_ltype = NPC_LT_LA_ETHER;
		la_start = 0;
	}

#define NPC_SCAN_HDR(name, hlid, hlt, hstart, hlen)			       \
do {									       \
	if (lid == (hlid) && lt == (hlt)) {				       \
		if ((hstart) >= hdr &&					       \
		    ((hstart) + (hlen)) <= (hdr + nr_bytes)) {		       \
			bit_offset = (hdr + nr_bytes - (hstart) - (hlen)) * 8; \
			npc_set_layer_mdata(mcam, (name), cfg, lid, lt, intf); \
			npc_set_kw_masks(mcam, (name), (hlen) * 8,	       \
					 start_kwi, offset + bit_offset, intf);\
		}							       \
	}								       \
} while (0)
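
/* NPC_SCAN_HDR records a field only if the extraction profile pulls all of
 * it, i.e. the field lies entirely within the extracted region
 * [hdr, hdr + nr_bytes). bit_offset is the distance in bits from the end of
 * the field to the end of that region: bytes extracted later land at lower
 * bit positions within the keyword.
 */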

	/* List LID, LTYPE, start offset from layer and length (in bytes) of
	 * packet header fields below.
	 * Example: Source IP is 4 bytes and starts at 12th byte of IP header
	 */
	NPC_SCAN_HDR(NPC_TOS, NPC_LID_LC, NPC_LT_LC_IP, 1, 1);
	NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
	NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
	NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
	NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
	NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2);
	NPC_SCAN_HDR(NPC_DPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 2, 2);
	NPC_SCAN_HDR(NPC_SPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 0, 2);
	NPC_SCAN_HDR(NPC_DPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 2, 2);
	NPC_SCAN_HDR(NPC_SPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 0, 2);
	NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
	NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
	NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
	NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
	NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
	NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
	NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
	NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start, 6);
	/* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */
	NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2);
}
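
/* Record which of the scanned header fields can actually be used for flow
 * steering on this interface: a field qualifies only if it is present in
 * the key and no higher layer overwrites it.
 */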

static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u64 *features = &mcam->rx_features;
	u64 tcp_udp_sctp;
	int hdr;

	if (is_npc_intf_tx(intf))
		features = &mcam->tx_features;

	for (hdr = NPC_DMAC; hdr < NPC_HEADER_FIELDS_MAX; hdr++) {
		if (npc_check_field(rvu, blkaddr, hdr, intf))
			*features |= BIT_ULL(hdr);
	}

	tcp_udp_sctp = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) |
		       BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
		       BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP);

	/* for tcp/udp/sctp the corresponding layer type should be in the key */
	if (*features & tcp_udp_sctp) {
		if (!npc_check_field(rvu, blkaddr, NPC_LD, intf))
			*features &= ~tcp_udp_sctp;
		else
			*features |= BIT_ULL(NPC_IPPROTO_TCP) |
				     BIT_ULL(NPC_IPPROTO_UDP) |
				     BIT_ULL(NPC_IPPROTO_SCTP);
	}

	/* for AH/ICMP/ICMPv6, check if the corresponding layer type is present in the key */
	if (npc_check_field(rvu, blkaddr, NPC_LD, intf)) {
		*features |= BIT_ULL(NPC_IPPROTO_AH);
		*features |= BIT_ULL(NPC_IPPROTO_ICMP);
		*features |= BIT_ULL(NPC_IPPROTO_ICMP6);
	}

	/* for ESP, check if the corresponding layer type is present in the key */
	if (npc_check_field(rvu, blkaddr, NPC_LE, intf))
		*features |= BIT_ULL(NPC_IPPROTO_ESP);

	/* for vlan the corresponding layer type should be in the key */
	if (*features & BIT_ULL(NPC_OUTER_VID))
		if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
			*features &= ~BIT_ULL(NPC_OUTER_VID);
}

/* Scan the key extraction profile and record how the fields of interest
 * fill the key structure. Also verify that Channel and DMAC exist in the
 * key and are not overwritten by other header fields.
 */
static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u8 lid, lt, ld, bitnr;
	u8 key_nibble = 0;
	u64 cfg;

	/* Scan and note how the parse result is laid out in the key.
	 * A bit set in PARSE_NIBBLE_ENA corresponds to a nibble from the
	 * parse result in the key. The enabled nibbles from the parse
	 * result are concatenated in the key.
	 */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf));
	cfg &= NPC_PARSE_NIBBLE;
	for_each_set_bit(bitnr, (unsigned long *)&cfg, 31) {
		npc_scan_parse_result(mcam, bitnr, key_nibble, intf);
		key_nibble++;
	}

	/* Scan and note how layer data is going to be in the key */
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++) {
				cfg = rvu_read64(rvu, blkaddr,
						 NPC_AF_INTFX_LIDX_LTX_LDX_CFG
						 (intf, lid, lt, ld));
				if (!FIELD_GET(NPC_LDATA_EN, cfg))
					continue;
				npc_scan_ldata(rvu, blkaddr, lid, lt, cfg,
					       intf);
			}
		}
	}

	return 0;
}

static int npc_scan_verify_kex(struct rvu *rvu, int blkaddr)
{
	int err;

	err = npc_scan_kex(rvu, blkaddr, NIX_INTF_RX);
	if (err)
		return err;

	err = npc_scan_kex(rvu, blkaddr, NIX_INTF_TX);
	if (err)
		return err;

	/* Channel is mandatory */
	if (!npc_is_field_present(rvu, NPC_CHAN, NIX_INTF_RX)) {
		dev_err(rvu->dev, "Channel not present in Key\n");
		return -EINVAL;
	}
	/* check that none of the fields overwrite channel */
	if (npc_check_overlap(rvu, blkaddr, NPC_CHAN, 0, NIX_INTF_RX)) {
		dev_err(rvu->dev, "Channel cannot be overwritten\n");
		return -EINVAL;
	}
	/* DMAC should be present in key for unicast filter to work */
	if (!npc_is_field_present(rvu, NPC_DMAC, NIX_INTF_RX)) {
		dev_err(rvu->dev, "DMAC not present in Key\n");
		return -EINVAL;
	}
	/* check that none of the fields overwrite DMAC */
	if (npc_check_overlap(rvu, blkaddr, NPC_DMAC, 0, NIX_INTF_RX)) {
		dev_err(rvu->dev, "DMAC cannot be overwritten\n");
		return -EINVAL;
	}

	npc_set_features(rvu, blkaddr, NIX_INTF_TX);
	npc_set_features(rvu, blkaddr, NIX_INTF_RX);
	npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_TX);
	npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_RX);

	return 0;
}

int npc_flow_steering_init(struct rvu *rvu, int blkaddr)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;

	INIT_LIST_HEAD(&mcam->mcam_rules);

	return npc_scan_verify_kex(rvu, blkaddr);
}
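
/* Reject install requests that want to match on fields which the programmed
 * key extraction profile cannot provide.
 */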

static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u64 *mcam_features = &mcam->rx_features;
	u64 unsupported;
	u8 bit;

	if (is_npc_intf_tx(intf))
		mcam_features = &mcam->tx_features;

	unsupported = (*mcam_features ^ features) & ~(*mcam_features);
	if (unsupported) {
		dev_info(rvu->dev, "Unsupported flow(s):\n");
		for_each_set_bit(bit, (unsigned long *)&unsupported, 64)
			dev_info(rvu->dev, "%s ", npc_get_field_name(bit));
		return NIX_AF_ERR_NPC_KEY_NOT_SUPP;
	}

	return 0;
}

/* npc_update_entry - Based on the masks generated during
 * the key scanning, updates the given entry with value and
 * masks for the field of interest. A maximum of 16 bytes of a packet
 * header can be extracted by HW, hence lo and hi are sufficient.
 * When a field is 8 bytes or less, hi should be 0 for both value
 * and mask.
 *
 * If an exact match of the value is required then the mask should be
 * all 1s. Any bit cleared in the mask makes the corresponding value
 * bit a don't care.
 */
static void npc_update_entry(struct rvu *rvu, enum key_fields type,
			     struct mcam_entry *entry, u64 val_lo,
			     u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct mcam_entry dummy = { {0} };
	struct npc_key_field *field;
	u64 kw1, kw2, kw3;
	u8 shift;
	int i;

	field = &mcam->rx_key_fields[type];
	if (is_npc_intf_tx(intf))
		field = &mcam->tx_key_fields[type];

	if (!field->nr_kws)
		return;

	for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
		if (!field->kw_mask[i])
			continue;
		/* place key value in kw[x] */
		shift = __ffs64(field->kw_mask[i]);
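		/* __ffs64() gives the lowest set bit of the keyword mask,
		 * i.e. the bit position where this field starts within
		 * kw[i]; align the value and mask with it.
		 */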
		/* update entry value */
		kw1 = (val_lo << shift) & field->kw_mask[i];
		dummy.kw[i] = kw1;
		/* update entry mask */
		kw1 = (mask_lo << shift) & field->kw_mask[i];
		dummy.kw_mask[i] = kw1;

		if (field->nr_kws == 1)
			break;
		/* place remaining bits of key value in kw[x + 1] */
		if (field->nr_kws == 2) {
			/* update entry value */
			kw2 = shift ? val_lo >> (64 - shift) : 0;
			kw2 |= (val_hi << shift);
			kw2 &= field->kw_mask[i + 1];
			dummy.kw[i + 1] = kw2;
			/* update entry mask */
			kw2 = shift ? mask_lo >> (64 - shift) : 0;
			kw2 |= (mask_hi << shift);
			kw2 &= field->kw_mask[i + 1];
			dummy.kw_mask[i + 1] = kw2;
			break;
		}
		/* place remaining bits of key value in kw[x + 1], kw[x + 2] */
		if (field->nr_kws == 3) {
			/* update entry value */
			kw2 = shift ? val_lo >> (64 - shift) : 0;
			kw2 |= (val_hi << shift);
			kw2 &= field->kw_mask[i + 1];
			kw3 = shift ? val_hi >> (64 - shift) : 0;
			kw3 &= field->kw_mask[i + 2];
			dummy.kw[i + 1] = kw2;
			dummy.kw[i + 2] = kw3;
			/* update entry mask */
			kw2 = shift ? mask_lo >> (64 - shift) : 0;
			kw2 |= (mask_hi << shift);
			kw2 &= field->kw_mask[i + 1];
			kw3 = shift ? mask_hi >> (64 - shift) : 0;
			kw3 &= field->kw_mask[i + 2];
			dummy.kw_mask[i + 1] = kw2;
			dummy.kw_mask[i + 2] = kw3;
			break;
		}
	}
	/* dummy now holds the values and masks for the given key field;
	 * clear those bits in the input entry and update it with them.
	 */
	for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
		if (!field->kw_mask[i])
			continue;
		entry->kw[i] &= ~field->kw_mask[i];
		entry->kw_mask[i] &= ~field->kw_mask[i];

		entry->kw[i] |= dummy.kw[i];
		entry->kw_mask[i] |= dummy.kw_mask[i];
	}
}

#define IPV6_WORDS	4

static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry,
				 u64 features, struct flow_msg *pkt,
				 struct flow_msg *mask,
				 struct rvu_npc_mcam_rule *output, u8 intf)
{
	u32 src_ip[IPV6_WORDS], src_ip_mask[IPV6_WORDS];
	u32 dst_ip[IPV6_WORDS], dst_ip_mask[IPV6_WORDS];
	struct flow_msg *opkt = &output->packet;
	struct flow_msg *omask = &output->mask;
	u64 mask_lo, mask_hi;
	u64 val_lo, val_hi;

	/* For an ipv6 address fe80::2c68:63ff:fe5e:2d0a the packet
	 * values to be programmed in MCAM should be as below:
	 * val_high: 0xfe80000000000000
	 * val_low: 0x2c6863fffe5e2d0a
	 */
	if (features & BIT_ULL(NPC_SIP_IPV6)) {
		be32_to_cpu_array(src_ip_mask, mask->ip6src, IPV6_WORDS);
		be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);

		mask_hi = (u64)src_ip_mask[0] << 32 | src_ip_mask[1];
		mask_lo = (u64)src_ip_mask[2] << 32 | src_ip_mask[3];
		val_hi = (u64)src_ip[0] << 32 | src_ip[1];
		val_lo = (u64)src_ip[2] << 32 | src_ip[3];

		npc_update_entry(rvu, NPC_SIP_IPV6, entry, val_lo, val_hi,
				 mask_lo, mask_hi, intf);
		memcpy(opkt->ip6src, pkt->ip6src, sizeof(opkt->ip6src));
		memcpy(omask->ip6src, mask->ip6src, sizeof(omask->ip6src));
	}
	if (features & BIT_ULL(NPC_DIP_IPV6)) {
		be32_to_cpu_array(dst_ip_mask, mask->ip6dst, IPV6_WORDS);
		be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);

		mask_hi = (u64)dst_ip_mask[0] << 32 | dst_ip_mask[1];
		mask_lo = (u64)dst_ip_mask[2] << 32 | dst_ip_mask[3];
		val_hi = (u64)dst_ip[0] << 32 | dst_ip[1];
		val_lo = (u64)dst_ip[2] << 32 | dst_ip[3];

		npc_update_entry(rvu, NPC_DIP_IPV6, entry, val_lo, val_hi,
				 mask_lo, mask_hi, intf);
		memcpy(opkt->ip6dst, pkt->ip6dst, sizeof(opkt->ip6dst));
		memcpy(omask->ip6dst, mask->ip6dst, sizeof(omask->ip6dst));
	}
}

static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
			    u64 features, struct flow_msg *pkt,
			    struct flow_msg *mask,
			    struct rvu_npc_mcam_rule *output, u8 intf)
{
	u64 dmac_mask = ether_addr_to_u64(mask->dmac);
	u64 smac_mask = ether_addr_to_u64(mask->smac);
	u64 dmac_val = ether_addr_to_u64(pkt->dmac);
	u64 smac_val = ether_addr_to_u64(pkt->smac);
	struct flow_msg *opkt = &output->packet;
	struct flow_msg *omask = &output->mask;

	if (!features)
		return;

	/* For tcp/udp/sctp LTYPE should be present in entry */
	if (features & BIT_ULL(NPC_IPPROTO_TCP))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP,
				 0, ~0ULL, 0, intf);
	if (features & BIT_ULL(NPC_IPPROTO_UDP))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP,
				 0, ~0ULL, 0, intf);
	if (features & BIT_ULL(NPC_IPPROTO_SCTP))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP,
				 0, ~0ULL, 0, intf);
	if (features & BIT_ULL(NPC_IPPROTO_ICMP))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP,
				 0, ~0ULL, 0, intf);
	if (features & BIT_ULL(NPC_IPPROTO_ICMP6))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP6,
				 0, ~0ULL, 0, intf);
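
	/* The outer tag may be parsed as either CTAG or STAG/QinQ. Using
	 * NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG as the ltype mask keeps only
	 * the bits common to both ltype values, so one entry matches either
	 * tag type.
	 */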
	if (features & BIT_ULL(NPC_OUTER_VID))
		npc_update_entry(rvu, NPC_LB, entry,
				 NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0,
				 NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf);

	/* For AH, LTYPE should be present in entry */
	if (features & BIT_ULL(NPC_IPPROTO_AH))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_AH,
				 0, ~0ULL, 0, intf);
	/* For ESP, LTYPE should be present in entry */
	if (features & BIT_ULL(NPC_IPPROTO_ESP))
		npc_update_entry(rvu, NPC_LE, entry, NPC_LT_LE_ESP,
				 0, ~0ULL, 0, intf);

#define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi)	      \
do {									      \
	if (features & BIT_ULL((field))) {				      \
		npc_update_entry(rvu, (field), entry, (val_lo), (val_hi),    \
				 (mask_lo), (mask_hi), intf);		      \
		memcpy(&opkt->member, &pkt->member, sizeof(pkt->member));     \
		memcpy(&omask->member, &mask->member, sizeof(mask->member));  \
	}								      \
} while (0)

	NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0);
	NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0);
	NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0,
		       ntohs(mask->etype), 0);
	NPC_WRITE_FLOW(NPC_TOS, tos, pkt->tos, 0, mask->tos, 0);
	NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0,
		       ntohl(mask->ip4src), 0);
	NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0,
		       ntohl(mask->ip4dst), 0);
	NPC_WRITE_FLOW(NPC_SPORT_TCP, sport, ntohs(pkt->sport), 0,
		       ntohs(mask->sport), 0);
	NPC_WRITE_FLOW(NPC_SPORT_UDP, sport, ntohs(pkt->sport), 0,
		       ntohs(mask->sport), 0);
	NPC_WRITE_FLOW(NPC_DPORT_TCP, dport, ntohs(pkt->dport), 0,
		       ntohs(mask->dport), 0);
	NPC_WRITE_FLOW(NPC_DPORT_UDP, dport, ntohs(pkt->dport), 0,
		       ntohs(mask->dport), 0);
	NPC_WRITE_FLOW(NPC_SPORT_SCTP, sport, ntohs(pkt->sport), 0,
		       ntohs(mask->sport), 0);
	NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0,
		       ntohs(mask->dport), 0);

	NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0,
		       ntohs(mask->vlan_tci), 0);

	npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf);
}
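
/* Installed rules are tracked in mcam->mcam_rules, kept sorted by MCAM
 * entry index; the lookup and insertion below preserve that order under
 * mcam->lock.
 */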

static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam,
						    u16 entry)
{
	struct rvu_npc_mcam_rule *iter;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		if (iter->entry == entry) {
			mutex_unlock(&mcam->lock);
			return iter;
		}
	}
	mutex_unlock(&mcam->lock);

	return NULL;
}

static void rvu_mcam_add_rule(struct npc_mcam *mcam,
			      struct rvu_npc_mcam_rule *rule)
{
	struct list_head *head = &mcam->mcam_rules;
	struct rvu_npc_mcam_rule *iter;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		if (iter->entry > rule->entry)
			break;
		head = &iter->list;
	}

	list_add(&rule->list, head);
	mutex_unlock(&mcam->lock);
}

static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
					      struct rvu_npc_mcam_rule *rule)
{
	struct npc_mcam_oper_counter_req free_req = { 0 };
	struct msg_rsp free_rsp;

	if (!rule->has_cntr)
		return;

	free_req.hdr.pcifunc = pcifunc;
	free_req.cntr = rule->cntr;

	rvu_mbox_handler_npc_mcam_free_counter(rvu, &free_req, &free_rsp);
	rule->has_cntr = false;
}

static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
					 struct rvu_npc_mcam_rule *rule,
					 struct npc_install_flow_rsp *rsp)
{
	struct npc_mcam_alloc_counter_req cntr_req = { 0 };
	struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
	int err;

	cntr_req.hdr.pcifunc = pcifunc;
	cntr_req.contig = true;
	cntr_req.count = 1;

	/* we try to allocate a counter to track the stats of this
	 * rule. If a counter could not be allocated then proceed
	 * without one, because counters are scarcer than entries.
	 */
	err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req,
						      &cntr_rsp);
	if (!err && cntr_rsp.count) {
		rule->cntr = cntr_rsp.cntr;
		rule->has_cntr = true;
		rsp->counter = rule->cntr;
	} else {
		rsp->counter = err;
	}
}
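
/* Build the RX half of the MCAM entry: match the ingress channel, set the
 * receive action (target PF_FUNC, op, index, match id) and program the
 * VTAG0/VTAG1 action.
 */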

static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct mcam_entry *entry,
				struct npc_install_flow_req *req, u16 target)
{
	struct nix_rx_action action;
	u64 chan_mask;

	chan_mask = req->chan_mask ? req->chan_mask : ~0ULL;
	npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, chan_mask, 0,
			 NIX_INTF_RX);

	*(u64 *)&action = 0x00;
	action.pf_func = target;
	action.op = req->op;
	action.index = req->index;
	action.match_id = req->match_id;
	action.flow_key_alg = req->flow_key_alg;

	if (req->op == NIX_RX_ACTION_DEFAULT && pfvf->def_ucast_rule)
		action = pfvf->def_ucast_rule->rx_action;

	entry->action = *(u64 *)&action;

	/* VTAG0 starts at 0th byte of LID_B.
	 * VTAG1 starts at 4th byte of LID_B.
	 */
	entry->vtag_action = FIELD_PREP(RX_VTAG0_VALID_BIT, req->vtag0_valid) |
			     FIELD_PREP(RX_VTAG0_TYPE_MASK, req->vtag0_type) |
			     FIELD_PREP(RX_VTAG0_LID_MASK, NPC_LID_LB) |
			     FIELD_PREP(RX_VTAG0_RELPTR_MASK, 0) |
			     FIELD_PREP(RX_VTAG1_VALID_BIT, req->vtag1_valid) |
			     FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) |
			     FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) |
			     FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4);
}

static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct mcam_entry *entry,
				struct npc_install_flow_req *req, u16 target)
{
	struct nix_tx_action action;

	npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target),
			 0, ~0ULL, 0, NIX_INTF_TX);

	*(u64 *)&action = 0x00;
	action.op = req->op;
	action.index = req->index;
	action.match_id = req->match_id;

	entry->action = *(u64 *)&action;

	/* VTAG0 starts at 0th byte of LID_B.
	 * VTAG1 starts at 4th byte of LID_B.
	 */
	entry->vtag_action = FIELD_PREP(TX_VTAG0_DEF_MASK, req->vtag0_def) |
			     FIELD_PREP(TX_VTAG0_OP_MASK, req->vtag0_op) |
			     FIELD_PREP(TX_VTAG0_LID_MASK, NPC_LID_LA) |
			     FIELD_PREP(TX_VTAG0_RELPTR_MASK, 20) |
			     FIELD_PREP(TX_VTAG1_DEF_MASK, req->vtag1_def) |
			     FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) |
			     FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) |
			     FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24);
}

static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
			    int nixlf, struct rvu_pfvf *pfvf,
			    struct npc_install_flow_req *req,
			    struct npc_install_flow_rsp *rsp, bool enable,
			    bool pf_set_vfs_mac)
{
	struct rvu_npc_mcam_rule *def_ucast_rule = pfvf->def_ucast_rule;
	u64 features, installed_features, missing_features = 0;
	struct npc_mcam_write_entry_req write_req = { 0 };
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule dummy = { 0 };
	struct rvu_npc_mcam_rule *rule;
	bool new = false, msg_from_vf;
	u16 owner = req->hdr.pcifunc;
	struct msg_rsp write_rsp;
	struct mcam_entry *entry;
	int entry_index, err;

	msg_from_vf = !!(owner & RVU_PFVF_FUNC_MASK);

	installed_features = req->features;
	features = req->features;
	entry = &write_req.entry_data;
	entry_index = req->entry;

	npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
			req->intf);

	if (is_npc_intf_rx(req->intf))
		npc_update_rx_entry(rvu, pfvf, entry, req, target);
	else
		npc_update_tx_entry(rvu, pfvf, entry, req, target);

	/* Default unicast rules do not exist for TX */
	if (is_npc_intf_tx(req->intf))
		goto find_rule;
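
	/* The default unicast rule overwrites the reserved NIXLF unicast
	 * entry in place, keeping that entry's current enable state.
	 */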
	if (req->default_rule) {
		entry_index = npc_get_nixlf_mcam_index(mcam, target, nixlf,
						       NIXLF_UCAST_ENTRY);
		enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, entry_index);
	}

	/* update mcam entry with default unicast rule attributes */
	if (def_ucast_rule && (msg_from_vf || (req->default_rule && req->append))) {
		missing_features = (def_ucast_rule->features ^ features) &
				   def_ucast_rule->features;
		if (missing_features)
			npc_update_flow(rvu, entry, missing_features,
					&def_ucast_rule->packet,
					&def_ucast_rule->mask,
					&dummy, req->intf);
		installed_features = req->features | missing_features;
	}

find_rule:
	rule = rvu_mcam_find_rule(mcam, entry_index);
	if (!rule) {
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;
		new = true;
	}

	/* allocate new counter if rule has no counter */
	if (!req->default_rule && req->set_cntr && !rule->has_cntr)
		rvu_mcam_add_counter_to_rule(rvu, owner, rule, rsp);

	/* if user wants to delete an existing counter for a rule then
	 * free the counter
	 */
	if (!req->set_cntr && rule->has_cntr)
		rvu_mcam_remove_counter_from_rule(rvu, owner, rule);

	write_req.hdr.pcifunc = owner;

	/* AF owns the default rules so change the owner just to relax
	 * the checks in rvu_mbox_handler_npc_mcam_write_entry
	 */
	if (req->default_rule)
		write_req.hdr.pcifunc = 0;

	write_req.entry = entry_index;
	write_req.intf = req->intf;
	write_req.enable_entry = (u8)enable;
	/* if counter is available then clear and use it */
	if (req->set_cntr && rule->has_cntr) {
		rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), 0x00);
		write_req.set_cntr = 1;
		write_req.cntr = rule->cntr;
	}

	err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
						    &write_rsp);
	if (err) {
		rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
		if (new)
			kfree(rule);
		return err;
	}
	/* update rule */
	memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet));
	memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask));
	rule->entry = entry_index;
	memcpy(&rule->rx_action, &entry->action, sizeof(struct nix_rx_action));
	if (is_npc_intf_tx(req->intf))
		memcpy(&rule->tx_action, &entry->action,
		       sizeof(struct nix_tx_action));
	rule->vtag_action = entry->vtag_action;
	rule->features = installed_features;
	rule->default_rule = req->default_rule;
	rule->owner = owner;
	rule->enable = enable;
	if (is_npc_intf_tx(req->intf))
		rule->intf = pfvf->nix_tx_intf;
	else
		rule->intf = pfvf->nix_rx_intf;

	if (new)
		rvu_mcam_add_rule(mcam, rule);
	if (req->default_rule)
		pfvf->def_ucast_rule = rule;

	/* VF's MAC address is being changed via PF */
	if (pf_set_vfs_mac) {
		ether_addr_copy(pfvf->default_mac, req->packet.dmac);
		ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
	}

	if (pfvf->pf_set_vf_cfg && req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7)
		rule->vfvlan_cfg = true;

	return 0;
}
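
/* Mbox handler: validate an install request, resolve the target PF/VF and
 * program the flow into the NPC MCAM.
 */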
int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
				      struct npc_install_flow_req *req,
				      struct npc_install_flow_rsp *rsp)
{
	bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;
	bool pf_set_vfs_mac = false;
	bool enable = true;
	u16 target;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0) {
		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
		return -ENODEV;
	}

	if (!is_npc_interface_valid(rvu, req->intf))
		return -EINVAL;

	if (from_vf && req->default_rule)
		return NPC_MCAM_PERM_DENIED;

	/* Each PF/VF's info is maintained in a struct rvu_pfvf; the one for
	 * the target PF/VF needs to be retrieved, hence derive the target
	 * pcifunc accordingly.
	 */

	/* AF installing for a PF/VF */
	if (!req->hdr.pcifunc)
		target = req->vf;
	/* PF installing for its VF */
	else if (!from_vf && req->vf) {
		target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf;
		pf_set_vfs_mac = req->default_rule &&
				 (req->features & BIT_ULL(NPC_DMAC));
	}
	/* msg received from PF/VF */
	else
		target = req->hdr.pcifunc;

	/* ignore chan_mask in case pf func is not AF, revisit later */
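	/* 0xFFF forces an exact match on the 12-bit channel; only the AF
	 * may install rules that match a range of channels.
	 */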
	if (!is_pffunc_af(req->hdr.pcifunc))
		req->chan_mask = 0xFFF;

	err = npc_check_unsupported_flows(rvu, req->features, req->intf);
	if (err)
		return err;

	if (npc_mcam_verify_channel(rvu, target, req->intf, req->channel))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, target);

	/* PF installing for its VF */
	if (req->hdr.pcifunc && !from_vf && req->vf)
		pfvf->pf_set_vf_cfg = 1;

	/* update req destination mac addr */
	if ((req->features & BIT_ULL(NPC_DMAC)) && is_npc_intf_rx(req->intf) &&
	    is_zero_ether_addr(req->packet.dmac)) {
		ether_addr_copy(req->packet.dmac, pfvf->mac_addr);
		eth_broadcast_addr((u8 *)&req->mask.dmac);
	}

	err = nix_get_nixlf(rvu, target, &nixlf, NULL);

	/* If the interface is uninitialized then do not enable the entry */
	if (err || (!req->default_rule && !pfvf->def_ucast_rule))
		enable = false;

	/* Packets reaching NPC in the Tx path imply that a
	 * NIXLF is properly set up and transmitting.
	 * Hence rules can be enabled for Tx.
	 */
	if (is_npc_intf_tx(req->intf))
		enable = true;

	/* Do not allow requests from uninitialized VFs */
	if (from_vf && !enable)
		return -EINVAL;

	/* If the message is from a VF then its flow should not overlap with
	 * the reserved unicast flow.
	 */
	if (from_vf && pfvf->def_ucast_rule && is_npc_intf_rx(req->intf) &&
	    pfvf->def_ucast_rule->features & req->features)
		return -EINVAL;

	return npc_install_flow(rvu, blkaddr, target, nixlf, pfvf, req, rsp,
				enable, pf_set_vfs_mac);
}

static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule,
			   u16 pcifunc)
{
	struct npc_mcam_ena_dis_entry_req dis_req = { 0 };
	struct msg_rsp dis_rsp;

	if (rule->default_rule)
		return 0;

	if (rule->has_cntr)
		rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule);

	dis_req.hdr.pcifunc = pcifunc;
	dis_req.entry = rule->entry;

	list_del(&rule->list);
	kfree(rule);

	return rvu_mbox_handler_npc_mcam_dis_entry(rvu, &dis_req, &dis_rsp);
}

int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
				     struct npc_delete_flow_req *req,
				     struct msg_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule *iter, *tmp;
	u16 pcifunc = req->hdr.pcifunc;
	struct list_head del_list;

	INIT_LIST_HEAD(&del_list);

	mutex_lock(&mcam->lock);
	list_for_each_entry_safe(iter, tmp, &mcam->mcam_rules, list) {
		if (iter->owner == pcifunc) {
			/* All rules */
			if (req->all) {
				list_move_tail(&iter->list, &del_list);
			/* Range of rules */
			} else if (req->end && iter->entry >= req->start &&
				   iter->entry <= req->end) {
				list_move_tail(&iter->list, &del_list);
			/* single rule */
			} else if (req->entry == iter->entry) {
				list_move_tail(&iter->list, &del_list);
				break;
			}
		}
	}
	mutex_unlock(&mcam->lock);

	list_for_each_entry_safe(iter, tmp, &del_list, list) {
		u16 entry = iter->entry;

		/* clear the mcam entry target pcifunc */
		mcam->entry2target_pffunc[entry] = 0x0;
		if (npc_delete_flow(rvu, iter, pcifunc))
			dev_err(rvu->dev, "rule deletion failed for entry:%u",
				entry);
	}

	return 0;
}

static int npc_update_dmac_value(struct rvu *rvu, int npcblkaddr,
				 struct rvu_npc_mcam_rule *rule,
				 struct rvu_pfvf *pfvf)
{
	struct npc_mcam_write_entry_req write_req = { 0 };
	struct mcam_entry *entry = &write_req.entry_data;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct msg_rsp rsp;
	u8 intf, enable;
	int err;

	ether_addr_copy(rule->packet.dmac, pfvf->mac_addr);

	npc_read_mcam_entry(rvu, mcam, npcblkaddr, rule->entry,
			    entry, &intf, &enable);

	npc_update_entry(rvu, NPC_DMAC, entry,
			 ether_addr_to_u64(pfvf->mac_addr), 0,
			 0xffffffffffffull, 0, intf);

	write_req.hdr.pcifunc = rule->owner;
	write_req.entry = rule->entry;
	write_req.intf = pfvf->nix_rx_intf;

	mutex_unlock(&mcam->lock);
	err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req, &rsp);
	mutex_lock(&mcam->lock);

	return err;
}

void npc_mcam_enable_flows(struct rvu *rvu, u16 target)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, target);
	struct rvu_npc_mcam_rule *def_ucast_rule;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule *rule;
	int blkaddr, bank, index;
	u64 def_action;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	def_ucast_rule = pfvf->def_ucast_rule;

	mutex_lock(&mcam->lock);
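	/* Enable this PF/VF's RX rules now that its NIXLF is initialized;
	 * rules still carrying the default action inherit the action of
	 * the default unicast rule installed for the target.
	 */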
	list_for_each_entry(rule, &mcam->mcam_rules, list) {
		if (is_npc_intf_rx(rule->intf) &&
		    rule->rx_action.pf_func == target && !rule->enable) {
			if (rule->default_rule) {
				npc_enable_mcam_entry(rvu, mcam, blkaddr,
						      rule->entry, true);
				rule->enable = true;
				continue;
			}

			if (rule->vfvlan_cfg)
				npc_update_dmac_value(rvu, blkaddr, rule, pfvf);

			if (rule->rx_action.op == NIX_RX_ACTION_DEFAULT) {
				if (!def_ucast_rule)
					continue;
				/* Use default unicast entry action */
				rule->rx_action = def_ucast_rule->rx_action;
				def_action = *(u64 *)&def_ucast_rule->rx_action;
				bank = npc_get_bank(mcam, rule->entry);
				rvu_write64(rvu, blkaddr,
					    NPC_AF_MCAMEX_BANKX_ACTION
					    (rule->entry, bank), def_action);
			}

			npc_enable_mcam_entry(rvu, mcam, blkaddr,
					      rule->entry, true);
			rule->enable = true;
		}
	}

	/* Enable MCAM entries installed by PF with target as VF pcifunc */
	for (index = 0; index < mcam->bmap_entries; index++) {
		if (mcam->entry2target_pffunc[index] == target)
			npc_enable_mcam_entry(rvu, mcam, blkaddr,
					      index, true);
	}
	mutex_unlock(&mcam->lock);
}

void npc_mcam_disable_flows(struct rvu *rvu, u16 target)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr, index;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	mutex_lock(&mcam->lock);
	/* Disable MCAM entries installed by PF with target as VF pcifunc */
	for (index = 0; index < mcam->bmap_entries; index++) {
		if (mcam->entry2target_pffunc[index] == target)
			npc_enable_mcam_entry(rvu, mcam, blkaddr,
					      index, false);
	}
	mutex_unlock(&mcam->lock);
}