// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <net/ipv6.h>
#include <linux/sort.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION	0x1

struct otx2_flow {
	struct ethtool_rx_flow_spec flow_spec;
	struct list_head list;
	u32 location;
	u32 entry;
	bool is_vf;
	u8 rss_ctx_id;
#define DMAC_FILTER_RULE	BIT(0)
#define PFC_FLOWCTRL_RULE	BIT(1)
	u16 rule_type;
	int vf;
};

enum dmac_req {
	DMAC_ADDR_UPDATE,
	DMAC_ADDR_DEL
};

static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
{
	devm_kfree(pfvf->dev, flow_cfg->flow_ent);
	flow_cfg->flow_ent = NULL;
	flow_cfg->max_flows = 0;
}

static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	int ent, err;

	if (!flow_cfg->max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	for (ent = 0; ent < flow_cfg->max_flows; ent++) {
		req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
		if (!req)
			break;

		req->entry = flow_cfg->flow_ent[ent];

		/* Send message to AF to free MCAM entries */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			break;
	}
	mutex_unlock(&pfvf->mbox.lock);
	otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
	return 0;
}

int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int ent, allocated = 0;

	/* Free current ones and allocate new ones with requested count */
	otx2_free_ntuple_mcam_entries(pfvf);

	if (!count)
		return 0;

	flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
						sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->flow_ent) {
		netdev_err(pfvf->netdev,
			   "%s: Unable to allocate memory for flow entries\n",
			   __func__);
		return -ENOMEM;
	}

	mutex_lock(&pfvf->mbox.lock);

	/* Only a maximum of NPC_MAX_NONCONTIG_ENTRIES MCAM entries can be
	 * allocated in a single request.
	 */
	while (allocated < count) {
		req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
		if (!req)
			goto exit;

		req->contig = false;
		req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
				NPC_MAX_NONCONTIG_ENTRIES : count - allocated;

		/* Allocate higher priority entries for PFs, so that VF's entries
		 * will be on top of PF.
		 */
		if (!is_otx2_vf(pfvf->pcifunc)) {
			req->priority = NPC_MCAM_HIGHER_PRIO;
			req->ref_entry = flow_cfg->def_ent[0];
		}

		/* Send message to AF */
		if (otx2_sync_mbox_msg(&pfvf->mbox))
			goto exit;

		rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
			(&pfvf->mbox.mbox, 0, &req->hdr);
		if (IS_ERR(rsp))
			goto exit;

		for (ent = 0; ent < rsp->count; ent++)
			flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];

		allocated += rsp->count;

		/* If this request is not fulfilled, no need to send
		 * further requests.
		 */
		if (rsp->count != req->count)
			break;
	}

	/* Multiple MCAM entry alloc requests could result in non-sequential
	 * MCAM entries in the flow_ent[] array. Sort them in ascending order,
	 * otherwise the user installed ntuple filter index and MCAM entry
	 * index will not be in sync.
	 */
	if (allocated)
		sort(&flow_cfg->flow_ent[0], allocated,
		     sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);

exit:
	mutex_unlock(&pfvf->mbox.lock);

	flow_cfg->max_flows = allocated;

	if (allocated) {
		pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
	}

	if (allocated != count)
		netdev_info(pfvf->netdev,
			    "Unable to allocate %d MCAM entries, got only %d\n",
			    count, allocated);
	return allocated;
}
EXPORT_SYMBOL(otx2_alloc_mcam_entries);

int otx2_mcam_entry_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_get_field_status_req *freq;
	struct npc_get_field_status_rsp *frsp;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int vf_vlan_max_flows;
	int ent, count;

	vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
	count = flow_cfg->ucast_flt_cnt +
			OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;

	flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
					       sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->def_ent)
		return -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->contig = false;
	req->count = count;

	/* Send message to AF */
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
	       (&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&pfvf->mbox.lock);
		return PTR_ERR(rsp);
	}

	if (rsp->count != req->count) {
		netdev_info(pfvf->netdev,
			    "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
		mutex_unlock(&pfvf->mbox.lock);
		devm_kfree(pfvf->dev, flow_cfg->def_ent);
		return 0;
	}

	for (ent = 0; ent < rsp->count; ent++)
		flow_cfg->def_ent[ent] = rsp->entry_list[ent];

	flow_cfg->vf_vlan_offset = 0;
	flow_cfg->unicast_offset = vf_vlan_max_flows;
	flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
					flow_cfg->ucast_flt_cnt;
	pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
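
	/* Resulting def_ent[] layout (summary of the offsets set above):
	 *   [0 .. vf_vlan_max_flows - 1]                     VF VLAN entries
	 *   [unicast_offset .. +ucast_flt_cnt - 1]           unicast DMAC filters
	 *   [rx_vlan_offset]                                 RX VLAN offload entry
	 */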

	/* Check if NPC_DMAC field is supported
	 * by the mkex profile before setting VLAN support flag.
	 */
	freq = otx2_mbox_alloc_msg_npc_get_field_status(&pfvf->mbox);
	if (!freq) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	freq->field = NPC_DMAC;
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	frsp = (struct npc_get_field_status_rsp *)otx2_mbox_get_rsp
	       (&pfvf->mbox.mbox, 0, &freq->hdr);
	if (IS_ERR(frsp)) {
		mutex_unlock(&pfvf->mbox.lock);
		return PTR_ERR(frsp);
	}

	if (frsp->enable) {
		pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
		pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
	}

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	/* Allocate entries for Ntuple filters */
	count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
	if (count <= 0) {
		otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
		return 0;
	}

	pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;

	refcount_set(&flow_cfg->mark_flows, 1);
	return 0;
}
EXPORT_SYMBOL(otx2_mcam_entry_init);

/* TODO : revisit on size */
#define OTX2_DMAC_FLTR_BITMAP_SZ (4 * 2048 + 32)

int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg;

	pfvf->flow_cfg = devm_kzalloc(pfvf->dev,
				      sizeof(struct otx2_flow_config),
				      GFP_KERNEL);
	if (!pfvf->flow_cfg)
		return -ENOMEM;

	pfvf->flow_cfg->dmacflt_bmap = devm_kcalloc(pfvf->dev,
						    BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ),
						    sizeof(long), GFP_KERNEL);
	if (!pfvf->flow_cfg->dmacflt_bmap)
		return -ENOMEM;

	flow_cfg = pfvf->flow_cfg;
	INIT_LIST_HEAD(&flow_cfg->flow_list);
	INIT_LIST_HEAD(&flow_cfg->flow_list_tc);
	flow_cfg->max_flows = 0;

	return 0;
}
EXPORT_SYMBOL(otx2vf_mcam_flow_init);

int otx2_mcam_flow_init(struct otx2_nic *pf)
{
	int err;

	pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
				    GFP_KERNEL);
	if (!pf->flow_cfg)
		return -ENOMEM;

	pf->flow_cfg->dmacflt_bmap = devm_kcalloc(pf->dev,
						  BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ),
						  sizeof(long), GFP_KERNEL);
	if (!pf->flow_cfg->dmacflt_bmap)
		return -ENOMEM;

	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
	INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);

	pf->flow_cfg->ucast_flt_cnt = OTX2_DEFAULT_UNICAST_FLOWS;

	/* Allocate bare minimum number of MCAM entries needed for
	 * unicast and ntuple filters.
	 */
	err = otx2_mcam_entry_init(pf);
	if (err)
		return err;

	/* Check if MCAM entries are allocated or not */
	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return 0;

	pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
				     * pf->flow_cfg->ucast_flt_cnt, GFP_KERNEL);
	if (!pf->mac_table)
		return -ENOMEM;

	otx2_dmacflt_get_max_cnt(pf);

	/* DMAC filters are not allocated */
	if (!pf->flow_cfg->dmacflt_max_flows)
		return 0;

	pf->flow_cfg->bmap_to_dmacindex =
			devm_kzalloc(pf->dev, sizeof(u32) *
				     pf->flow_cfg->dmacflt_max_flows,
				     GFP_KERNEL);

	if (!pf->flow_cfg->bmap_to_dmacindex)
		return -ENOMEM;

	pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;

	return 0;
}

void otx2_mcam_flow_del(struct otx2_nic *pf)
{
	otx2_destroy_mcam_flows(pf);
}
EXPORT_SYMBOL(otx2_mcam_flow_del);

/* On success adds an mcam entry.
 * On failure promiscuous mode is enabled.
 */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct npc_install_flow_req *req;
	int err, i;

	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return -ENOMEM;

	/* don't have free mcam entries or uc list is greater than allotted */
	if (netdev_uc_count(pf->netdev) > pf->flow_cfg->ucast_flt_cnt)
		return -ENOMEM;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* unicast offset starts at 32; entries 0..31 are for ntuple */
	for (i = 0; i < pf->flow_cfg->ucast_flt_cnt; i++) {
		if (pf->mac_table[i].inuse)
			continue;
		ether_addr_copy(pf->mac_table[i].addr, mac);
		pf->mac_table[i].inuse = true;
		pf->mac_table[i].mcam_entry =
			flow_cfg->def_ent[i + flow_cfg->unicast_offset];
		req->entry = pf->mac_table[i].mcam_entry;
		break;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	if (!bitmap_empty(pf->flow_cfg->dmacflt_bmap,
			  pf->flow_cfg->dmacflt_max_flows))
		netdev_warn(netdev,
			    "Add %pM to CGX/RPM DMAC filters list as well\n",
			    mac);

	return otx2_do_add_macfilter(pf, mac);
}
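
/* Look up the MCAM entry programmed for @mac and release its mac_table
 * slot. Returns true (with *mcam_entry set) if a match was found,
 * false otherwise.
 */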
static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
				       int *mcam_entry)
{
	int i;

	for (i = 0; i < pf->flow_cfg->ucast_flt_cnt; i++) {
		if (!pf->mac_table[i].inuse)
			continue;

		if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
			*mcam_entry = pf->mac_table[i].mcam_entry;
			pf->mac_table[i].inuse = false;
			return true;
		}
	}
	return false;
}

int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct npc_delete_flow_req *req;
	int err, mcam_entry;

	/* check if an mcam entry exists for the given mac */
	if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
		return 0;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}
	req->entry = mcam_entry;
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location)
			return iter;
	}

	return NULL;
}

static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	struct list_head *head = &pfvf->flow_cfg->flow_list;
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location > flow->location)
			break;
		head = &iter->list;
	}

	list_add(&flow->list, head);
}

int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
{
	if (!flow_cfg)
		return 0;

	if (flow_cfg->nr_flows == flow_cfg->max_flows ||
	    !bitmap_empty(flow_cfg->dmacflt_bmap,
			  flow_cfg->dmacflt_max_flows))
		return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
	else
		return flow_cfg->max_flows;
}
EXPORT_SYMBOL(otx2_get_maxflows);

int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		  u32 location)
{
	struct otx2_flow *iter;

	if (location >= otx2_get_maxflows(pfvf->flow_cfg))
		return -EINVAL;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location) {
			nfc->fs = iter->flow_spec;
			nfc->rss_context = iter->rss_ctx_id;
			return 0;
		}
	}

	return -ENOENT;
}

int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		       u32 *rule_locs)
{
	u32 rule_cnt = nfc->rule_cnt;
	u32 location = 0;
	int idx = 0;
	int err = 0;

	nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
	while ((!err || err == -ENOENT) && idx < rule_cnt) {
		err = otx2_get_flow(pfvf, nfc, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	nfc->rule_cnt = rule_cnt;

	return err;
}
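
/* The otx2_prepare_*_flow() helpers below translate an ethtool flow spec
 * into an NPC install-flow request. Illustrative example (interface name
 * and values are arbitrary):
 *   ethtool -U eth0 flow-type tcp4 src-ip 10.1.1.1 dst-port 80 action 2
 * builds a rule matching TCP/IPv4 with NPC_SIP_IPV4 and NPC_DPORT_TCP
 * (among others) set in req->features, steered to RX queue 2.
 */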
static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IP_USER_FLOW:
		if (ipv4_usr_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_usr_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_usr_mask->tos) {
			pkt->tos = ipv4_usr_hdr->tos;
			pmask->tos = ipv4_usr_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_usr_mask->proto) {
			switch (ipv4_usr_hdr->proto) {
			case IPPROTO_ICMP:
				req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
				break;
			case IPPROTO_TCP:
				req->features |= BIT_ULL(NPC_IPPROTO_TCP);
				break;
			case IPPROTO_UDP:
				req->features |= BIT_ULL(NPC_IPPROTO_UDP);
				break;
			case IPPROTO_SCTP:
				req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
				break;
			case IPPROTO_AH:
				req->features |= BIT_ULL(NPC_IPPROTO_AH);
				break;
			case IPPROTO_ESP:
				req->features |= BIT_ULL(NPC_IPPROTO_ESP);
				break;
			default:
				return -EOPNOTSUPP;
			}
		}
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ipv4_l4_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_l4_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_l4_mask->tos) {
			pkt->tos = ipv4_l4_hdr->tos;
			pmask->tos = ipv4_l4_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv4_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ah_esp_mask->ip4src) {
			memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ah_esp_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ah_esp_mask->tos) {
			pkt->tos = ah_esp_hdr->tos;
			pmask->tos = ah_esp_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if (ah_esp_mask->spi & ah_esp_hdr->spi)
			return -EOPNOTSUPP;

		if (flow_type == AH_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IPV6_USER_FLOW:
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_usr_hdr->l4_proto == IPPROTO_FRAGMENT) {
			pkt->next_header = ipv6_usr_hdr->l4_proto;
			pmask->next_header = ipv6_usr_mask->l4_proto;
			req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
		}
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv6_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
			memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
			memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
			return -EOPNOTSUPP;

		if (flow_type == AH_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
				     struct npc_install_flow_req *req)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;
	u32 flow_type;
	int ret;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	switch (flow_type) {
	/* bits not set in mask are don't care */
	case ETHER_FLOW:
		if (!is_zero_ether_addr(eth_mask->h_source)) {
			ether_addr_copy(pkt->smac, eth_hdr->h_source);
			ether_addr_copy(pmask->smac, eth_mask->h_source);
			req->features |= BIT_ULL(NPC_SMAC);
		}
		if (!is_zero_ether_addr(eth_mask->h_dest)) {
			ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
			ether_addr_copy(pmask->dmac, eth_mask->h_dest);
			req->features |= BIT_ULL(NPC_DMAC);
		}
		if (eth_hdr->h_proto) {
			memcpy(&pkt->etype, &eth_hdr->h_proto,
			       sizeof(pkt->etype));
			memcpy(&pmask->etype, &eth_mask->h_proto,
			       sizeof(pmask->etype));
			req->features |= BIT_ULL(NPC_ETYPE);
		}
		break;
	case IP_USER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	case IPV6_USER_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fsp->flow_type & FLOW_EXT) {
		u16 vlan_etype;

		if (fsp->m_ext.vlan_etype) {
			/* Partial masks not supported */
			if (be16_to_cpu(fsp->m_ext.vlan_etype) != 0xFFFF)
				return -EINVAL;

			vlan_etype = be16_to_cpu(fsp->h_ext.vlan_etype);

			/* Drop rule with vlan_etype == 802.1Q
			 * and vlan_id == 0 is not supported
			 */
			if (vlan_etype == ETH_P_8021Q && !fsp->m_ext.vlan_tci &&
			    fsp->ring_cookie == RX_CLS_FLOW_DISC)
				return -EINVAL;

			/* Only ETH_P_8021Q and ETH_P_8021AD types supported */
			if (vlan_etype != ETH_P_8021Q &&
			    vlan_etype != ETH_P_8021AD)
				return -EINVAL;

			memcpy(&pkt->vlan_etype, &fsp->h_ext.vlan_etype,
			       sizeof(pkt->vlan_etype));
			memcpy(&pmask->vlan_etype, &fsp->m_ext.vlan_etype,
			       sizeof(pmask->vlan_etype));

			if (vlan_etype == ETH_P_8021Q)
				req->features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG);
			else
				req->features |= BIT_ULL(NPC_VLAN_ETYPE_STAG);
		}

		if (fsp->m_ext.vlan_tci) {
			memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
			       sizeof(pkt->vlan_tci));
			memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
			       sizeof(pmask->vlan_tci));
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}

		if (fsp->m_ext.data[1]) {
			if (flow_type == IP_USER_FLOW) {
				if (be32_to_cpu(fsp->h_ext.data[1]) != IPV4_FLAG_MORE)
					return -EINVAL;

				pkt->ip_flag = be32_to_cpu(fsp->h_ext.data[1]);
				pmask->ip_flag = be32_to_cpu(fsp->m_ext.data[1]);
				req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
			} else if (fsp->h_ext.data[1] ==
					cpu_to_be32(OTX2_DEFAULT_ACTION)) {
				/* Not Drop/Direct to queue but use action
				 * in default entry
				 */
				req->op = NIX_RX_ACTION_DEFAULT;
			}
		}
	}

	if (fsp->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fsp->m_ext.h_dest)) {
		ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
		ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
		req->features |= BIT_ULL(NPC_DMAC);
	}

	if (!req->features)
		return -EOPNOTSUPP;

	return 0;
}

static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
					struct ethtool_rx_flow_spec *fsp)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	u64 ring_cookie = fsp->ring_cookie;
	u32 flow_type;

	if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
		return false;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);

	/* CGX/RPM block dmac filtering is configured for white listing;
	 * check for an action other than DROP
	 */
	if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
	    !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
		if (is_zero_ether_addr(eth_mask->h_dest) &&
		    is_valid_ether_addr(eth_hdr->h_dest))
			return true;
	}

	return false;
}
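
/* Build an NPC install-flow request from @flow and send it to the AF.
 * Handles drop, RSS, unicast-queue and VF redirection actions; when DCB
 * is enabled, VLAN-priority rules matching an enabled PFC priority also
 * update the backpressure ID of the target receive queue.
 */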
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	u64 ring_cookie = flow->flow_spec.ring_cookie;
#ifdef CONFIG_DCB
	int vlan_prio, qidx, pfc_rule = 0;
#endif
	struct npc_install_flow_req *req;
	int err, vf = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_prepare_flow_request(&flow->flow_spec, req);
	if (err) {
		/* free the allocated msg above */
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	req->entry = flow->entry;
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	req->channel = pfvf->hw.rx_chan_base;
	if (ring_cookie == RX_CLS_FLOW_DISC) {
		req->op = NIX_RX_ACTIONOP_DROP;
	} else {
		/* change to unicast only if action of default entry is not
		 * requested by user
		 */
		if (flow->flow_spec.flow_type & FLOW_RSS) {
			req->op = NIX_RX_ACTIONOP_RSS;
			req->index = flow->rss_ctx_id;
			req->flow_key_alg = pfvf->hw.flowkey_alg_idx;
		} else {
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = ethtool_get_flow_spec_ring(ring_cookie);
		}
		vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		if (vf > pci_num_vf(pfvf->pdev)) {
			mutex_unlock(&pfvf->mbox.lock);
			return -EINVAL;
		}

#ifdef CONFIG_DCB
		/* Identify PFC rule if PFC enabled and ntuple rule is vlan */
		if (!vf && (req->features & BIT_ULL(NPC_OUTER_VID)) &&
		    pfvf->pfc_en && req->op != NIX_RX_ACTIONOP_RSS) {
			vlan_prio = ntohs(req->packet.vlan_tci) &
				    ntohs(req->mask.vlan_tci);

			/* Get the priority */
			vlan_prio >>= 13;
			flow->rule_type |= PFC_FLOWCTRL_RULE;
			/* Check if PFC enabled for this priority */
			if (pfvf->pfc_en & BIT(vlan_prio)) {
				pfc_rule = true;
				qidx = req->index;
			}
		}
#endif
	}

	/* ethtool ring_cookie has (VF + 1) for VF */
	if (vf) {
		req->vf = vf;
		flow->is_vf = true;
		flow->vf = vf;
	}

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);

#ifdef CONFIG_DCB
	if (!err && pfc_rule)
		otx2_update_bpid_in_rqctx(pfvf, vlan_prio, qidx, true);
#endif

	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
				    struct otx2_flow *flow)
{
	struct otx2_flow *pf_mac;
	struct ethhdr *eth_hdr;

	pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
	if (!pf_mac)
		return -ENOMEM;

	pf_mac->entry = 0;
	pf_mac->rule_type |= DMAC_FILTER_RULE;
	pf_mac->location = pfvf->flow_cfg->max_flows;
	memcpy(&pf_mac->flow_spec, &flow->flow_spec,
	       sizeof(struct ethtool_rx_flow_spec));
	pf_mac->flow_spec.location = pf_mac->location;

	/* Copy PF mac address */
	eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
	ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);

	/* Install DMAC filter with PF mac address */
	otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);

	otx2_add_flow_to_list(pfvf, pf_mac);
	pfvf->flow_cfg->nr_flows++;
	set_bit(0, pfvf->flow_cfg->dmacflt_bmap);

	return 0;
}

int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct otx2_flow *flow;
	struct ethhdr *eth_hdr;
	bool new = false;
	int err = 0;
	u64 vf_num;
	u32 ring;

	if (!flow_cfg->max_flows) {
		netdev_err(pfvf->netdev,
			   "Ntuple rule count is 0, allocate and retry\n");
		return -EINVAL;
	}

	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return -ENOMEM;

	/* The number of queues on a VF can be greater or less than the PF's
	 * queue count, so there is no need to check the queue count when the
	 * PF is installing a rule for one of its VFs. Below are the expected
	 * vf_num values for the corresponding ethtool commands:
	 *
	 * e.g.
	 * 1. ethtool -U <netdev> ... action -1                        ==> vf_num:255
	 * 2. ethtool -U <netdev> ... action <queue_num>               ==> vf_num:0
	 * 3. ethtool -U <netdev> ... vf <vf_idx> queue <queue_num>    ==> vf_num:vf_idx+1
	 */
	vf_num = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
	if (!is_otx2_vf(pfvf->pcifunc) && !vf_num &&
	    ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	if (fsp->location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, fsp->location);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->location = fsp->location;
		flow->entry = flow_cfg->flow_ent[flow->location];
		new = true;
	}
	/* struct copy */
	flow->flow_spec = *fsp;

	if (fsp->flow_type & FLOW_RSS)
		flow->rss_ctx_id = nfc->rss_context;

	if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
		eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* Sync dmac filter table with updated fields */
		if (flow->rule_type & DMAC_FILTER_RULE)
			return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
						   flow->entry);

		if (bitmap_full(flow_cfg->dmacflt_bmap,
				flow_cfg->dmacflt_max_flows)) {
			netdev_warn(pfvf->netdev,
				    "Can't insert the rule %d as max allowed dmac filters are %d\n",
				    flow->location +
				    flow_cfg->dmacflt_max_flows,
				    flow_cfg->dmacflt_max_flows);
			err = -EINVAL;
			if (new)
				kfree(flow);
			return err;
		}

		/* Install PF mac address to DMAC filter list */
		if (!test_bit(0, flow_cfg->dmacflt_bmap))
			otx2_add_flow_with_pfmac(pfvf, flow);

		flow->rule_type |= DMAC_FILTER_RULE;
		flow->entry = find_first_zero_bit(flow_cfg->dmacflt_bmap,
						  flow_cfg->dmacflt_max_flows);
		fsp->location = flow_cfg->max_flows + flow->entry;
		flow->flow_spec.location = fsp->location;
		flow->location = fsp->location;

		set_bit(flow->entry, flow_cfg->dmacflt_bmap);
		otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);

	} else {
		if (flow->location >= pfvf->flow_cfg->max_flows) {
			netdev_warn(pfvf->netdev,
				    "Can't insert non dmac ntuple rule at %d, allowed range 0-%d\n",
				    flow->location,
				    flow_cfg->max_flows - 1);
			err = -EINVAL;
		} else {
			err = otx2_add_flow_msg(pfvf, flow);
		}
	}

	if (err) {
		if (err == MBOX_MSG_INVALID)
			err = -EINVAL;
		if (new)
			kfree(flow);
		return err;
	}

	/* add the new flow installed to list */
	if (new) {
		otx2_add_flow_to_list(pfvf, flow);
		flow_cfg->nr_flows++;
	}

	if (flow->is_vf)
		netdev_info(pfvf->netdev,
			    "Make sure that VF's queue number is within its queue limit\n");
	return 0;
}

static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;
	if (all)
		req->all = 1;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;
	bool found = false;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if ((iter->rule_type & DMAC_FILTER_RULE) && iter->entry == 0) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			if (req == DMAC_ADDR_DEL) {
				otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
						    0);
				clear_bit(0, pfvf->flow_cfg->dmacflt_bmap);
				found = true;
			} else {
				ether_addr_copy(eth_hdr->h_dest,
						pfvf->netdev->dev_addr);

				otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
			}
			break;
		}
	}

	if (found) {
		list_del(&iter->list);
		kfree(iter);
		pfvf->flow_cfg->nr_flows--;
	}
}

int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_flow *flow;
	int err;

	if (location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, location);
	if (!flow)
		return -ENOENT;

	if (flow->rule_type & DMAC_FILTER_RULE) {
		struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* user not allowed to remove dmac filter with interface mac */
		if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
			return -EPERM;

		err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
					  flow->entry);
		clear_bit(flow->entry, flow_cfg->dmacflt_bmap);
		/* If all dmac filters are removed delete macfilter with
		 * interface mac address and configure CGX/RPM block in
		 * promiscuous mode
		 */
		if (bitmap_weight(flow_cfg->dmacflt_bmap,
				  flow_cfg->dmacflt_max_flows) == 1)
			otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
	} else {
#ifdef CONFIG_DCB
		if (flow->rule_type & PFC_FLOWCTRL_RULE)
			otx2_update_bpid_in_rqctx(pfvf, 0,
						  flow->flow_spec.ring_cookie,
						  false);
#endif

		err = otx2_remove_flow_msg(pfvf, flow->entry, false);
	}

	if (err)
		return err;

	list_del(&flow->list);
	kfree(flow);
	flow_cfg->nr_flows--;

	return 0;
}

void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_flow *flow, *tmp;
	int err;

	list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
		if (flow->rss_ctx_id != ctx_id)
			continue;
		err = otx2_remove_flow(pfvf, flow->location);
		if (err)
			netdev_warn(pfvf->netdev,
				    "Can't delete the rule %d associated with this rss group err:%d",
				    flow->location, err);
	}
}

int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return 0;

	if (!flow_cfg->max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->start = flow_cfg->flow_ent[0];
	req->end = flow_cfg->flow_ent[flow_cfg->max_flows - 1];
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
	return err;
}

int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return 0;

	/* remove all flows */
	err = otx2_remove_flow_msg(pfvf, 0, true);
	if (err)
		return err;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->all = 1;
	/* Send message to AF to free MCAM entries */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	flow_cfg->max_flows = 0;
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	req->intf = NIX_INTF_RX;
	ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->channel = pfvf->hw.rx_chan_base;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}
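
/* Enable/disable RX VLAN offload (tag strip and capture). This path is
 * typically reached when the NETIF_F_HW_VLAN_CTAG_RX feature is toggled,
 * e.g. via "ethtool -K <netdev> rxvlan on" (command shown for
 * illustration only).
 */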
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
{
	struct nix_vtag_config *req;
	struct mbox_msghdr *rsp_hdr;
	int err;

	/* Don't have enough mcam entries */
	if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
		return -ENOMEM;

	if (enable) {
		err = otx2_install_rxvlan_offload_flow(pf);
		if (err)
			return err;
	} else {
		err = otx2_delete_rxvlan_offload_flow(pf);
		if (err)
			return err;
	}

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* config strip, capture and size */
	req->vtag_size = VTAGSIZE_T4;
	req->cfg_type = 1; /* rx vlan cfg */
	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req->rx.strip_vtag = enable;
	req->rx.capture_vtag = enable;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		mutex_unlock(&pf->mbox.lock);
		return err;
	}

	rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp_hdr)) {
		mutex_unlock(&pf->mbox.lock);
		return PTR_ERR(rsp_hdr);
	}

	mutex_unlock(&pf->mbox.lock);
	return rsp_hdr->rc;
}

void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;

	list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
		if (iter->rule_type & DMAC_FILTER_RULE) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			otx2_dmacflt_add(pf, eth_hdr->h_dest,
					 iter->entry);
		}
	}
}

void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
{
	otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
}