// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitfield.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/ipv6.h>

#include "cn10k.h"
#include "otx2_common.h"
#include "qos.h"

#define CN10K_MAX_BURST_MANTISSA	0x7FFFULL
#define CN10K_MAX_BURST_SIZE		8453888ULL

#define CN10K_TLX_BURST_MANTISSA	GENMASK_ULL(43, 29)
#define CN10K_TLX_BURST_EXPONENT	GENMASK_ULL(47, 44)

#define OTX2_UNSUPP_LSE_DEPTH		GENMASK(6, 4)

struct otx2_tc_flow_stats {
	u64 bytes;
	u64 pkts;
	u64 used;
};

struct otx2_tc_flow {
	struct list_head		list;
	unsigned long			cookie;
	struct rcu_head			rcu;
	struct otx2_tc_flow_stats	stats;
	spinlock_t			lock; /* lock for stats */
	u16				rq;
	u16				entry;
	u16				leaf_profile;
	bool				is_act_police;
	u32				prio;
	struct npc_install_flow_req	req;
};

static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
				      u32 *burst_exp, u32 *burst_mantissa)
{
	int max_burst, max_mantissa;
	unsigned int tmp;

	if (is_dev_otx2(nic->pdev)) {
		max_burst = MAX_BURST_SIZE;
		max_mantissa = MAX_BURST_MANTISSA;
	} else {
		max_burst = CN10K_MAX_BURST_SIZE;
		max_mantissa = CN10K_MAX_BURST_MANTISSA;
	}

	/* Burst is calculated as
	 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
	 * Max supported burst size is 130,816 bytes.
	 */
	burst = min_t(u32, burst, max_burst);
	if (burst) {
		*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
		tmp = burst - rounddown_pow_of_two(burst);
		if (burst < max_mantissa)
			*burst_mantissa = tmp * 2;
		else
			*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
	} else {
		*burst_exp = MAX_BURST_EXPONENT;
		*burst_mantissa = max_mantissa;
	}
}

static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
				     u32 *mantissa, u32 *div_exp)
{
	u64 tmp;

	/* Rate calculation by hardware
	 *
	 * PIR_ADD = ((256 + mantissa) << exp) / 256
	 * rate = (2 * PIR_ADD) / (1 << div_exp)
	 * The resultant rate is in Mbps.
	 */

	/* 2Mbps to 100Gbps can be expressed with div_exp = 0.
	 * Setting this to '0' will ease the calculation of
	 * exponent and mantissa.
	 */
	*div_exp = 0;

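	/* Worked example (illustrative, and assuming MAX_RATE_MANTISSA is
	 * 0xFF as defined in the common header): for maxrate = 5000 Mbps,
	 * ilog2(5000) = 12, so exp = 11 and
	 * mantissa = (5000 - 4096) / (1 << (11 - 7)) = 56.  The hardware then
	 * computes PIR_ADD = ((256 + 56) << 11) / 256 = 2496 and
	 * rate = 2 * 2496 = 4992 Mbps, i.e. just under the requested 5 Gbps
	 * due to quantization.
	 */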
	if (maxrate) {
		*exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
		tmp = maxrate - rounddown_pow_of_two(maxrate);
		if (maxrate < MAX_RATE_MANTISSA)
			*mantissa = tmp * 2;
		else
			*mantissa = tmp / (1ULL << (*exp - 7));
	} else {
		/* Instead of disabling rate limiting, set all values to max */
		*exp = MAX_RATE_EXPONENT;
		*mantissa = MAX_RATE_MANTISSA;
	}
}

u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
				u64 maxrate, u32 burst)
{
	u32 burst_exp, burst_mantissa;
	u32 exp, mantissa, div_exp;
	u64 regval = 0;

	/* Get exponent and mantissa values from the desired rate */
	otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
	otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);

	if (is_dev_otx2(nic->pdev)) {
		regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
			 FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	} else {
		regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
			 FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	}

	return regval;
}

static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
					 u32 burst, u64 maxrate)
{
	struct otx2_hw *hw = &nic->hw;
	struct nix_txschq_config *req;
	int txschq, err;

	/* All SQs share the same TL4, so pick the first scheduler */
	txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->lvl = NIX_TXSCH_LVL_TL4;
	req->num_regs = 1;
	req->reg[0] = NIX_AF_TL4X_PIR(txschq);
	req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);

	err = otx2_sync_mbox_msg(&nic->mbox);
	mutex_unlock(&nic->mbox.lock);
	return err;
}

static int otx2_tc_validate_flow(struct otx2_nic *nic,
				 struct flow_action *actions,
				 struct netlink_ext_ack *extack)
{
	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload supports only 1 policing action");
		return -EINVAL;
	}
	return 0;
}

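/* Sanity-check a police action against what the hardware policer supports.
 * Illustrative example (interface name is hypothetical) of a policer that
 * passes these checks:
 *   tc filter add dev eth0 ingress matchall skip_sw \
 *       action police rate 100mbit burst 32k conform-exceed drop/pipe
 */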
static int otx2_policer_validate(const struct flow_action *action,
				 const struct flow_action_entry *act,
				 struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one Egress MATCHALL ratelimiter can be offloaded");
		return -ENOMEM;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		err = otx2_policer_validate(&cls->rule->action, entry, extack);
		if (err)
			return err;

		if (entry->police.rate_pkt_ps) {
			NL_SET_ERR_MSG_MOD(extack, "QoS offload does not support packets per second");
			return -EOPNOTSUPP;
		}
		err = otx2_set_matchall_egress_rate(nic, entry->police.burst,
						    otx2_convert_rate(entry->police.rate_bytes_ps));
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action is supported with Egress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = otx2_set_matchall_egress_rate(nic, 0, 0);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
	return err;
}

static int otx2_tc_act_set_police(struct otx2_nic *nic,
				  struct otx2_tc_flow *node,
				  struct flow_cls_offload *f,
				  u64 rate, u32 burst, u32 mark,
				  struct npc_install_flow_req *req, bool pps)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct otx2_hw *hw = &nic->hw;
	int rq_idx, rc;

	rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
	if (rq_idx >= hw->rx_queues) {
		NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
		return -EINVAL;
	}

	mutex_lock(&nic->mbox.lock);

	rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
	if (rc) {
		mutex_unlock(&nic->mbox.lock);
		return rc;
	}

	rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
	if (rc)
		goto free_leaf;

	rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
	if (rc)
		goto free_leaf;

	mutex_unlock(&nic->mbox.lock);

	req->match_id = mark & 0xFFFFULL;
	req->index = rq_idx;
	req->op = NIX_RX_ACTIONOP_UCAST;
	set_bit(rq_idx, &nic->rq_bmap);
	node->is_act_police = true;
	node->rq = rq_idx;

	return 0;

free_leaf:
	if (cn10k_free_leaf_profile(nic, node->leaf_profile))
		netdev_err(nic->netdev,
			   "Unable to free leaf bandwidth profile(%d)\n",
			   node->leaf_profile);
	mutex_unlock(&nic->mbox.lock);
	return rc;
}

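/* Translate the tc action list into the NPC flow request.  Supported actions
 * are drop, accept, redirect to an interface under the same PF, VLAN pop,
 * mark, RX queue override and (on CN10K only) a single police action; any
 * other action makes the rule unoffloadable.
 */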
static int otx2_tc_parse_actions(struct otx2_nic *nic,
				 struct flow_action *flow_action,
				 struct npc_install_flow_req *req,
				 struct flow_cls_offload *f,
				 struct otx2_tc_flow *node)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_action_entry *act;
	struct net_device *target;
	struct otx2_nic *priv;
	u32 burst, mark = 0;
	u8 nr_police = 0;
	bool pps = false;
	u64 rate;
	int err;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
		return -EINVAL;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			req->op = NIX_RX_ACTIONOP_DROP;
			return 0;
		case FLOW_ACTION_ACCEPT:
			req->op = NIX_RX_ACTION_DEFAULT;
			return 0;
		case FLOW_ACTION_REDIRECT_INGRESS:
			target = act->dev;
			priv = netdev_priv(target);
			/* npc_install_flow_req doesn't support passing a target pcifunc */
			if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "can't redirect to other pf/vf");
				return -EOPNOTSUPP;
			}
			req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;

			/* if op is already set; avoid overwriting the same */
			if (!req->op)
				req->op = NIX_RX_ACTION_DEFAULT;
			break;

		case FLOW_ACTION_VLAN_POP:
			req->vtag0_valid = true;
			/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
			req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
			break;
		case FLOW_ACTION_POLICE:
			/* Ingress ratelimiting is not supported on OcteonTx2 */
			if (is_dev_otx2(nic->pdev)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Ingress policing not supported on this platform");
				return -EOPNOTSUPP;
			}

			err = otx2_policer_validate(flow_action, act, extack);
			if (err)
				return err;

			if (act->police.rate_bytes_ps > 0) {
				rate = act->police.rate_bytes_ps * 8;
				burst = act->police.burst;
			} else if (act->police.rate_pkt_ps > 0) {
				/* The algorithm used to calculate rate
				 * mantissa, exponent values for a given token
				 * rate (token can be byte or packet) requires
				 * token rate to be multiplied by 8.
				 */
				rate = act->police.rate_pkt_ps * 8;
				burst = act->police.burst_pkt;
				pps = true;
			}
			nr_police++;
			break;
		case FLOW_ACTION_MARK:
			mark = act->mark;
			break;

		case FLOW_ACTION_RX_QUEUE_MAPPING:
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = act->rx_queue;
			break;

		default:
			return -EOPNOTSUPP;
		}
	}

	if (nr_police > 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "rate limit police offload requires a single action");
		return -EOPNOTSUPP;
	}

	if (nr_police)
		return otx2_tc_act_set_police(nic, node, f, rate, burst,
					      mark, req, pps);

	return 0;
}

static int otx2_tc_process_vlan(struct otx2_nic *nic, struct flow_msg *flow_spec,
				struct flow_msg *flow_mask, struct flow_rule *rule,
				struct npc_install_flow_req *req, bool is_inner)
{
	struct flow_match_vlan match;
	u16 vlan_tci, vlan_tci_mask;

	if (is_inner)
		flow_rule_match_cvlan(rule, &match);
	else
		flow_rule_match_vlan(rule, &match);

	if (!eth_type_vlan(match.key->vlan_tpid)) {
		netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
			   ntohs(match.key->vlan_tpid));
		return -EOPNOTSUPP;
	}

	if (!match.mask->vlan_id) {
		struct flow_action_entry *act;
		int i;

		flow_action_for_each(i, act, &rule->action) {
			if (act->id == FLOW_ACTION_DROP) {
				netdev_err(nic->netdev,
					   "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n",
					   ntohs(match.key->vlan_tpid), match.key->vlan_id);
				return -EOPNOTSUPP;
			}
		}
	}

	if (match.mask->vlan_id ||
	    match.mask->vlan_dei ||
	    match.mask->vlan_priority) {
		vlan_tci = match.key->vlan_id |
			   match.key->vlan_dei << 12 |
			   match.key->vlan_priority << 13;

		vlan_tci_mask = match.mask->vlan_id |
				match.mask->vlan_dei << 12 |
				match.mask->vlan_priority << 13;
		if (is_inner) {
			flow_spec->vlan_itci = htons(vlan_tci);
			flow_mask->vlan_itci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_INNER_VID);
		} else {
			flow_spec->vlan_tci = htons(vlan_tci);
			flow_mask->vlan_tci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}
	}

	return 0;
}

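/* Build the NPC packet/mask pair and feature bits from the flower dissector
 * keys, then hand the action list to otx2_tc_parse_actions().  Illustrative
 * example (interface name is hypothetical) of a rule this can offload:
 *   tc filter add dev eth0 ingress protocol ip flower skip_sw \
 *       dst_ip 10.0.0.2 ip_proto udp dst_port 4789 action drop
 */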
static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
				struct flow_cls_offload *f,
				struct npc_install_flow_req *req)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_msg *flow_spec = &req->packet;
	struct flow_msg *flow_mask = &req->mask;
	struct flow_dissector *dissector;
	struct flow_rule *rule;
	u8 ip_proto = 0;

	rule = flow_cls_offload_flow_rule(f);
	dissector = rule->match.dissector;

	if ((dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPSEC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
		netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		/* All EtherTypes can be matched, no hw limitation */
		flow_spec->etype = match.key->n_proto;
		flow_mask->etype = match.mask->n_proto;
		req->features |= BIT_ULL(NPC_ETYPE);

		if (match.mask->ip_proto &&
		    (match.key->ip_proto != IPPROTO_TCP &&
		     match.key->ip_proto != IPPROTO_UDP &&
		     match.key->ip_proto != IPPROTO_SCTP &&
		     match.key->ip_proto != IPPROTO_ICMP &&
		     match.key->ip_proto != IPPROTO_ESP &&
		     match.key->ip_proto != IPPROTO_AH &&
		     match.key->ip_proto != IPPROTO_ICMPV6)) {
			netdev_info(nic->netdev,
				    "ip_proto=0x%x not supported\n",
				    match.key->ip_proto);
			return -EOPNOTSUPP;
		}
		if (match.mask->ip_proto)
			ip_proto = match.key->ip_proto;

		if (ip_proto == IPPROTO_UDP)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (ip_proto == IPPROTO_TCP)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else if (ip_proto == IPPROTO_SCTP)
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		else if (ip_proto == IPPROTO_ICMP)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
		else if (ip_proto == IPPROTO_ICMPV6)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
		else if (ip_proto == IPPROTO_ESP)
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		else if (ip_proto == IPPROTO_AH)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
			NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later");
			return -EOPNOTSUPP;
		}

		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			if (ntohs(flow_spec->etype) == ETH_P_IP) {
				flow_spec->ip_flag = IPV4_FLAG_MORE;
				flow_mask->ip_flag = IPV4_FLAG_MORE;
				req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
			} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
				flow_spec->next_header = IPPROTO_FRAGMENT;
				flow_mask->next_header = 0xff;
				req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
			} else {
				NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 or IPv6");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		if (!is_zero_ether_addr(match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
			return -EOPNOTSUPP;
		}

		if (!is_zero_ether_addr(match.mask->dst)) {
			ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
			ether_addr_copy(flow_mask->dmac,
					(u8 *)&match.mask->dst);
			req->features |= BIT_ULL(NPC_DMAC);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPSEC)) {
		struct flow_match_ipsec match;

		flow_rule_match_ipsec(rule, &match);
		if (!match.mask->spi) {
			NL_SET_ERR_MSG_MOD(extack, "spi index not specified");
			return -EOPNOTSUPP;
		}
		if (ip_proto != IPPROTO_ESP &&
		    ip_proto != IPPROTO_AH) {
			NL_SET_ERR_MSG_MOD(extack,
					   "SPI index is valid only for ESP/AH proto");
			return -EOPNOTSUPP;
		}

		flow_spec->spi = match.key->spi;
		flow_mask->spi = match.mask->spi;
		req->features |= BIT_ULL(NPC_IPSEC_SPI);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
		    match.mask->tos) {
			NL_SET_ERR_MSG_MOD(extack, "tos not supported");
			return -EOPNOTSUPP;
		}
		if (match.mask->ttl) {
NL_SET_ERR_MSG_MOD(extack, "ttl not supported"); 646 return -EOPNOTSUPP; 647 } 648 flow_spec->tos = match.key->tos; 649 flow_mask->tos = match.mask->tos; 650 req->features |= BIT_ULL(NPC_TOS); 651 } 652 653 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 654 int ret; 655 656 ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, false); 657 if (ret) 658 return ret; 659 } 660 661 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { 662 int ret; 663 664 ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, true); 665 if (ret) 666 return ret; 667 } 668 669 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { 670 struct flow_match_ipv4_addrs match; 671 672 flow_rule_match_ipv4_addrs(rule, &match); 673 674 flow_spec->ip4dst = match.key->dst; 675 flow_mask->ip4dst = match.mask->dst; 676 req->features |= BIT_ULL(NPC_DIP_IPV4); 677 678 flow_spec->ip4src = match.key->src; 679 flow_mask->ip4src = match.mask->src; 680 req->features |= BIT_ULL(NPC_SIP_IPV4); 681 } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { 682 struct flow_match_ipv6_addrs match; 683 684 flow_rule_match_ipv6_addrs(rule, &match); 685 686 if (ipv6_addr_loopback(&match.key->dst) || 687 ipv6_addr_loopback(&match.key->src)) { 688 NL_SET_ERR_MSG_MOD(extack, 689 "Flow matching IPv6 loopback addr not supported"); 690 return -EOPNOTSUPP; 691 } 692 693 if (!ipv6_addr_any(&match.mask->dst)) { 694 memcpy(&flow_spec->ip6dst, 695 (struct in6_addr *)&match.key->dst, 696 sizeof(flow_spec->ip6dst)); 697 memcpy(&flow_mask->ip6dst, 698 (struct in6_addr *)&match.mask->dst, 699 sizeof(flow_spec->ip6dst)); 700 req->features |= BIT_ULL(NPC_DIP_IPV6); 701 } 702 703 if (!ipv6_addr_any(&match.mask->src)) { 704 memcpy(&flow_spec->ip6src, 705 (struct in6_addr *)&match.key->src, 706 sizeof(flow_spec->ip6src)); 707 memcpy(&flow_mask->ip6src, 708 (struct in6_addr *)&match.mask->src, 709 sizeof(flow_spec->ip6src)); 710 req->features |= BIT_ULL(NPC_SIP_IPV6); 711 } 712 } 713 714 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { 715 struct flow_match_ports match; 716 717 flow_rule_match_ports(rule, &match); 718 719 flow_spec->dport = match.key->dst; 720 flow_mask->dport = match.mask->dst; 721 722 if (flow_mask->dport) { 723 if (ip_proto == IPPROTO_UDP) 724 req->features |= BIT_ULL(NPC_DPORT_UDP); 725 else if (ip_proto == IPPROTO_TCP) 726 req->features |= BIT_ULL(NPC_DPORT_TCP); 727 else if (ip_proto == IPPROTO_SCTP) 728 req->features |= BIT_ULL(NPC_DPORT_SCTP); 729 } 730 731 flow_spec->sport = match.key->src; 732 flow_mask->sport = match.mask->src; 733 734 if (flow_mask->sport) { 735 if (ip_proto == IPPROTO_UDP) 736 req->features |= BIT_ULL(NPC_SPORT_UDP); 737 else if (ip_proto == IPPROTO_TCP) 738 req->features |= BIT_ULL(NPC_SPORT_TCP); 739 else if (ip_proto == IPPROTO_SCTP) 740 req->features |= BIT_ULL(NPC_SPORT_SCTP); 741 } 742 } 743 744 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) { 745 struct flow_match_mpls match; 746 u8 bit; 747 748 flow_rule_match_mpls(rule, &match); 749 750 if (match.mask->used_lses & OTX2_UNSUPP_LSE_DEPTH) { 751 NL_SET_ERR_MSG_MOD(extack, 752 "unsupported LSE depth for MPLS match offload"); 753 return -EOPNOTSUPP; 754 } 755 756 for_each_set_bit(bit, (unsigned long *)&match.mask->used_lses, 757 FLOW_DIS_MPLS_MAX) { 758 /* check if any of the fields LABEL,TC,BOS are set */ 759 if (*((u32 *)&match.mask->ls[bit]) & 760 OTX2_FLOWER_MASK_MPLS_NON_TTL) { 761 /* Hardware will capture 4 byte MPLS header into 762 * two fields NPC_MPLSX_LBTCBOS and 
				/* Hardware will capture 4 byte MPLS header into
				 * two fields NPC_MPLSX_LBTCBOS and NPC_MPLSX_TTL.
				 * Derive the associated NPC key based on header
				 * index and offset.
				 */

				req->features |= BIT_ULL(NPC_MPLS1_LBTCBOS +
							 2 * bit);
				flow_spec->mpls_lse[bit] =
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
						   match.key->ls[bit].mpls_label) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
						   match.key->ls[bit].mpls_tc) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
						   match.key->ls[bit].mpls_bos);

				flow_mask->mpls_lse[bit] =
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
						   match.mask->ls[bit].mpls_label) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
						   match.mask->ls[bit].mpls_tc) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
						   match.mask->ls[bit].mpls_bos);
			}

			if (match.mask->ls[bit].mpls_ttl) {
				req->features |= BIT_ULL(NPC_MPLS1_TTL +
							 2 * bit);
				flow_spec->mpls_lse[bit] |=
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
						   match.key->ls[bit].mpls_ttl);
				flow_mask->mpls_lse[bit] |=
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
						   match.mask->ls[bit].mpls_ttl);
			}
		}
	}

	return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}

static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_tc_flow *iter, *tmp;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
}

static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg,
							unsigned long cookie)
{
	struct otx2_tc_flow *tmp;

	list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}

	return NULL;
}

static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg,
						       int index)
{
	struct otx2_tc_flow *tmp;
	int i = 0;

	list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
		if (i == index)
			return tmp;
		i++;
	}

	return NULL;
}

static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg,
				       struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node == tmp) {
			list_del(&node->list);
			return;
		}
	}
}

static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg,
				    struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;
	int index = 0;

	/* If the flow list is empty then add the new node */
	if (list_empty(&flow_cfg->flow_list_tc)) {
		list_add(&node->list, &flow_cfg->flow_list_tc);
		return index;
	}

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node->prio < tmp->prio)
			break;
		index++;
	}

	list_add(&node->list, pos->prev);
	return index;
}

static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req)
{
	struct npc_install_flow_req *tmp_req;
	int err;

	mutex_lock(&nic->mbox.lock);
	tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!tmp_req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	memcpy(tmp_req, req, sizeof(struct npc_install_flow_req));
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	mutex_unlock(&nic->mbox.lock);
	return 0;
}

static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val)
{
	struct npc_delete_flow_rsp *rsp;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
			   entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	if (cntr_val) {
		rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								      0, &req->hdr);
		if (IS_ERR(rsp)) {
			netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n",
				   entry);
			mutex_unlock(&nic->mbox.lock);
			return -EFAULT;
		}

		*cntr_val = rsp->cntr_val;
	}

	mutex_unlock(&nic->mbox.lock);
	return 0;
}

static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic,
					     struct otx2_flow_config *flow_cfg,
					     struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;
	int i = 0, index = 0;
	u16 cntr_val = 0;

	/* Find and delete the entry from the list and re-install
	 * all the entries from beginning to the index of the
	 * deleted entry to higher mcam indexes.
	 */
	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node == tmp) {
			list_del(&tmp->list);
			break;
		}

		otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
		tmp->entry++;
		tmp->req.entry = tmp->entry;
		tmp->req.cntr_val = cntr_val;
		index++;
	}

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		if (i == index)
			break;

		tmp = list_entry(pos, struct otx2_tc_flow, list);
		otx2_add_mcam_flow_entry(nic, &tmp->req);
		i++;
	}

	return 0;
}

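/* Illustrative walk-through (numbers are hypothetical): with max_flows = 16
 * and nr_flows = 2, the two rules added so far sit at flow_ent[14] and
 * flow_ent[15] and mcam_idx starts at 13.  If the new rule's priority places
 * it between them (list_idx = 1), the rule at the head of the list is
 * re-installed at flow_ent[13] and the new rule is installed at flow_ent[14],
 * the mcam_idx returned below, so flow_ent[] placement keeps following the
 * list's priority order.
 */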
static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic,
					     struct otx2_flow_config *flow_cfg,
					     struct otx2_tc_flow *node)
{
	int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1;
	struct otx2_tc_flow *tmp;
	int list_idx, i;
	u16 cntr_val = 0;

	/* Find the index of the entry(list_idx) whose priority
	 * is greater than the new entry and re-install all
	 * the entries from beginning to list_idx to higher
	 * mcam indexes.
	 */
	list_idx = otx2_tc_add_to_flow_list(flow_cfg, node);
	for (i = 0; i < list_idx; i++) {
		tmp = otx2_tc_get_entry_by_index(flow_cfg, i);
		if (!tmp)
			return -ENOMEM;

		otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
		tmp->entry = flow_cfg->flow_ent[mcam_idx];
		tmp->req.entry = tmp->entry;
		tmp->req.cntr_val = cntr_val;
		otx2_add_mcam_flow_entry(nic, &tmp->req);
		mcam_idx++;
	}

	return mcam_idx;
}

static int otx2_tc_update_mcam_table(struct otx2_nic *nic,
				     struct otx2_flow_config *flow_cfg,
				     struct otx2_tc_flow *node,
				     bool add_req)
{
	if (add_req)
		return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node);

	return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node);
}

static int otx2_tc_del_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
	if (!flow_node) {
		netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
			   tc_flow_cmd->cookie);
		return -EINVAL;
	}

	if (flow_node->is_act_police) {
		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
						 flow_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   flow_node->rq, flow_node->leaf_profile);

		err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   flow_node->leaf_profile);

		__clear_bit(flow_node->rq, &nic->rq_bmap);

		mutex_unlock(&nic->mbox.lock);
	}

	otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
	otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
	kfree_rcu(flow_node, rcu);
	flow_cfg->nr_flows--;
	return 0;
}

static int otx2_tc_add_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_flow *new_node, *old_node;
	struct npc_install_flow_req *req, dummy;
	int rc, err, mcam_idx;

	if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
		return -ENOMEM;

	if (flow_cfg->nr_flows == flow_cfg->max_flows) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Free MCAM entry not available to add the flow");
		return -ENOMEM;
	}

	/* allocate memory for the new flow and its node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node)
		return -ENOMEM;
	spin_lock_init(&new_node->lock);
	new_node->cookie = tc_flow_cmd->cookie;
	new_node->prio = tc_flow_cmd->common.prio;

	memset(&dummy, 0, sizeof(struct npc_install_flow_req));

	rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
	if (rc) {
		kfree_rcu(new_node, rcu);
		return rc;
	}

	/* If a flow exists with the same cookie, delete it */
	old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
	if (old_node)
		otx2_tc_del_flow(nic, tc_flow_cmd);

	mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true);
	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		rc = -ENOMEM;
		goto free_leaf;
	}

	memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
	memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
	req->channel = nic->hw.rx_chan_base;
	req->entry = flow_cfg->flow_ent[mcam_idx];
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	new_node->entry = req->entry;

	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
		mutex_unlock(&nic->mbox.lock);
		goto free_leaf;
	}

	mutex_unlock(&nic->mbox.lock);
	memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req));

	flow_cfg->nr_flows++;
	return 0;

free_leaf:
	otx2_tc_del_from_flow_list(flow_cfg, new_node);
	if (new_node->is_act_police) {
		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
						 new_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   new_node->rq, new_node->leaf_profile);
		err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   new_node->leaf_profile);

		__clear_bit(new_node->rq, &nic->rq_bmap);

		mutex_unlock(&nic->mbox.lock);
	}
	/* Free the node only after the policer state that references it has
	 * been torn down.
	 */
	kfree_rcu(new_node, rcu);

	return rc;
}

static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
				  struct flow_cls_offload *tc_flow_cmd)
{
	struct npc_mcam_get_stats_req *req;
	struct npc_mcam_get_stats_rsp *rsp;
	struct otx2_tc_flow_stats *stats;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie);
	if (!flow_node) {
		netdev_info(nic->netdev, "tc flow not found for cookie %lx",
			    tc_flow_cmd->cookie);
		return -EINVAL;
	}

	mutex_lock(&nic->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_node->entry;

	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp
		(&nic->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&nic->mbox.lock);
		return PTR_ERR(rsp);
	}

	mutex_unlock(&nic->mbox.lock);

	if (!rsp->stat_ena)
		return -EINVAL;

	stats = &flow_node->stats;

	spin_lock(&flow_node->lock);
	flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);
	stats->pkts = rsp->stat;
	spin_unlock(&flow_node->lock);

	return 0;
}

static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
				    struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return otx2_tc_add_flow(nic, cls_flower);
	case FLOW_CLS_DESTROY:
		return otx2_tc_del_flow(nic, cls_flower);
	case FLOW_CLS_STATS:
		return otx2_tc_get_flow_stats(nic, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

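/* Illustrative example (interface name is hypothetical) of an ingress
 * MATCHALL policer that this handler offloads on CN10K:
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress matchall skip_sw \
 *       action police rate 1gbit burst 64k drop
 */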
static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
					    struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	u64 rate;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one ingress MATCHALL ratelimiter can be offloaded");
		return -ENOMEM;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		/* Ingress ratelimiting is not supported on OcteonTx2 */
		if (is_dev_otx2(nic->pdev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Ingress policing not supported on this platform");
			return -EOPNOTSUPP;
		}

		err = cn10k_alloc_matchall_ipolicer(nic);
		if (err)
			return err;

		/* Convert to bits per second */
		rate = entry->police.rate_bytes_ps * 8;
		err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action supported with Ingress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = cn10k_free_matchall_ipolicer(nic);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
	return err;
}

static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_ingress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;
	bool ntuple;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	ntuple = nic->netdev->features & NETIF_F_NTUPLE;
	switch (type) {
	case TC_SETUP_CLSFLOWER:
		if (ntuple) {
			netdev_warn(nic->netdev,
				    "Can't install TC flower offload rule when NTUPLE is active");
			return -EOPNOTSUPP;
		}

		return otx2_setup_tc_cls_flower(nic, type_data);
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_ingress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
					 struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_egress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_egress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_egress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static LIST_HEAD(otx2_block_cb_list);

static int otx2_setup_tc_block(struct net_device *netdev,
			       struct flow_block_offload *f)
{
	struct otx2_nic *nic = netdev_priv(netdev);
	flow_setup_cb_t *cb;
	bool ingress;

	if (f->block_shared)
		return -EOPNOTSUPP;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = otx2_setup_tc_block_ingress_cb;
		ingress = true;
	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = otx2_setup_tc_block_egress_cb;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
					  nic, nic, ingress);
}

int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return otx2_setup_tc_block(netdev, type_data);
	case TC_SETUP_QDISC_HTB:
		return otx2_setup_tc_htb(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(otx2_setup_tc);

int otx2_init_tc(struct otx2_nic *nic)
{
	/* Exclude receive queue 0 from being used for police action */
	set_bit(0, &nic->rq_bmap);

	if (!nic->flow_cfg) {
		netdev_err(nic->netdev,
			   "Can't init TC, nic->flow_cfg is not setup\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_init_tc);

void otx2_shutdown_tc(struct otx2_nic *nic)
{
	otx2_destroy_tc_flow_list(nic);
}
EXPORT_SYMBOL(otx2_shutdown_tc);