// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitfield.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/ipv6.h>

#include "cn10k.h"
#include "otx2_common.h"

/* Egress rate limiting definitions */
#define MAX_BURST_EXPONENT		0x0FULL
#define MAX_BURST_MANTISSA		0xFFULL
#define MAX_BURST_SIZE			130816ULL
#define MAX_RATE_DIVIDER_EXPONENT	12ULL
#define MAX_RATE_EXPONENT		0x0FULL
#define MAX_RATE_MANTISSA		0xFFULL

#define CN10K_MAX_BURST_MANTISSA	0x7FFFULL
#define CN10K_MAX_BURST_SIZE		8453888ULL

/* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA		GENMASK_ULL(8, 1)
#define TLX_RATE_EXPONENT		GENMASK_ULL(12, 9)
#define TLX_RATE_DIVIDER_EXPONENT	GENMASK_ULL(16, 13)
#define TLX_BURST_MANTISSA		GENMASK_ULL(36, 29)
#define TLX_BURST_EXPONENT		GENMASK_ULL(40, 37)

#define CN10K_TLX_BURST_MANTISSA	GENMASK_ULL(43, 29)
#define CN10K_TLX_BURST_EXPONENT	GENMASK_ULL(47, 44)

struct otx2_tc_flow_stats {
	u64 bytes;
	u64 pkts;
	u64 used;
};

struct otx2_tc_flow {
	struct rhash_head	node;
	unsigned long		cookie;
	unsigned int		bitpos;
	struct rcu_head		rcu;
	struct otx2_tc_flow_stats stats;
	spinlock_t		lock; /* lock for stats */
	u16			rq;
	u16			entry;
	u16			leaf_profile;
	bool			is_act_police;
};

int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
{
	struct otx2_tc_info *tc = &nic->tc_info;

	if (!nic->flow_cfg->max_flows)
		return 0;

	/* Max flows changed, free the existing bitmap */
	kfree(tc->tc_entries_bitmap);

	tc->tc_entries_bitmap =
			kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows),
				sizeof(long), GFP_KERNEL);
	if (!tc->tc_entries_bitmap) {
		netdev_err(nic->netdev,
			   "Unable to alloc TC flow entries bitmap\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);

static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
				      u32 *burst_exp, u32 *burst_mantissa)
{
	int max_burst, max_mantissa;
	unsigned int tmp;

	if (is_dev_otx2(nic->pdev)) {
		max_burst = MAX_BURST_SIZE;
		max_mantissa = MAX_BURST_MANTISSA;
	} else {
		max_burst = CN10K_MAX_BURST_SIZE;
		max_mantissa = CN10K_MAX_BURST_MANTISSA;
	}

	/* Burst is calculated as
	 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
	 * Max supported burst size is 130,816 bytes on OcteonTx2
	 * and 8,453,888 bytes on CN10K.
	 */
	burst = min_t(u32, burst, max_burst);
	if (burst) {
		*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
		tmp = burst - rounddown_pow_of_two(burst);
		if (burst < max_mantissa)
			*burst_mantissa = tmp * 2;
		else
			*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
	} else {
		*burst_exp = MAX_BURST_EXPONENT;
		*burst_mantissa = max_mantissa;
	}
}
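
/* Worked example (illustrative, not from the hardware manual): a requested
 * burst of 4096 bytes on OcteonTx2 gives ilog2(4096) = 12, so burst_exp = 11;
 * 4096 is a power of two, so tmp = 0 and burst_mantissa = 0. The hardware
 * then reconstructs ((256 + 0) << (1 + 11)) / 256 = 4096 bytes.
 */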

static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
				     u32 *mantissa, u32 *div_exp)
{
	u64 tmp;

	/* Rate calculation by hardware
	 *
	 * PIR_ADD = ((256 + mantissa) << exp) / 256
	 * rate = (2 * PIR_ADD) / (1 << div_exp)
	 * The resultant rate is in Mbps.
	 */

	/* 2Mbps to 100Gbps can be expressed with div_exp = 0.
	 * Setting this to '0' will ease the calculation of
	 * exponent and mantissa.
	 */
	*div_exp = 0;

	if (maxrate) {
		*exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
		tmp = maxrate - rounddown_pow_of_two(maxrate);
		if (maxrate < MAX_RATE_MANTISSA)
			*mantissa = tmp * 2;
		else
			*mantissa = tmp / (1ULL << (*exp - 7));
	} else {
		/* Instead of disabling rate limiting, set all values to max */
		*exp = MAX_RATE_EXPONENT;
		*mantissa = MAX_RATE_MANTISSA;
	}
}
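
/* Worked example (illustrative): maxrate = 1000 Mbps gives ilog2(1000) = 9,
 * so exp = 8; tmp = 1000 - 512 = 488 and, since 1000 >= MAX_RATE_MANTISSA,
 * mantissa = 488 / (1 << (8 - 7)) = 244. The hardware then computes
 * PIR_ADD = ((256 + 244) << 8) / 256 = 500 and rate = 2 * 500 = 1000 Mbps.
 */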

static u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
				       u64 maxrate, u32 burst)
{
	u32 burst_exp, burst_mantissa;
	u32 exp, mantissa, div_exp;
	u64 regval = 0;

	/* Get exponent and mantissa values from the desired rate */
	otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
	otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);

	if (is_dev_otx2(nic->pdev)) {
		regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
			 FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	} else {
		regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
			 FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	}

	return regval;
}

static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
					 u32 burst, u64 maxrate)
{
	struct otx2_hw *hw = &nic->hw;
	struct nix_txschq_config *req;
	int txschq, err;

	/* All SQs share the same TL4, so pick the first scheduler */
	txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->lvl = NIX_TXSCH_LVL_TL4;
	req->num_regs = 1;
	req->reg[0] = NIX_AF_TL4X_PIR(txschq);
	req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);

	err = otx2_sync_mbox_msg(&nic->mbox);
	mutex_unlock(&nic->mbox.lock);
	return err;
}

static int otx2_tc_validate_flow(struct otx2_nic *nic,
				 struct flow_action *actions,
				 struct netlink_ext_ack *extack)
{
	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload supports only 1 policing action");
		return -EINVAL;
	}
	return 0;
}

static int otx2_policer_validate(const struct flow_action *action,
				 const struct flow_action_entry *act,
				 struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	u64 rate;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one Egress MATCHALL ratelimiter can be offloaded");
		return -ENOMEM;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		err = otx2_policer_validate(&cls->rule->action, entry, extack);
		if (err)
			return err;

		if (entry->police.rate_pkt_ps) {
			NL_SET_ERR_MSG_MOD(extack, "QoS offload does not support packets per second");
			return -EOPNOTSUPP;
		}
		/* Convert bytes per second to Mbps */
		rate = entry->police.rate_bytes_ps * 8;
		rate = max_t(u64, rate / 1000000, 1);
		err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action is supported with Egress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = otx2_set_matchall_egress_rate(nic, 0, 0);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
	return err;
}
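
/* For reference, a rule exercising the egress MATCHALL path above could be
 * added with (hypothetical interface name):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 egress matchall skip_sw \
 *           action police rate 100mbit burst 16k
 *
 * i.e. a single egress MATCHALL filter carrying one police action.
 */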

static int otx2_tc_act_set_police(struct otx2_nic *nic,
				  struct otx2_tc_flow *node,
				  struct flow_cls_offload *f,
				  u64 rate, u32 burst, u32 mark,
				  struct npc_install_flow_req *req, bool pps)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct otx2_hw *hw = &nic->hw;
	int rq_idx, rc;

	rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
	if (rq_idx >= hw->rx_queues) {
		NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
		return -EINVAL;
	}

	mutex_lock(&nic->mbox.lock);

	rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
	if (rc) {
		mutex_unlock(&nic->mbox.lock);
		return rc;
	}

	rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
	if (rc)
		goto free_leaf;

	rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
	if (rc)
		goto free_leaf;

	mutex_unlock(&nic->mbox.lock);

	req->match_id = mark & 0xFFFFULL;
	req->index = rq_idx;
	req->op = NIX_RX_ACTIONOP_UCAST;
	set_bit(rq_idx, &nic->rq_bmap);
	node->is_act_police = true;
	node->rq = rq_idx;

	return 0;

free_leaf:
	if (cn10k_free_leaf_profile(nic, node->leaf_profile))
		netdev_err(nic->netdev,
			   "Unable to free leaf bandwidth profile(%d)\n",
			   node->leaf_profile);
	mutex_unlock(&nic->mbox.lock);
	return rc;
}
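
/* Worked example (illustrative): with hw->rx_queues = 8, otx2_init_tc()
 * pre-sets bit 0 of rq_bmap, so the first police rule gets rq_idx = 1, the
 * next rq_idx = 2, and so on. At most rx_queues - 1 police rules can
 * coexist, each backed by its own leaf bandwidth profile.
 */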

static int otx2_tc_parse_actions(struct otx2_nic *nic,
				 struct flow_action *flow_action,
				 struct npc_install_flow_req *req,
				 struct flow_cls_offload *f,
				 struct otx2_tc_flow *node)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_action_entry *act;
	struct net_device *target;
	struct otx2_nic *priv;
	u32 burst, mark = 0;
	u8 nr_police = 0;
	bool pps = false;
	u64 rate;
	int err;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
		return -EINVAL;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			req->op = NIX_RX_ACTIONOP_DROP;
			return 0;
		case FLOW_ACTION_ACCEPT:
			req->op = NIX_RX_ACTION_DEFAULT;
			return 0;
		case FLOW_ACTION_REDIRECT_INGRESS:
			target = act->dev;
			priv = netdev_priv(target);
			/* npc_install_flow_req doesn't support passing a target pcifunc */
			if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "can't redirect to other pf/vf");
				return -EOPNOTSUPP;
			}
			req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
			req->op = NIX_RX_ACTION_DEFAULT;
			return 0;
		case FLOW_ACTION_VLAN_POP:
			req->vtag0_valid = true;
			/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
			req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
			break;
		case FLOW_ACTION_POLICE:
			/* Ingress ratelimiting is not supported on OcteonTx2 */
			if (is_dev_otx2(nic->pdev)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Ingress policing not supported on this platform");
				return -EOPNOTSUPP;
			}

			err = otx2_policer_validate(flow_action, act, extack);
			if (err)
				return err;

			if (act->police.rate_bytes_ps > 0) {
				rate = act->police.rate_bytes_ps * 8;
				burst = act->police.burst;
			} else if (act->police.rate_pkt_ps > 0) {
				/* The algorithm used to calculate rate
				 * mantissa, exponent values for a given token
				 * rate (token can be byte or packet) requires
				 * token rate to be multiplied by 8.
				 */
				rate = act->police.rate_pkt_ps * 8;
				burst = act->police.burst_pkt;
				pps = true;
			}
			nr_police++;
			break;
		case FLOW_ACTION_MARK:
			mark = act->mark;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (nr_police > 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "rate limit police offload requires a single action");
		return -EOPNOTSUPP;
	}

	if (nr_police)
		return otx2_tc_act_set_police(nic, node, f, rate, burst,
					      mark, req, pps);

	return 0;
}
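
/* For reference, an ingress flower rule hitting the police path above could
 * look like (hypothetical interface name, CN10K only):
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto udp dst_port 53 skip_sw \
 *           action police rate 50mbit burst 32k
 */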

static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
				struct flow_cls_offload *f,
				struct npc_install_flow_req *req)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_msg *flow_spec = &req->packet;
	struct flow_msg *flow_mask = &req->mask;
	struct flow_dissector *dissector;
	struct flow_rule *rule;
	u8 ip_proto = 0;

	rule = flow_cls_offload_flow_rule(f);
	dissector = rule->match.dissector;

	if ((dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_IP)))) {
		netdev_info(nic->netdev, "unsupported flow used key 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		/* All EtherTypes can be matched, no hw limitation */
		flow_spec->etype = match.key->n_proto;
		flow_mask->etype = match.mask->n_proto;
		req->features |= BIT_ULL(NPC_ETYPE);

		if (match.mask->ip_proto &&
		    (match.key->ip_proto != IPPROTO_TCP &&
		     match.key->ip_proto != IPPROTO_UDP &&
		     match.key->ip_proto != IPPROTO_SCTP &&
		     match.key->ip_proto != IPPROTO_ICMP &&
		     match.key->ip_proto != IPPROTO_ICMPV6)) {
			netdev_info(nic->netdev,
				    "ip_proto=0x%x not supported\n",
				    match.key->ip_proto);
			return -EOPNOTSUPP;
		}
		if (match.mask->ip_proto)
			ip_proto = match.key->ip_proto;

		if (ip_proto == IPPROTO_UDP)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (ip_proto == IPPROTO_TCP)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else if (ip_proto == IPPROTO_SCTP)
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		else if (ip_proto == IPPROTO_ICMP)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
		else if (ip_proto == IPPROTO_ICMPV6)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
			NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later");
			return -EOPNOTSUPP;
		}

		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			if (ntohs(flow_spec->etype) == ETH_P_IP) {
				flow_spec->ip_flag = IPV4_FLAG_MORE;
				flow_mask->ip_flag = IPV4_FLAG_MORE;
				req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
			} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
				flow_spec->next_header = IPPROTO_FRAGMENT;
				flow_mask->next_header = 0xff;
				req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
			} else {
				NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 or IPv6");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		if (!is_zero_ether_addr(match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
			return -EOPNOTSUPP;
		}

		if (!is_zero_ether_addr(match.mask->dst)) {
			ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
			ether_addr_copy(flow_mask->dmac,
					(u8 *)&match.mask->dst);
			req->features |= BIT_ULL(NPC_DMAC);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
		    match.mask->tos) {
			NL_SET_ERR_MSG_MOD(extack, "tos not supported");
			return -EOPNOTSUPP;
		}
		if (match.mask->ttl) {
			NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
			return -EOPNOTSUPP;
		}
		flow_spec->tos = match.key->tos;
		flow_mask->tos = match.mask->tos;
		req->features |= BIT_ULL(NPC_TOS);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;
		u16 vlan_tci, vlan_tci_mask;

		flow_rule_match_vlan(rule, &match);

		if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) {
			netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
				   ntohs(match.key->vlan_tpid));
			return -EOPNOTSUPP;
		}
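
		/* The TCI is packed as on the wire: VID in bits 11:0,
		 * DEI in bit 12 and PCP in bits 15:13 (standard 802.1Q
		 * layout, noted here for clarity).
		 */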
		if (match.mask->vlan_id ||
		    match.mask->vlan_dei ||
		    match.mask->vlan_priority) {
			vlan_tci = match.key->vlan_id |
				   match.key->vlan_dei << 12 |
				   match.key->vlan_priority << 13;

			vlan_tci_mask = match.mask->vlan_id |
					match.mask->vlan_dei << 12 |
					match.mask->vlan_priority << 13;

			flow_spec->vlan_tci = htons(vlan_tci);
			flow_mask->vlan_tci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);

		flow_spec->ip4dst = match.key->dst;
		flow_mask->ip4dst = match.mask->dst;
		req->features |= BIT_ULL(NPC_DIP_IPV4);

		flow_spec->ip4src = match.key->src;
		flow_mask->ip4src = match.mask->src;
		req->features |= BIT_ULL(NPC_SIP_IPV4);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);

		if (ipv6_addr_loopback(&match.key->dst) ||
		    ipv6_addr_loopback(&match.key->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow matching IPv6 loopback addr not supported");
			return -EOPNOTSUPP;
		}

		if (!ipv6_addr_any(&match.mask->dst)) {
			memcpy(&flow_spec->ip6dst,
			       (struct in6_addr *)&match.key->dst,
			       sizeof(flow_spec->ip6dst));
			memcpy(&flow_mask->ip6dst,
			       (struct in6_addr *)&match.mask->dst,
			       sizeof(flow_spec->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		if (!ipv6_addr_any(&match.mask->src)) {
			memcpy(&flow_spec->ip6src,
			       (struct in6_addr *)&match.key->src,
			       sizeof(flow_spec->ip6src));
			memcpy(&flow_mask->ip6src,
			       (struct in6_addr *)&match.mask->src,
			       sizeof(flow_spec->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);

		flow_spec->dport = match.key->dst;
		flow_mask->dport = match.mask->dst;

		if (flow_mask->dport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}

		flow_spec->sport = match.key->src;
		flow_mask->sport = match.mask->src;

		if (flow_mask->sport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
	}

	return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}

static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
			   entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}
	mutex_unlock(&nic->mbox.lock);

	return 0;
}

static int otx2_tc_del_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_info *tc_info = &nic->tc_info;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
			   tc_flow_cmd->cookie);
		return -EINVAL;
	}

	if (flow_node->is_act_police) {
		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
						 flow_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   flow_node->rq, flow_node->leaf_profile);

		err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   flow_node->leaf_profile);

		__clear_bit(flow_node->rq, &nic->rq_bmap);

		mutex_unlock(&nic->mbox.lock);
	}

	otx2_del_mcam_flow_entry(nic, flow_node->entry);

	WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
				       &flow_node->node,
				       nic->tc_info.flow_ht_params));

	/* Release the bitmap slot before handing the node to RCU, so the
	 * node is never dereferenced after kfree_rcu().
	 */
	clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
	kfree_rcu(flow_node, rcu);

	flow_cfg->nr_flows--;

	return 0;
}
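
/* otx2_tc_add_flow() below hands out MCAM entries from the end of
 * flow_cfg->flow_ent[]: bitpos 0 maps to flow_ent[max_flows - 1], bitpos 1
 * to flow_ent[max_flows - 2], and so on (see the req->entry computation).
 */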

static int otx2_tc_add_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_info *tc_info = &nic->tc_info;
	struct otx2_tc_flow *new_node, *old_node;
	struct npc_install_flow_req *req, dummy;
	int rc, err;

	if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
		return -ENOMEM;

	if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Free MCAM entry not available to add the flow");
		return -ENOMEM;
	}

	/* allocate memory for the new flow and its node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node)
		return -ENOMEM;
	spin_lock_init(&new_node->lock);
	new_node->cookie = tc_flow_cmd->cookie;

	memset(&dummy, 0, sizeof(struct npc_install_flow_req));

	rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
	if (rc) {
		kfree_rcu(new_node, rcu);
		return rc;
	}

	/* If a flow exists with the same cookie, delete it */
	old_node = rhashtable_lookup_fast(&tc_info->flow_table,
					  &tc_flow_cmd->cookie,
					  tc_info->flow_ht_params);
	if (old_node)
		otx2_tc_del_flow(nic, tc_flow_cmd);

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		rc = -ENOMEM;
		goto free_leaf;
	}

	memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
	memcpy(req, &dummy, sizeof(struct npc_install_flow_req));

	new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
					       flow_cfg->max_flows);
	req->channel = nic->hw.rx_chan_base;
	req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1];
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	new_node->entry = req->entry;

	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
		mutex_unlock(&nic->mbox.lock);
		goto free_leaf;
	}
	mutex_unlock(&nic->mbox.lock);

	/* add new flow to flow-table */
	rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node,
				    nic->tc_info.flow_ht_params);
	if (rc) {
		otx2_del_mcam_flow_entry(nic, req->entry);
		goto free_leaf;
	}

	set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
	flow_cfg->nr_flows++;

	return 0;

free_leaf:
	if (new_node->is_act_police) {
		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
						 new_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   new_node->rq, new_node->leaf_profile);
		err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   new_node->leaf_profile);

		__clear_bit(new_node->rq, &nic->rq_bmap);

		mutex_unlock(&nic->mbox.lock);
	}
	/* Free the node only after its fields are no longer needed */
	kfree_rcu(new_node, rcu);

	return rc;
}
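
/* otx2_tc_get_flow_stats() below reports deltas: the MCAM counter is
 * cumulative, so each query passes (rsp->stat - stats->pkts) to
 * flow_stats_update() and then caches the new absolute value. Only packet
 * counts are available from this counter; bytes and drops are reported as 0.
 */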

static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
				  struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_tc_info *tc_info = &nic->tc_info;
	struct npc_mcam_get_stats_req *req;
	struct npc_mcam_get_stats_rsp *rsp;
	struct otx2_tc_flow_stats *stats;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_info(nic->netdev, "tc flow not found for cookie 0x%lx\n",
			    tc_flow_cmd->cookie);
		return -EINVAL;
	}

	mutex_lock(&nic->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_node->entry;

	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp
		(&nic->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&nic->mbox.lock);
		return PTR_ERR(rsp);
	}

	mutex_unlock(&nic->mbox.lock);

	if (!rsp->stat_ena)
		return -EINVAL;

	stats = &flow_node->stats;

	spin_lock(&flow_node->lock);
	flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);
	stats->pkts = rsp->stat;
	spin_unlock(&flow_node->lock);

	return 0;
}

static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
				    struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return otx2_tc_add_flow(nic, cls_flower);
	case FLOW_CLS_DESTROY:
		return otx2_tc_del_flow(nic, cls_flower);
	case FLOW_CLS_STATS:
		return otx2_tc_get_flow_stats(nic, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
					    struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	u64 rate;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one ingress MATCHALL ratelimiter can be offloaded");
		return -ENOMEM;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		/* Ingress ratelimiting is not supported on OcteonTx2 */
		if (is_dev_otx2(nic->pdev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Ingress policing not supported on this platform");
			return -EOPNOTSUPP;
		}

		err = cn10k_alloc_matchall_ipolicer(nic);
		if (err)
			return err;

		/* Convert to bits per second */
		rate = entry->police.rate_bytes_ps * 8;
		err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action supported with Ingress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = cn10k_free_matchall_ipolicer(nic);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
	return err;
}

static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_ingress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}
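
/* For reference, an ingress MATCHALL policer taking the install path above
 * could be added with (hypothetical interface name, CN10K only):
 *
 *   tc filter add dev eth0 ingress matchall skip_sw \
 *           action police rate 1gbit burst 64k
 */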

static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return otx2_setup_tc_cls_flower(nic, type_data);
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_ingress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
					 struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_egress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_egress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_egress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static LIST_HEAD(otx2_block_cb_list);

static int otx2_setup_tc_block(struct net_device *netdev,
			       struct flow_block_offload *f)
{
	struct otx2_nic *nic = netdev_priv(netdev);
	flow_setup_cb_t *cb;
	bool ingress;

	if (f->block_shared)
		return -EOPNOTSUPP;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = otx2_setup_tc_block_ingress_cb;
		ingress = true;
	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = otx2_setup_tc_block_egress_cb;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
					  nic, nic, ingress);
}

int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return otx2_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(otx2_setup_tc);

static const struct rhashtable_params tc_flow_ht_params = {
	.head_offset = offsetof(struct otx2_tc_flow, node),
	.key_offset = offsetof(struct otx2_tc_flow, cookie),
	.key_len = sizeof(((struct otx2_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int otx2_init_tc(struct otx2_nic *nic)
{
	struct otx2_tc_info *tc = &nic->tc_info;
	int err;

	/* Exclude receive queue 0 from being used for police actions */
	set_bit(0, &nic->rq_bmap);

	if (!nic->flow_cfg) {
		netdev_err(nic->netdev,
			   "Can't init TC, nic->flow_cfg is not setup\n");
		return -EINVAL;
	}

	err = otx2_tc_alloc_ent_bitmap(nic);
	if (err)
		return err;

	tc->flow_ht_params = tc_flow_ht_params;
	err = rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
	if (err) {
		kfree(tc->tc_entries_bitmap);
		tc->tc_entries_bitmap = NULL;
	}
	return err;
}
EXPORT_SYMBOL(otx2_init_tc);

void otx2_shutdown_tc(struct otx2_nic *nic)
{
	struct otx2_tc_info *tc = &nic->tc_info;

	kfree(tc->tc_entries_bitmap);
	rhashtable_destroy(&tc->flow_table);
}
EXPORT_SYMBOL(otx2_shutdown_tc);