1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell RVU Ethernet driver 3 * 4 * Copyright (C) 2021 Marvell. 5 * 6 */ 7 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/inetdevice.h> 11 #include <linux/rhashtable.h> 12 #include <linux/bitfield.h> 13 #include <net/flow_dissector.h> 14 #include <net/pkt_cls.h> 15 #include <net/tc_act/tc_gact.h> 16 #include <net/tc_act/tc_mirred.h> 17 #include <net/tc_act/tc_vlan.h> 18 #include <net/ipv6.h> 19 20 #include "cn10k.h" 21 #include "otx2_common.h" 22 #include "qos.h" 23 24 #define CN10K_MAX_BURST_MANTISSA 0x7FFFULL 25 #define CN10K_MAX_BURST_SIZE 8453888ULL 26 27 #define CN10K_TLX_BURST_MANTISSA GENMASK_ULL(43, 29) 28 #define CN10K_TLX_BURST_EXPONENT GENMASK_ULL(47, 44) 29 30 #define OTX2_UNSUPP_LSE_DEPTH GENMASK(6, 4) 31 32 #define MCAST_INVALID_GRP (-1U) 33 34 struct otx2_tc_flow_stats { 35 u64 bytes; 36 u64 pkts; 37 u64 used; 38 }; 39 40 struct otx2_tc_flow { 41 struct list_head list; 42 unsigned long cookie; 43 struct rcu_head rcu; 44 struct otx2_tc_flow_stats stats; 45 spinlock_t lock; /* lock for stats */ 46 u16 rq; 47 u16 entry; 48 u16 leaf_profile; 49 bool is_act_police; 50 u32 prio; 51 struct npc_install_flow_req req; 52 u32 mcast_grp_idx; 53 u64 rate; 54 u32 burst; 55 bool is_pps; 56 }; 57 58 static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst, 59 u32 *burst_exp, u32 *burst_mantissa) 60 { 61 int max_burst, max_mantissa; 62 unsigned int tmp; 63 64 if (is_dev_otx2(nic->pdev)) { 65 max_burst = MAX_BURST_SIZE; 66 max_mantissa = MAX_BURST_MANTISSA; 67 } else { 68 max_burst = CN10K_MAX_BURST_SIZE; 69 max_mantissa = CN10K_MAX_BURST_MANTISSA; 70 } 71 72 /* Burst is calculated as 73 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256 74 * Max supported burst size is 130,816 bytes. 75 */ 76 burst = min_t(u32, burst, max_burst); 77 if (burst) { 78 *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0; 79 tmp = burst - rounddown_pow_of_two(burst); 80 if (burst < max_mantissa) 81 *burst_mantissa = tmp * 2; 82 else 83 *burst_mantissa = tmp / (1ULL << (*burst_exp - 7)); 84 } else { 85 *burst_exp = MAX_BURST_EXPONENT; 86 *burst_mantissa = max_mantissa; 87 } 88 } 89 90 static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp, 91 u32 *mantissa, u32 *div_exp) 92 { 93 u64 tmp; 94 95 /* Rate calculation by hardware 96 * 97 * PIR_ADD = ((256 + mantissa) << exp) / 256 98 * rate = (2 * PIR_ADD) / ( 1 << div_exp) 99 * The resultant rate is in Mbps. 100 */ 101 102 /* 2Mbps to 100Gbps can be expressed with div_exp = 0. 103 * Setting this to '0' will ease the calculation of 104 * exponent and mantissa. 105 */ 106 *div_exp = 0; 107 108 if (maxrate) { 109 *exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0; 110 tmp = maxrate - rounddown_pow_of_two(maxrate); 111 if (maxrate < MAX_RATE_MANTISSA) 112 *mantissa = tmp * 2; 113 else 114 *mantissa = tmp / (1ULL << (*exp - 7)); 115 } else { 116 /* Instead of disabling rate limiting, set all values to max */ 117 *exp = MAX_RATE_EXPONENT; 118 *mantissa = MAX_RATE_MANTISSA; 119 } 120 } 121 122 u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic, 123 u64 maxrate, u32 burst) 124 { 125 u32 burst_exp, burst_mantissa; 126 u32 exp, mantissa, div_exp; 127 u64 regval = 0; 128 129 /* Get exponent and mantissa values from the desired rate */ 130 otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa); 131 otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp); 132 133 if (is_dev_otx2(nic->pdev)) { 134 regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) | 135 FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) | 136 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | 137 FIELD_PREP(TLX_RATE_EXPONENT, exp) | 138 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); 139 } else { 140 regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) | 141 FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) | 142 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | 143 FIELD_PREP(TLX_RATE_EXPONENT, exp) | 144 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); 145 } 146 147 return regval; 148 } 149 150 static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, 151 u32 burst, u64 maxrate) 152 { 153 struct otx2_hw *hw = &nic->hw; 154 struct nix_txschq_config *req; 155 int txschq, err; 156 157 /* All SQs share the same TL4, so pick the first scheduler */ 158 txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0]; 159 160 mutex_lock(&nic->mbox.lock); 161 req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox); 162 if (!req) { 163 mutex_unlock(&nic->mbox.lock); 164 return -ENOMEM; 165 } 166 167 req->lvl = NIX_TXSCH_LVL_TL4; 168 req->num_regs = 1; 169 req->reg[0] = NIX_AF_TL4X_PIR(txschq); 170 req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst); 171 172 err = otx2_sync_mbox_msg(&nic->mbox); 173 mutex_unlock(&nic->mbox.lock); 174 return err; 175 } 176 177 static int otx2_tc_validate_flow(struct otx2_nic *nic, 178 struct flow_action *actions, 179 struct netlink_ext_ack *extack) 180 { 181 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 182 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 183 return -EINVAL; 184 } 185 186 if (!flow_action_has_entries(actions)) { 187 NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action"); 188 return -EINVAL; 189 } 190 191 if (!flow_offload_has_one_action(actions)) { 192 NL_SET_ERR_MSG_MOD(extack, 193 "Egress MATCHALL offload supports only 1 policing action"); 194 return -EINVAL; 195 } 196 return 0; 197 } 198 199 static int otx2_policer_validate(const struct flow_action *action, 200 const struct flow_action_entry *act, 201 struct netlink_ext_ack *extack) 202 { 203 if (act->police.exceed.act_id != FLOW_ACTION_DROP) { 204 NL_SET_ERR_MSG_MOD(extack, 205 "Offload not supported when exceed action is not drop"); 206 return -EOPNOTSUPP; 207 } 208 209 if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && 210 act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { 211 NL_SET_ERR_MSG_MOD(extack, 212 "Offload not supported when conform action is not pipe or ok"); 213 return -EOPNOTSUPP; 214 } 215 216 if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && 217 !flow_action_is_last_entry(action, act)) { 218 NL_SET_ERR_MSG_MOD(extack, 219 "Offload not supported when conform action is ok, but action is not last"); 220 return -EOPNOTSUPP; 221 } 222 223 if (act->police.peakrate_bytes_ps || 224 act->police.avrate || act->police.overhead) { 225 NL_SET_ERR_MSG_MOD(extack, 226 "Offload not supported when peakrate/avrate/overhead is configured"); 227 return -EOPNOTSUPP; 228 } 229 230 return 0; 231 } 232 233 static int otx2_tc_egress_matchall_install(struct otx2_nic *nic, 234 struct tc_cls_matchall_offload *cls) 235 { 236 struct netlink_ext_ack *extack = cls->common.extack; 237 struct flow_action *actions = &cls->rule->action; 238 struct flow_action_entry *entry; 239 int err; 240 241 err = otx2_tc_validate_flow(nic, actions, extack); 242 if (err) 243 return err; 244 245 if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) { 246 NL_SET_ERR_MSG_MOD(extack, 247 "Only one Egress MATCHALL ratelimiter can be offloaded"); 248 return -ENOMEM; 249 } 250 251 entry = &cls->rule->action.entries[0]; 252 switch (entry->id) { 253 case FLOW_ACTION_POLICE: 254 err = otx2_policer_validate(&cls->rule->action, entry, extack); 255 if (err) 256 return err; 257 258 if (entry->police.rate_pkt_ps) { 259 NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second"); 260 return -EOPNOTSUPP; 261 } 262 err = otx2_set_matchall_egress_rate(nic, entry->police.burst, 263 otx2_convert_rate(entry->police.rate_bytes_ps)); 264 if (err) 265 return err; 266 nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; 267 break; 268 default: 269 NL_SET_ERR_MSG_MOD(extack, 270 "Only police action is supported with Egress MATCHALL offload"); 271 return -EOPNOTSUPP; 272 } 273 274 return 0; 275 } 276 277 static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic, 278 struct tc_cls_matchall_offload *cls) 279 { 280 struct netlink_ext_ack *extack = cls->common.extack; 281 int err; 282 283 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 284 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 285 return -EINVAL; 286 } 287 288 err = otx2_set_matchall_egress_rate(nic, 0, 0); 289 nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; 290 return err; 291 } 292 293 static int otx2_tc_act_set_hw_police(struct otx2_nic *nic, 294 struct otx2_tc_flow *node) 295 { 296 int rc; 297 298 mutex_lock(&nic->mbox.lock); 299 300 rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile); 301 if (rc) { 302 mutex_unlock(&nic->mbox.lock); 303 return rc; 304 } 305 306 rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, 307 node->burst, node->rate, node->is_pps); 308 if (rc) 309 goto free_leaf; 310 311 rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true); 312 if (rc) 313 goto free_leaf; 314 315 mutex_unlock(&nic->mbox.lock); 316 317 return 0; 318 319 free_leaf: 320 if (cn10k_free_leaf_profile(nic, node->leaf_profile)) 321 netdev_err(nic->netdev, 322 "Unable to free leaf bandwidth profile(%d)\n", 323 node->leaf_profile); 324 mutex_unlock(&nic->mbox.lock); 325 return rc; 326 } 327 328 static int otx2_tc_act_set_police(struct otx2_nic *nic, 329 struct otx2_tc_flow *node, 330 struct flow_cls_offload *f, 331 u64 rate, u32 burst, u32 mark, 332 struct npc_install_flow_req *req, bool pps) 333 { 334 struct netlink_ext_ack *extack = f->common.extack; 335 struct otx2_hw *hw = &nic->hw; 336 int rq_idx, rc; 337 338 rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues); 339 if (rq_idx >= hw->rx_queues) { 340 NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded"); 341 return -EINVAL; 342 } 343 344 req->match_id = mark & 0xFFFFULL; 345 req->index = rq_idx; 346 req->op = NIX_RX_ACTIONOP_UCAST; 347 348 node->is_act_police = true; 349 node->rq = rq_idx; 350 node->burst = burst; 351 node->rate = rate; 352 node->is_pps = pps; 353 354 rc = otx2_tc_act_set_hw_police(nic, node); 355 if (!rc) 356 set_bit(rq_idx, &nic->rq_bmap); 357 358 return rc; 359 } 360 361 static int otx2_tc_update_mcast(struct otx2_nic *nic, 362 struct npc_install_flow_req *req, 363 struct netlink_ext_ack *extack, 364 struct otx2_tc_flow *node, 365 struct nix_mcast_grp_update_req *ureq, 366 u8 num_intf) 367 { 368 struct nix_mcast_grp_update_req *grp_update_req; 369 struct nix_mcast_grp_create_req *creq; 370 struct nix_mcast_grp_create_rsp *crsp; 371 u32 grp_index; 372 int rc; 373 374 mutex_lock(&nic->mbox.lock); 375 creq = otx2_mbox_alloc_msg_nix_mcast_grp_create(&nic->mbox); 376 if (!creq) { 377 rc = -ENOMEM; 378 goto error; 379 } 380 381 creq->dir = NIX_MCAST_INGRESS; 382 /* Send message to AF */ 383 rc = otx2_sync_mbox_msg(&nic->mbox); 384 if (rc) { 385 NL_SET_ERR_MSG_MOD(extack, "Failed to create multicast group"); 386 goto error; 387 } 388 389 crsp = (struct nix_mcast_grp_create_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox, 390 0, 391 &creq->hdr); 392 if (IS_ERR(crsp)) { 393 rc = PTR_ERR(crsp); 394 goto error; 395 } 396 397 grp_index = crsp->mcast_grp_idx; 398 grp_update_req = otx2_mbox_alloc_msg_nix_mcast_grp_update(&nic->mbox); 399 if (!grp_update_req) { 400 NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group"); 401 rc = -ENOMEM; 402 goto error; 403 } 404 405 ureq->op = NIX_MCAST_OP_ADD_ENTRY; 406 ureq->mcast_grp_idx = grp_index; 407 ureq->num_mce_entry = num_intf; 408 ureq->pcifunc[0] = nic->pcifunc; 409 ureq->channel[0] = nic->hw.tx_chan_base; 410 411 ureq->dest_type[0] = NIX_RX_RSS; 412 ureq->rq_rss_index[0] = 0; 413 memcpy(&ureq->hdr, &grp_update_req->hdr, sizeof(struct mbox_msghdr)); 414 memcpy(grp_update_req, ureq, sizeof(struct nix_mcast_grp_update_req)); 415 416 /* Send message to AF */ 417 rc = otx2_sync_mbox_msg(&nic->mbox); 418 if (rc) { 419 NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group"); 420 goto error; 421 } 422 423 mutex_unlock(&nic->mbox.lock); 424 req->op = NIX_RX_ACTIONOP_MCAST; 425 req->index = grp_index; 426 node->mcast_grp_idx = grp_index; 427 return 0; 428 429 error: 430 mutex_unlock(&nic->mbox.lock); 431 return rc; 432 } 433 434 static int otx2_tc_parse_actions(struct otx2_nic *nic, 435 struct flow_action *flow_action, 436 struct npc_install_flow_req *req, 437 struct flow_cls_offload *f, 438 struct otx2_tc_flow *node) 439 { 440 struct nix_mcast_grp_update_req dummy_grp_update_req = { 0 }; 441 struct netlink_ext_ack *extack = f->common.extack; 442 bool pps = false, mcast = false; 443 struct flow_action_entry *act; 444 struct net_device *target; 445 struct otx2_nic *priv; 446 u32 burst, mark = 0; 447 u8 nr_police = 0; 448 u8 num_intf = 1; 449 int err, i; 450 u64 rate; 451 452 if (!flow_action_has_entries(flow_action)) { 453 NL_SET_ERR_MSG_MOD(extack, "no tc actions specified"); 454 return -EINVAL; 455 } 456 457 flow_action_for_each(i, act, flow_action) { 458 switch (act->id) { 459 case FLOW_ACTION_DROP: 460 req->op = NIX_RX_ACTIONOP_DROP; 461 return 0; 462 case FLOW_ACTION_ACCEPT: 463 req->op = NIX_RX_ACTION_DEFAULT; 464 return 0; 465 case FLOW_ACTION_REDIRECT_INGRESS: 466 target = act->dev; 467 priv = netdev_priv(target); 468 /* npc_install_flow_req doesn't support passing a target pcifunc */ 469 if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) { 470 NL_SET_ERR_MSG_MOD(extack, 471 "can't redirect to other pf/vf"); 472 return -EOPNOTSUPP; 473 } 474 req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK; 475 476 /* if op is already set; avoid overwriting the same */ 477 if (!req->op) 478 req->op = NIX_RX_ACTION_DEFAULT; 479 break; 480 481 case FLOW_ACTION_VLAN_POP: 482 req->vtag0_valid = true; 483 /* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */ 484 req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7; 485 break; 486 case FLOW_ACTION_POLICE: 487 /* Ingress ratelimiting is not supported on OcteonTx2 */ 488 if (is_dev_otx2(nic->pdev)) { 489 NL_SET_ERR_MSG_MOD(extack, 490 "Ingress policing not supported on this platform"); 491 return -EOPNOTSUPP; 492 } 493 494 err = otx2_policer_validate(flow_action, act, extack); 495 if (err) 496 return err; 497 498 if (act->police.rate_bytes_ps > 0) { 499 rate = act->police.rate_bytes_ps * 8; 500 burst = act->police.burst; 501 } else if (act->police.rate_pkt_ps > 0) { 502 /* The algorithm used to calculate rate 503 * mantissa, exponent values for a given token 504 * rate (token can be byte or packet) requires 505 * token rate to be mutiplied by 8. 506 */ 507 rate = act->police.rate_pkt_ps * 8; 508 burst = act->police.burst_pkt; 509 pps = true; 510 } 511 nr_police++; 512 break; 513 case FLOW_ACTION_MARK: 514 mark = act->mark; 515 break; 516 517 case FLOW_ACTION_RX_QUEUE_MAPPING: 518 req->op = NIX_RX_ACTIONOP_UCAST; 519 req->index = act->rx_queue; 520 break; 521 522 case FLOW_ACTION_MIRRED_INGRESS: 523 target = act->dev; 524 priv = netdev_priv(target); 525 dummy_grp_update_req.pcifunc[num_intf] = priv->pcifunc; 526 dummy_grp_update_req.channel[num_intf] = priv->hw.tx_chan_base; 527 dummy_grp_update_req.dest_type[num_intf] = NIX_RX_RSS; 528 dummy_grp_update_req.rq_rss_index[num_intf] = 0; 529 mcast = true; 530 num_intf++; 531 break; 532 533 default: 534 return -EOPNOTSUPP; 535 } 536 } 537 538 if (mcast) { 539 err = otx2_tc_update_mcast(nic, req, extack, node, 540 &dummy_grp_update_req, 541 num_intf); 542 if (err) 543 return err; 544 } 545 546 if (nr_police > 1) { 547 NL_SET_ERR_MSG_MOD(extack, 548 "rate limit police offload requires a single action"); 549 return -EOPNOTSUPP; 550 } 551 552 if (nr_police) 553 return otx2_tc_act_set_police(nic, node, f, rate, burst, 554 mark, req, pps); 555 556 return 0; 557 } 558 559 static int otx2_tc_process_vlan(struct otx2_nic *nic, struct flow_msg *flow_spec, 560 struct flow_msg *flow_mask, struct flow_rule *rule, 561 struct npc_install_flow_req *req, bool is_inner) 562 { 563 struct flow_match_vlan match; 564 u16 vlan_tci, vlan_tci_mask; 565 566 if (is_inner) 567 flow_rule_match_cvlan(rule, &match); 568 else 569 flow_rule_match_vlan(rule, &match); 570 571 if (!eth_type_vlan(match.key->vlan_tpid)) { 572 netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n", 573 ntohs(match.key->vlan_tpid)); 574 return -EOPNOTSUPP; 575 } 576 577 if (!match.mask->vlan_id) { 578 struct flow_action_entry *act; 579 int i; 580 581 flow_action_for_each(i, act, &rule->action) { 582 if (act->id == FLOW_ACTION_DROP) { 583 netdev_err(nic->netdev, 584 "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n", 585 ntohs(match.key->vlan_tpid), match.key->vlan_id); 586 return -EOPNOTSUPP; 587 } 588 } 589 } 590 591 if (match.mask->vlan_id || 592 match.mask->vlan_dei || 593 match.mask->vlan_priority) { 594 vlan_tci = match.key->vlan_id | 595 match.key->vlan_dei << 12 | 596 match.key->vlan_priority << 13; 597 598 vlan_tci_mask = match.mask->vlan_id | 599 match.mask->vlan_dei << 12 | 600 match.mask->vlan_priority << 13; 601 if (is_inner) { 602 flow_spec->vlan_itci = htons(vlan_tci); 603 flow_mask->vlan_itci = htons(vlan_tci_mask); 604 req->features |= BIT_ULL(NPC_INNER_VID); 605 } else { 606 flow_spec->vlan_tci = htons(vlan_tci); 607 flow_mask->vlan_tci = htons(vlan_tci_mask); 608 req->features |= BIT_ULL(NPC_OUTER_VID); 609 } 610 } 611 612 return 0; 613 } 614 615 static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, 616 struct flow_cls_offload *f, 617 struct npc_install_flow_req *req) 618 { 619 struct netlink_ext_ack *extack = f->common.extack; 620 struct flow_msg *flow_spec = &req->packet; 621 struct flow_msg *flow_mask = &req->mask; 622 struct flow_dissector *dissector; 623 struct flow_rule *rule; 624 u8 ip_proto = 0; 625 626 rule = flow_cls_offload_flow_rule(f); 627 dissector = rule->match.dissector; 628 629 if ((dissector->used_keys & 630 ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | 631 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | 632 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 633 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | 634 BIT(FLOW_DISSECTOR_KEY_CVLAN) | 635 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 636 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 637 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | 638 BIT(FLOW_DISSECTOR_KEY_IPSEC) | 639 BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) | 640 BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) | 641 BIT_ULL(FLOW_DISSECTOR_KEY_TCP) | 642 BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) { 643 netdev_info(nic->netdev, "unsupported flow used key 0x%llx", 644 dissector->used_keys); 645 return -EOPNOTSUPP; 646 } 647 648 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 649 struct flow_match_basic match; 650 651 flow_rule_match_basic(rule, &match); 652 653 /* All EtherTypes can be matched, no hw limitation */ 654 flow_spec->etype = match.key->n_proto; 655 flow_mask->etype = match.mask->n_proto; 656 req->features |= BIT_ULL(NPC_ETYPE); 657 658 if (match.mask->ip_proto && 659 (match.key->ip_proto != IPPROTO_TCP && 660 match.key->ip_proto != IPPROTO_UDP && 661 match.key->ip_proto != IPPROTO_SCTP && 662 match.key->ip_proto != IPPROTO_ICMP && 663 match.key->ip_proto != IPPROTO_ESP && 664 match.key->ip_proto != IPPROTO_AH && 665 match.key->ip_proto != IPPROTO_ICMPV6)) { 666 netdev_info(nic->netdev, 667 "ip_proto=0x%x not supported\n", 668 match.key->ip_proto); 669 return -EOPNOTSUPP; 670 } 671 if (match.mask->ip_proto) 672 ip_proto = match.key->ip_proto; 673 674 if (ip_proto == IPPROTO_UDP) 675 req->features |= BIT_ULL(NPC_IPPROTO_UDP); 676 else if (ip_proto == IPPROTO_TCP) 677 req->features |= BIT_ULL(NPC_IPPROTO_TCP); 678 else if (ip_proto == IPPROTO_SCTP) 679 req->features |= BIT_ULL(NPC_IPPROTO_SCTP); 680 else if (ip_proto == IPPROTO_ICMP) 681 req->features |= BIT_ULL(NPC_IPPROTO_ICMP); 682 else if (ip_proto == IPPROTO_ICMPV6) 683 req->features |= BIT_ULL(NPC_IPPROTO_ICMP6); 684 else if (ip_proto == IPPROTO_ESP) 685 req->features |= BIT_ULL(NPC_IPPROTO_ESP); 686 else if (ip_proto == IPPROTO_AH) 687 req->features |= BIT_ULL(NPC_IPPROTO_AH); 688 } 689 690 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 691 struct flow_match_control match; 692 693 flow_rule_match_control(rule, &match); 694 if (match.mask->flags & FLOW_DIS_FIRST_FRAG) { 695 NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later"); 696 return -EOPNOTSUPP; 697 } 698 699 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) { 700 if (ntohs(flow_spec->etype) == ETH_P_IP) { 701 flow_spec->ip_flag = IPV4_FLAG_MORE; 702 flow_mask->ip_flag = IPV4_FLAG_MORE; 703 req->features |= BIT_ULL(NPC_IPFRAG_IPV4); 704 } else if (ntohs(flow_spec->etype) == ETH_P_IPV6) { 705 flow_spec->next_header = IPPROTO_FRAGMENT; 706 flow_mask->next_header = 0xff; 707 req->features |= BIT_ULL(NPC_IPFRAG_IPV6); 708 } else { 709 NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 and IPv6"); 710 return -EOPNOTSUPP; 711 } 712 } 713 } 714 715 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 716 struct flow_match_eth_addrs match; 717 718 flow_rule_match_eth_addrs(rule, &match); 719 if (!is_zero_ether_addr(match.mask->src)) { 720 NL_SET_ERR_MSG_MOD(extack, "src mac match not supported"); 721 return -EOPNOTSUPP; 722 } 723 724 if (!is_zero_ether_addr(match.mask->dst)) { 725 ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst); 726 ether_addr_copy(flow_mask->dmac, 727 (u8 *)&match.mask->dst); 728 req->features |= BIT_ULL(NPC_DMAC); 729 } 730 } 731 732 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPSEC)) { 733 struct flow_match_ipsec match; 734 735 flow_rule_match_ipsec(rule, &match); 736 if (!match.mask->spi) { 737 NL_SET_ERR_MSG_MOD(extack, "spi index not specified"); 738 return -EOPNOTSUPP; 739 } 740 if (ip_proto != IPPROTO_ESP && 741 ip_proto != IPPROTO_AH) { 742 NL_SET_ERR_MSG_MOD(extack, 743 "SPI index is valid only for ESP/AH proto"); 744 return -EOPNOTSUPP; 745 } 746 747 flow_spec->spi = match.key->spi; 748 flow_mask->spi = match.mask->spi; 749 req->features |= BIT_ULL(NPC_IPSEC_SPI); 750 } 751 752 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { 753 struct flow_match_ip match; 754 755 flow_rule_match_ip(rule, &match); 756 if ((ntohs(flow_spec->etype) != ETH_P_IP) && 757 match.mask->tos) { 758 NL_SET_ERR_MSG_MOD(extack, "tos not supported"); 759 return -EOPNOTSUPP; 760 } 761 if (match.mask->ttl) { 762 NL_SET_ERR_MSG_MOD(extack, "ttl not supported"); 763 return -EOPNOTSUPP; 764 } 765 flow_spec->tos = match.key->tos; 766 flow_mask->tos = match.mask->tos; 767 req->features |= BIT_ULL(NPC_TOS); 768 } 769 770 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 771 int ret; 772 773 ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, false); 774 if (ret) 775 return ret; 776 } 777 778 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { 779 int ret; 780 781 ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, true); 782 if (ret) 783 return ret; 784 } 785 786 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { 787 struct flow_match_ipv4_addrs match; 788 789 flow_rule_match_ipv4_addrs(rule, &match); 790 791 flow_spec->ip4dst = match.key->dst; 792 flow_mask->ip4dst = match.mask->dst; 793 req->features |= BIT_ULL(NPC_DIP_IPV4); 794 795 flow_spec->ip4src = match.key->src; 796 flow_mask->ip4src = match.mask->src; 797 req->features |= BIT_ULL(NPC_SIP_IPV4); 798 } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { 799 struct flow_match_ipv6_addrs match; 800 801 flow_rule_match_ipv6_addrs(rule, &match); 802 803 if (ipv6_addr_loopback(&match.key->dst) || 804 ipv6_addr_loopback(&match.key->src)) { 805 NL_SET_ERR_MSG_MOD(extack, 806 "Flow matching IPv6 loopback addr not supported"); 807 return -EOPNOTSUPP; 808 } 809 810 if (!ipv6_addr_any(&match.mask->dst)) { 811 memcpy(&flow_spec->ip6dst, 812 (struct in6_addr *)&match.key->dst, 813 sizeof(flow_spec->ip6dst)); 814 memcpy(&flow_mask->ip6dst, 815 (struct in6_addr *)&match.mask->dst, 816 sizeof(flow_spec->ip6dst)); 817 req->features |= BIT_ULL(NPC_DIP_IPV6); 818 } 819 820 if (!ipv6_addr_any(&match.mask->src)) { 821 memcpy(&flow_spec->ip6src, 822 (struct in6_addr *)&match.key->src, 823 sizeof(flow_spec->ip6src)); 824 memcpy(&flow_mask->ip6src, 825 (struct in6_addr *)&match.mask->src, 826 sizeof(flow_spec->ip6src)); 827 req->features |= BIT_ULL(NPC_SIP_IPV6); 828 } 829 } 830 831 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { 832 struct flow_match_ports match; 833 834 flow_rule_match_ports(rule, &match); 835 836 flow_spec->dport = match.key->dst; 837 flow_mask->dport = match.mask->dst; 838 839 if (flow_mask->dport) { 840 if (ip_proto == IPPROTO_UDP) 841 req->features |= BIT_ULL(NPC_DPORT_UDP); 842 else if (ip_proto == IPPROTO_TCP) 843 req->features |= BIT_ULL(NPC_DPORT_TCP); 844 else if (ip_proto == IPPROTO_SCTP) 845 req->features |= BIT_ULL(NPC_DPORT_SCTP); 846 } 847 848 flow_spec->sport = match.key->src; 849 flow_mask->sport = match.mask->src; 850 851 if (flow_mask->sport) { 852 if (ip_proto == IPPROTO_UDP) 853 req->features |= BIT_ULL(NPC_SPORT_UDP); 854 else if (ip_proto == IPPROTO_TCP) 855 req->features |= BIT_ULL(NPC_SPORT_TCP); 856 else if (ip_proto == IPPROTO_SCTP) 857 req->features |= BIT_ULL(NPC_SPORT_SCTP); 858 } 859 } 860 861 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) { 862 struct flow_match_tcp match; 863 864 flow_rule_match_tcp(rule, &match); 865 866 flow_spec->tcp_flags = match.key->flags; 867 flow_mask->tcp_flags = match.mask->flags; 868 req->features |= BIT_ULL(NPC_TCP_FLAGS); 869 } 870 871 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) { 872 struct flow_match_mpls match; 873 u8 bit; 874 875 flow_rule_match_mpls(rule, &match); 876 877 if (match.mask->used_lses & OTX2_UNSUPP_LSE_DEPTH) { 878 NL_SET_ERR_MSG_MOD(extack, 879 "unsupported LSE depth for MPLS match offload"); 880 return -EOPNOTSUPP; 881 } 882 883 for_each_set_bit(bit, (unsigned long *)&match.mask->used_lses, 884 FLOW_DIS_MPLS_MAX) { 885 /* check if any of the fields LABEL,TC,BOS are set */ 886 if (*((u32 *)&match.mask->ls[bit]) & 887 OTX2_FLOWER_MASK_MPLS_NON_TTL) { 888 /* Hardware will capture 4 byte MPLS header into 889 * two fields NPC_MPLSX_LBTCBOS and NPC_MPLSX_TTL. 890 * Derive the associated NPC key based on header 891 * index and offset. 892 */ 893 894 req->features |= BIT_ULL(NPC_MPLS1_LBTCBOS + 895 2 * bit); 896 flow_spec->mpls_lse[bit] = 897 FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB, 898 match.key->ls[bit].mpls_label) | 899 FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC, 900 match.key->ls[bit].mpls_tc) | 901 FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS, 902 match.key->ls[bit].mpls_bos); 903 904 flow_mask->mpls_lse[bit] = 905 FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB, 906 match.mask->ls[bit].mpls_label) | 907 FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC, 908 match.mask->ls[bit].mpls_tc) | 909 FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS, 910 match.mask->ls[bit].mpls_bos); 911 } 912 913 if (match.mask->ls[bit].mpls_ttl) { 914 req->features |= BIT_ULL(NPC_MPLS1_TTL + 915 2 * bit); 916 flow_spec->mpls_lse[bit] |= 917 FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL, 918 match.key->ls[bit].mpls_ttl); 919 flow_mask->mpls_lse[bit] |= 920 FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL, 921 match.mask->ls[bit].mpls_ttl); 922 } 923 } 924 } 925 926 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) { 927 struct flow_match_icmp match; 928 929 flow_rule_match_icmp(rule, &match); 930 931 flow_spec->icmp_type = match.key->type; 932 flow_mask->icmp_type = match.mask->type; 933 req->features |= BIT_ULL(NPC_TYPE_ICMP); 934 935 flow_spec->icmp_code = match.key->code; 936 flow_mask->icmp_code = match.mask->code; 937 req->features |= BIT_ULL(NPC_CODE_ICMP); 938 } 939 return otx2_tc_parse_actions(nic, &rule->action, req, f, node); 940 } 941 942 static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf) 943 { 944 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; 945 struct otx2_tc_flow *iter, *tmp; 946 947 if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC)) 948 return; 949 950 list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) { 951 list_del(&iter->list); 952 kfree(iter); 953 flow_cfg->nr_flows--; 954 } 955 } 956 957 static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg, 958 unsigned long cookie) 959 { 960 struct otx2_tc_flow *tmp; 961 962 list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) { 963 if (tmp->cookie == cookie) 964 return tmp; 965 } 966 967 return NULL; 968 } 969 970 static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg, 971 int index) 972 { 973 struct otx2_tc_flow *tmp; 974 int i = 0; 975 976 list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) { 977 if (i == index) 978 return tmp; 979 i++; 980 } 981 982 return NULL; 983 } 984 985 static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg, 986 struct otx2_tc_flow *node) 987 { 988 struct list_head *pos, *n; 989 struct otx2_tc_flow *tmp; 990 991 list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { 992 tmp = list_entry(pos, struct otx2_tc_flow, list); 993 if (node == tmp) { 994 list_del(&node->list); 995 return; 996 } 997 } 998 } 999 1000 static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg, 1001 struct otx2_tc_flow *node) 1002 { 1003 struct list_head *pos, *n; 1004 struct otx2_tc_flow *tmp; 1005 int index = 0; 1006 1007 /* If the flow list is empty then add the new node */ 1008 if (list_empty(&flow_cfg->flow_list_tc)) { 1009 list_add(&node->list, &flow_cfg->flow_list_tc); 1010 return index; 1011 } 1012 1013 list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { 1014 tmp = list_entry(pos, struct otx2_tc_flow, list); 1015 if (node->prio < tmp->prio) 1016 break; 1017 index++; 1018 } 1019 1020 list_add(&node->list, pos->prev); 1021 return index; 1022 } 1023 1024 static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req) 1025 { 1026 struct npc_install_flow_req *tmp_req; 1027 int err; 1028 1029 mutex_lock(&nic->mbox.lock); 1030 tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); 1031 if (!tmp_req) { 1032 mutex_unlock(&nic->mbox.lock); 1033 return -ENOMEM; 1034 } 1035 1036 memcpy(tmp_req, req, sizeof(struct npc_install_flow_req)); 1037 /* Send message to AF */ 1038 err = otx2_sync_mbox_msg(&nic->mbox); 1039 if (err) { 1040 netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n", 1041 req->entry); 1042 mutex_unlock(&nic->mbox.lock); 1043 return -EFAULT; 1044 } 1045 1046 mutex_unlock(&nic->mbox.lock); 1047 return 0; 1048 } 1049 1050 static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val) 1051 { 1052 struct npc_delete_flow_rsp *rsp; 1053 struct npc_delete_flow_req *req; 1054 int err; 1055 1056 mutex_lock(&nic->mbox.lock); 1057 req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox); 1058 if (!req) { 1059 mutex_unlock(&nic->mbox.lock); 1060 return -ENOMEM; 1061 } 1062 1063 req->entry = entry; 1064 1065 /* Send message to AF */ 1066 err = otx2_sync_mbox_msg(&nic->mbox); 1067 if (err) { 1068 netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n", 1069 entry); 1070 mutex_unlock(&nic->mbox.lock); 1071 return -EFAULT; 1072 } 1073 1074 if (cntr_val) { 1075 rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox, 1076 0, &req->hdr); 1077 if (IS_ERR(rsp)) { 1078 netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n", 1079 entry); 1080 mutex_unlock(&nic->mbox.lock); 1081 return -EFAULT; 1082 } 1083 1084 *cntr_val = rsp->cntr_val; 1085 } 1086 1087 mutex_unlock(&nic->mbox.lock); 1088 return 0; 1089 } 1090 1091 static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic, 1092 struct otx2_flow_config *flow_cfg, 1093 struct otx2_tc_flow *node) 1094 { 1095 struct list_head *pos, *n; 1096 struct otx2_tc_flow *tmp; 1097 int i = 0, index = 0; 1098 u16 cntr_val = 0; 1099 1100 /* Find and delete the entry from the list and re-install 1101 * all the entries from beginning to the index of the 1102 * deleted entry to higher mcam indexes. 1103 */ 1104 list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { 1105 tmp = list_entry(pos, struct otx2_tc_flow, list); 1106 if (node == tmp) { 1107 list_del(&tmp->list); 1108 break; 1109 } 1110 1111 otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val); 1112 tmp->entry++; 1113 tmp->req.entry = tmp->entry; 1114 tmp->req.cntr_val = cntr_val; 1115 index++; 1116 } 1117 1118 list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { 1119 if (i == index) 1120 break; 1121 1122 tmp = list_entry(pos, struct otx2_tc_flow, list); 1123 otx2_add_mcam_flow_entry(nic, &tmp->req); 1124 i++; 1125 } 1126 1127 return 0; 1128 } 1129 1130 static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic, 1131 struct otx2_flow_config *flow_cfg, 1132 struct otx2_tc_flow *node) 1133 { 1134 int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1; 1135 struct otx2_tc_flow *tmp; 1136 int list_idx, i; 1137 u16 cntr_val = 0; 1138 1139 /* Find the index of the entry(list_idx) whose priority 1140 * is greater than the new entry and re-install all 1141 * the entries from beginning to list_idx to higher 1142 * mcam indexes. 1143 */ 1144 list_idx = otx2_tc_add_to_flow_list(flow_cfg, node); 1145 for (i = 0; i < list_idx; i++) { 1146 tmp = otx2_tc_get_entry_by_index(flow_cfg, i); 1147 if (!tmp) 1148 return -ENOMEM; 1149 1150 otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val); 1151 tmp->entry = flow_cfg->flow_ent[mcam_idx]; 1152 tmp->req.entry = tmp->entry; 1153 tmp->req.cntr_val = cntr_val; 1154 otx2_add_mcam_flow_entry(nic, &tmp->req); 1155 mcam_idx++; 1156 } 1157 1158 return mcam_idx; 1159 } 1160 1161 static int otx2_tc_update_mcam_table(struct otx2_nic *nic, 1162 struct otx2_flow_config *flow_cfg, 1163 struct otx2_tc_flow *node, 1164 bool add_req) 1165 { 1166 if (add_req) 1167 return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node); 1168 1169 return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node); 1170 } 1171 1172 static int otx2_tc_del_flow(struct otx2_nic *nic, 1173 struct flow_cls_offload *tc_flow_cmd) 1174 { 1175 struct otx2_flow_config *flow_cfg = nic->flow_cfg; 1176 struct nix_mcast_grp_destroy_req *grp_destroy_req; 1177 struct otx2_tc_flow *flow_node; 1178 int err; 1179 1180 flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie); 1181 if (!flow_node) { 1182 netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n", 1183 tc_flow_cmd->cookie); 1184 return -EINVAL; 1185 } 1186 1187 if (flow_node->is_act_police) { 1188 __clear_bit(flow_node->rq, &nic->rq_bmap); 1189 1190 if (nic->flags & OTX2_FLAG_INTF_DOWN) 1191 goto free_mcam_flow; 1192 1193 mutex_lock(&nic->mbox.lock); 1194 1195 err = cn10k_map_unmap_rq_policer(nic, flow_node->rq, 1196 flow_node->leaf_profile, false); 1197 if (err) 1198 netdev_err(nic->netdev, 1199 "Unmapping RQ %d & profile %d failed\n", 1200 flow_node->rq, flow_node->leaf_profile); 1201 1202 err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile); 1203 if (err) 1204 netdev_err(nic->netdev, 1205 "Unable to free leaf bandwidth profile(%d)\n", 1206 flow_node->leaf_profile); 1207 1208 mutex_unlock(&nic->mbox.lock); 1209 } 1210 /* Remove the multicast/mirror related nodes */ 1211 if (flow_node->mcast_grp_idx != MCAST_INVALID_GRP) { 1212 mutex_lock(&nic->mbox.lock); 1213 grp_destroy_req = otx2_mbox_alloc_msg_nix_mcast_grp_destroy(&nic->mbox); 1214 grp_destroy_req->mcast_grp_idx = flow_node->mcast_grp_idx; 1215 otx2_sync_mbox_msg(&nic->mbox); 1216 mutex_unlock(&nic->mbox.lock); 1217 } 1218 1219 1220 free_mcam_flow: 1221 otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL); 1222 otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false); 1223 kfree_rcu(flow_node, rcu); 1224 flow_cfg->nr_flows--; 1225 return 0; 1226 } 1227 1228 static int otx2_tc_add_flow(struct otx2_nic *nic, 1229 struct flow_cls_offload *tc_flow_cmd) 1230 { 1231 struct netlink_ext_ack *extack = tc_flow_cmd->common.extack; 1232 struct otx2_flow_config *flow_cfg = nic->flow_cfg; 1233 struct otx2_tc_flow *new_node, *old_node; 1234 struct npc_install_flow_req *req, dummy; 1235 int rc, err, mcam_idx; 1236 1237 if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) 1238 return -ENOMEM; 1239 1240 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 1241 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 1242 return -EINVAL; 1243 } 1244 1245 if (flow_cfg->nr_flows == flow_cfg->max_flows) { 1246 NL_SET_ERR_MSG_MOD(extack, 1247 "Free MCAM entry not available to add the flow"); 1248 return -ENOMEM; 1249 } 1250 1251 /* allocate memory for the new flow and it's node */ 1252 new_node = kzalloc(sizeof(*new_node), GFP_KERNEL); 1253 if (!new_node) 1254 return -ENOMEM; 1255 spin_lock_init(&new_node->lock); 1256 new_node->cookie = tc_flow_cmd->cookie; 1257 new_node->prio = tc_flow_cmd->common.prio; 1258 new_node->mcast_grp_idx = MCAST_INVALID_GRP; 1259 1260 memset(&dummy, 0, sizeof(struct npc_install_flow_req)); 1261 1262 rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy); 1263 if (rc) { 1264 kfree_rcu(new_node, rcu); 1265 return rc; 1266 } 1267 1268 /* If a flow exists with the same cookie, delete it */ 1269 old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie); 1270 if (old_node) 1271 otx2_tc_del_flow(nic, tc_flow_cmd); 1272 1273 mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true); 1274 mutex_lock(&nic->mbox.lock); 1275 req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); 1276 if (!req) { 1277 mutex_unlock(&nic->mbox.lock); 1278 rc = -ENOMEM; 1279 goto free_leaf; 1280 } 1281 1282 memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr)); 1283 memcpy(req, &dummy, sizeof(struct npc_install_flow_req)); 1284 req->channel = nic->hw.rx_chan_base; 1285 req->entry = flow_cfg->flow_ent[mcam_idx]; 1286 req->intf = NIX_INTF_RX; 1287 req->set_cntr = 1; 1288 new_node->entry = req->entry; 1289 1290 /* Send message to AF */ 1291 rc = otx2_sync_mbox_msg(&nic->mbox); 1292 if (rc) { 1293 NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry"); 1294 mutex_unlock(&nic->mbox.lock); 1295 goto free_leaf; 1296 } 1297 1298 mutex_unlock(&nic->mbox.lock); 1299 memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req)); 1300 1301 flow_cfg->nr_flows++; 1302 return 0; 1303 1304 free_leaf: 1305 otx2_tc_del_from_flow_list(flow_cfg, new_node); 1306 kfree_rcu(new_node, rcu); 1307 if (new_node->is_act_police) { 1308 mutex_lock(&nic->mbox.lock); 1309 1310 err = cn10k_map_unmap_rq_policer(nic, new_node->rq, 1311 new_node->leaf_profile, false); 1312 if (err) 1313 netdev_err(nic->netdev, 1314 "Unmapping RQ %d & profile %d failed\n", 1315 new_node->rq, new_node->leaf_profile); 1316 err = cn10k_free_leaf_profile(nic, new_node->leaf_profile); 1317 if (err) 1318 netdev_err(nic->netdev, 1319 "Unable to free leaf bandwidth profile(%d)\n", 1320 new_node->leaf_profile); 1321 1322 __clear_bit(new_node->rq, &nic->rq_bmap); 1323 1324 mutex_unlock(&nic->mbox.lock); 1325 } 1326 1327 return rc; 1328 } 1329 1330 static int otx2_tc_get_flow_stats(struct otx2_nic *nic, 1331 struct flow_cls_offload *tc_flow_cmd) 1332 { 1333 struct npc_mcam_get_stats_req *req; 1334 struct npc_mcam_get_stats_rsp *rsp; 1335 struct otx2_tc_flow_stats *stats; 1336 struct otx2_tc_flow *flow_node; 1337 int err; 1338 1339 flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie); 1340 if (!flow_node) { 1341 netdev_info(nic->netdev, "tc flow not found for cookie %lx", 1342 tc_flow_cmd->cookie); 1343 return -EINVAL; 1344 } 1345 1346 mutex_lock(&nic->mbox.lock); 1347 1348 req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox); 1349 if (!req) { 1350 mutex_unlock(&nic->mbox.lock); 1351 return -ENOMEM; 1352 } 1353 1354 req->entry = flow_node->entry; 1355 1356 err = otx2_sync_mbox_msg(&nic->mbox); 1357 if (err) { 1358 netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n", 1359 req->entry); 1360 mutex_unlock(&nic->mbox.lock); 1361 return -EFAULT; 1362 } 1363 1364 rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp 1365 (&nic->mbox.mbox, 0, &req->hdr); 1366 if (IS_ERR(rsp)) { 1367 mutex_unlock(&nic->mbox.lock); 1368 return PTR_ERR(rsp); 1369 } 1370 1371 mutex_unlock(&nic->mbox.lock); 1372 1373 if (!rsp->stat_ena) 1374 return -EINVAL; 1375 1376 stats = &flow_node->stats; 1377 1378 spin_lock(&flow_node->lock); 1379 flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0, 1380 FLOW_ACTION_HW_STATS_IMMEDIATE); 1381 stats->pkts = rsp->stat; 1382 spin_unlock(&flow_node->lock); 1383 1384 return 0; 1385 } 1386 1387 static int otx2_setup_tc_cls_flower(struct otx2_nic *nic, 1388 struct flow_cls_offload *cls_flower) 1389 { 1390 switch (cls_flower->command) { 1391 case FLOW_CLS_REPLACE: 1392 return otx2_tc_add_flow(nic, cls_flower); 1393 case FLOW_CLS_DESTROY: 1394 return otx2_tc_del_flow(nic, cls_flower); 1395 case FLOW_CLS_STATS: 1396 return otx2_tc_get_flow_stats(nic, cls_flower); 1397 default: 1398 return -EOPNOTSUPP; 1399 } 1400 } 1401 1402 static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic, 1403 struct tc_cls_matchall_offload *cls) 1404 { 1405 struct netlink_ext_ack *extack = cls->common.extack; 1406 struct flow_action *actions = &cls->rule->action; 1407 struct flow_action_entry *entry; 1408 u64 rate; 1409 int err; 1410 1411 err = otx2_tc_validate_flow(nic, actions, extack); 1412 if (err) 1413 return err; 1414 1415 if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) { 1416 NL_SET_ERR_MSG_MOD(extack, 1417 "Only one ingress MATCHALL ratelimitter can be offloaded"); 1418 return -ENOMEM; 1419 } 1420 1421 entry = &cls->rule->action.entries[0]; 1422 switch (entry->id) { 1423 case FLOW_ACTION_POLICE: 1424 /* Ingress ratelimiting is not supported on OcteonTx2 */ 1425 if (is_dev_otx2(nic->pdev)) { 1426 NL_SET_ERR_MSG_MOD(extack, 1427 "Ingress policing not supported on this platform"); 1428 return -EOPNOTSUPP; 1429 } 1430 1431 err = cn10k_alloc_matchall_ipolicer(nic); 1432 if (err) 1433 return err; 1434 1435 /* Convert to bits per second */ 1436 rate = entry->police.rate_bytes_ps * 8; 1437 err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate); 1438 if (err) 1439 return err; 1440 nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED; 1441 break; 1442 default: 1443 NL_SET_ERR_MSG_MOD(extack, 1444 "Only police action supported with Ingress MATCHALL offload"); 1445 return -EOPNOTSUPP; 1446 } 1447 1448 return 0; 1449 } 1450 1451 static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic, 1452 struct tc_cls_matchall_offload *cls) 1453 { 1454 struct netlink_ext_ack *extack = cls->common.extack; 1455 int err; 1456 1457 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 1458 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 1459 return -EINVAL; 1460 } 1461 1462 err = cn10k_free_matchall_ipolicer(nic); 1463 nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED; 1464 return err; 1465 } 1466 1467 static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic, 1468 struct tc_cls_matchall_offload *cls_matchall) 1469 { 1470 switch (cls_matchall->command) { 1471 case TC_CLSMATCHALL_REPLACE: 1472 return otx2_tc_ingress_matchall_install(nic, cls_matchall); 1473 case TC_CLSMATCHALL_DESTROY: 1474 return otx2_tc_ingress_matchall_delete(nic, cls_matchall); 1475 case TC_CLSMATCHALL_STATS: 1476 default: 1477 break; 1478 } 1479 1480 return -EOPNOTSUPP; 1481 } 1482 1483 static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type, 1484 void *type_data, void *cb_priv) 1485 { 1486 struct otx2_nic *nic = cb_priv; 1487 bool ntuple; 1488 1489 if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) 1490 return -EOPNOTSUPP; 1491 1492 ntuple = nic->netdev->features & NETIF_F_NTUPLE; 1493 switch (type) { 1494 case TC_SETUP_CLSFLOWER: 1495 if (ntuple) { 1496 netdev_warn(nic->netdev, 1497 "Can't install TC flower offload rule when NTUPLE is active"); 1498 return -EOPNOTSUPP; 1499 } 1500 1501 return otx2_setup_tc_cls_flower(nic, type_data); 1502 case TC_SETUP_CLSMATCHALL: 1503 return otx2_setup_tc_ingress_matchall(nic, type_data); 1504 default: 1505 break; 1506 } 1507 1508 return -EOPNOTSUPP; 1509 } 1510 1511 static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic, 1512 struct tc_cls_matchall_offload *cls_matchall) 1513 { 1514 switch (cls_matchall->command) { 1515 case TC_CLSMATCHALL_REPLACE: 1516 return otx2_tc_egress_matchall_install(nic, cls_matchall); 1517 case TC_CLSMATCHALL_DESTROY: 1518 return otx2_tc_egress_matchall_delete(nic, cls_matchall); 1519 case TC_CLSMATCHALL_STATS: 1520 default: 1521 break; 1522 } 1523 1524 return -EOPNOTSUPP; 1525 } 1526 1527 static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type, 1528 void *type_data, void *cb_priv) 1529 { 1530 struct otx2_nic *nic = cb_priv; 1531 1532 if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) 1533 return -EOPNOTSUPP; 1534 1535 switch (type) { 1536 case TC_SETUP_CLSMATCHALL: 1537 return otx2_setup_tc_egress_matchall(nic, type_data); 1538 default: 1539 break; 1540 } 1541 1542 return -EOPNOTSUPP; 1543 } 1544 1545 static LIST_HEAD(otx2_block_cb_list); 1546 1547 static int otx2_setup_tc_block(struct net_device *netdev, 1548 struct flow_block_offload *f) 1549 { 1550 struct otx2_nic *nic = netdev_priv(netdev); 1551 flow_setup_cb_t *cb; 1552 bool ingress; 1553 1554 if (f->block_shared) 1555 return -EOPNOTSUPP; 1556 1557 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { 1558 cb = otx2_setup_tc_block_ingress_cb; 1559 ingress = true; 1560 } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { 1561 cb = otx2_setup_tc_block_egress_cb; 1562 ingress = false; 1563 } else { 1564 return -EOPNOTSUPP; 1565 } 1566 1567 return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb, 1568 nic, nic, ingress); 1569 } 1570 1571 int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, 1572 void *type_data) 1573 { 1574 switch (type) { 1575 case TC_SETUP_BLOCK: 1576 return otx2_setup_tc_block(netdev, type_data); 1577 case TC_SETUP_QDISC_HTB: 1578 return otx2_setup_tc_htb(netdev, type_data); 1579 default: 1580 return -EOPNOTSUPP; 1581 } 1582 } 1583 EXPORT_SYMBOL(otx2_setup_tc); 1584 1585 int otx2_init_tc(struct otx2_nic *nic) 1586 { 1587 /* Exclude receive queue 0 being used for police action */ 1588 set_bit(0, &nic->rq_bmap); 1589 1590 if (!nic->flow_cfg) { 1591 netdev_err(nic->netdev, 1592 "Can't init TC, nic->flow_cfg is not setup\n"); 1593 return -EINVAL; 1594 } 1595 1596 return 0; 1597 } 1598 EXPORT_SYMBOL(otx2_init_tc); 1599 1600 void otx2_shutdown_tc(struct otx2_nic *nic) 1601 { 1602 otx2_destroy_tc_flow_list(nic); 1603 } 1604 EXPORT_SYMBOL(otx2_shutdown_tc); 1605 1606 static void otx2_tc_config_ingress_rule(struct otx2_nic *nic, 1607 struct otx2_tc_flow *node) 1608 { 1609 struct npc_install_flow_req *req; 1610 1611 if (otx2_tc_act_set_hw_police(nic, node)) 1612 return; 1613 1614 mutex_lock(&nic->mbox.lock); 1615 1616 req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); 1617 if (!req) 1618 goto err; 1619 1620 memcpy(req, &node->req, sizeof(struct npc_install_flow_req)); 1621 1622 if (otx2_sync_mbox_msg(&nic->mbox)) 1623 netdev_err(nic->netdev, 1624 "Failed to install MCAM flow entry for ingress rule"); 1625 err: 1626 mutex_unlock(&nic->mbox.lock); 1627 } 1628 1629 void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic) 1630 { 1631 struct otx2_flow_config *flow_cfg = nic->flow_cfg; 1632 struct otx2_tc_flow *node; 1633 1634 /* If any ingress policer rules exist for the interface then 1635 * apply those rules. Ingress policer rules depend on bandwidth 1636 * profiles linked to the receive queues. Since no receive queues 1637 * exist when interface is down, ingress policer rules are stored 1638 * and configured in hardware after all receive queues are allocated 1639 * in otx2_open. 1640 */ 1641 list_for_each_entry(node, &flow_cfg->flow_list_tc, list) { 1642 if (node->is_act_police) 1643 otx2_tc_config_ingress_rule(nic, node); 1644 } 1645 } 1646 EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules); 1647