// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitfield.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/ipv6.h>

#include "cn10k.h"
#include "otx2_common.h"
#include "qos.h"

#define CN10K_MAX_BURST_MANTISSA	0x7FFFULL
#define CN10K_MAX_BURST_SIZE		8453888ULL

#define CN10K_TLX_BURST_MANTISSA	GENMASK_ULL(43, 29)
#define CN10K_TLX_BURST_EXPONENT	GENMASK_ULL(47, 44)

#define OTX2_UNSUPP_LSE_DEPTH		GENMASK(6, 4)

#define MCAST_INVALID_GRP		(-1U)

struct otx2_tc_flow_stats {
	u64 bytes;
	u64 pkts;
	u64 used;
};

struct otx2_tc_flow {
	struct list_head		list;
	unsigned long			cookie;
	struct rcu_head			rcu;
	struct otx2_tc_flow_stats	stats;
	spinlock_t			lock; /* lock for stats */
	u16				rq;
	u16				entry;
	u16				leaf_profile;
	bool				is_act_police;
	u32				prio;
	struct npc_install_flow_req	req;
	u32				mcast_grp_idx;
	u64				rate;
	u32				burst;
	bool				is_pps;
};

static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
				      u32 *burst_exp, u32 *burst_mantissa)
{
	int max_burst, max_mantissa;
	unsigned int tmp;

	if (is_dev_otx2(nic->pdev)) {
		max_burst = MAX_BURST_SIZE;
		max_mantissa = MAX_BURST_MANTISSA;
	} else {
		max_burst = CN10K_MAX_BURST_SIZE;
		max_mantissa = CN10K_MAX_BURST_MANTISSA;
	}

	/* Burst is calculated as
	 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
	 * Max supported burst size is 130,816 bytes.
	 */
	burst = min_t(u32, burst, max_burst);
	if (burst) {
		*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
		tmp = burst - rounddown_pow_of_two(burst);
		if (burst < max_mantissa)
			*burst_mantissa = tmp * 2;
		else
			*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
	} else {
		*burst_exp = MAX_BURST_EXPONENT;
		*burst_mantissa = max_mantissa;
	}
}

static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
				     u32 *mantissa, u32 *div_exp)
{
	u64 tmp;

	/* Rate calculation by hardware
	 *
	 * PIR_ADD = ((256 + mantissa) << exp) / 256
	 * rate = (2 * PIR_ADD) / (1 << div_exp)
	 * The resultant rate is in Mbps.
	 */

	/* 2Mbps to 100Gbps can be expressed with div_exp = 0.
	 * Setting this to '0' will ease the calculation of
	 * exponent and mantissa.
	 */
	*div_exp = 0;

	if (maxrate) {
		*exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
		tmp = maxrate - rounddown_pow_of_two(maxrate);
		if (maxrate < MAX_RATE_MANTISSA)
			*mantissa = tmp * 2;
		else
			*mantissa = tmp / (1ULL << (*exp - 7));
	} else {
		/* Instead of disabling rate limiting, set all values to max */
		*exp = MAX_RATE_EXPONENT;
		*mantissa = MAX_RATE_MANTISSA;
	}
}
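
/* Worked example of the two encodings above (illustrative values only,
 * and assuming maxrate is expressed in Mbps as noted in the comment):
 *
 *   maxrate = 3000 -> exp = ilog2(3000) - 1 = 10,
 *                     mantissa = (3000 - 2048) / (1 << (10 - 7)) = 119,
 *                     PIR_ADD = ((256 + 119) << 10) / 256 = 1500,
 *                     rate = 2 * 1500 = 3000 Mbps.
 *
 *   burst = 98304  -> burst_exp = 15,
 *                     burst_mantissa = (98304 - 65536) / (1 << (15 - 7)) = 128,
 *                     ((256 + 128) << (1 + 15)) / 256 = 98304 bytes.
 */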

u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
				u64 maxrate, u32 burst)
{
	u32 burst_exp, burst_mantissa;
	u32 exp, mantissa, div_exp;
	u64 regval = 0;

	/* Get exponent and mantissa values from the desired rate */
	otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
	otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);

	if (is_dev_otx2(nic->pdev)) {
		regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
			 FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	} else {
		regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
			 FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	}

	return regval;
}

static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
					 u32 burst, u64 maxrate)
{
	struct otx2_hw *hw = &nic->hw;
	struct nix_txschq_config *req;
	int txschq, err;

	/* All SQs share the same TL4, so pick the first scheduler */
	txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->lvl = NIX_TXSCH_LVL_TL4;
	req->num_regs = 1;
	req->reg[0] = NIX_AF_TL4X_PIR(txschq);
	req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);

	err = otx2_sync_mbox_msg(&nic->mbox);
	mutex_unlock(&nic->mbox.lock);
	return err;
}

static int otx2_tc_validate_flow(struct otx2_nic *nic,
				 struct flow_action *actions,
				 struct netlink_ext_ack *extack)
{
	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload supports only 1 policing action");
		return -EINVAL;
	}
	return 0;
}

static int otx2_policer_validate(const struct flow_action *action,
				 const struct flow_action_entry *act,
				 struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}
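
/* Usage sketch (illustrative only; exact syntax depends on the iproute2
 * version in use). An egress MATCHALL policer that takes the path below
 * could be set up with something like:
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 egress matchall skip_sw \
 *           action police rate 1gbit burst 64k conform-exceed drop/pipe
 *
 * which ends up programming the TL4 PIR register through
 * otx2_set_matchall_egress_rate().
 */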

static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one Egress MATCHALL ratelimiter can be offloaded");
		return -ENOMEM;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		err = otx2_policer_validate(&cls->rule->action, entry, extack);
		if (err)
			return err;

		if (entry->police.rate_pkt_ps) {
			NL_SET_ERR_MSG_MOD(extack, "QoS offload doesn't support packets per second");
			return -EOPNOTSUPP;
		}
		err = otx2_set_matchall_egress_rate(nic, entry->police.burst,
						    otx2_convert_rate(entry->police.rate_bytes_ps));
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action is supported with Egress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = otx2_set_matchall_egress_rate(nic, 0, 0);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
	return err;
}

static int otx2_tc_act_set_hw_police(struct otx2_nic *nic,
				     struct otx2_tc_flow *node)
{
	int rc;

	mutex_lock(&nic->mbox.lock);

	rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
	if (rc) {
		mutex_unlock(&nic->mbox.lock);
		return rc;
	}

	rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile,
				     node->burst, node->rate, node->is_pps);
	if (rc)
		goto free_leaf;

	rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true);
	if (rc)
		goto free_leaf;

	mutex_unlock(&nic->mbox.lock);

	return 0;

free_leaf:
	if (cn10k_free_leaf_profile(nic, node->leaf_profile))
		netdev_err(nic->netdev,
			   "Unable to free leaf bandwidth profile(%d)\n",
			   node->leaf_profile);
	mutex_unlock(&nic->mbox.lock);
	return rc;
}
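
/* Set up an ingress police action for a flower rule. The approach used
 * below: reserve an otherwise unused receive queue from rq_bmap, point
 * the MCAM rule at it with NIX_RX_ACTIONOP_UCAST, then attach a CN10K
 * bandwidth (leaf) profile to that queue via otx2_tc_act_set_hw_police().
 * The RQ is only marked as in use once the hardware programming succeeds.
 */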
static int otx2_tc_act_set_police(struct otx2_nic *nic,
				  struct otx2_tc_flow *node,
				  struct flow_cls_offload *f,
				  u64 rate, u32 burst, u32 mark,
				  struct npc_install_flow_req *req, bool pps)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct otx2_hw *hw = &nic->hw;
	int rq_idx, rc;

	rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
	if (rq_idx >= hw->rx_queues) {
		NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
		return -EINVAL;
	}

	req->match_id = mark & 0xFFFFULL;
	req->index = rq_idx;
	req->op = NIX_RX_ACTIONOP_UCAST;

	node->is_act_police = true;
	node->rq = rq_idx;
	node->burst = burst;
	node->rate = rate;
	node->is_pps = pps;

	rc = otx2_tc_act_set_hw_police(nic, node);
	if (!rc)
		set_bit(rq_idx, &nic->rq_bmap);

	return rc;
}

static int otx2_tc_update_mcast(struct otx2_nic *nic,
				struct npc_install_flow_req *req,
				struct netlink_ext_ack *extack,
				struct otx2_tc_flow *node,
				struct nix_mcast_grp_update_req *ureq,
				u8 num_intf)
{
	struct nix_mcast_grp_update_req *grp_update_req;
	struct nix_mcast_grp_create_req *creq;
	struct nix_mcast_grp_create_rsp *crsp;
	u32 grp_index;
	int rc;

	mutex_lock(&nic->mbox.lock);
	creq = otx2_mbox_alloc_msg_nix_mcast_grp_create(&nic->mbox);
	if (!creq) {
		rc = -ENOMEM;
		goto error;
	}

	creq->dir = NIX_MCAST_INGRESS;
	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create multicast group");
		goto error;
	}

	crsp = (struct nix_mcast_grp_create_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								    0,
								    &creq->hdr);
	if (IS_ERR(crsp)) {
		rc = PTR_ERR(crsp);
		goto error;
	}

	grp_index = crsp->mcast_grp_idx;
	grp_update_req = otx2_mbox_alloc_msg_nix_mcast_grp_update(&nic->mbox);
	if (!grp_update_req) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
		rc = -ENOMEM;
		goto error;
	}

	ureq->op = NIX_MCAST_OP_ADD_ENTRY;
	ureq->mcast_grp_idx = grp_index;
	ureq->num_mce_entry = num_intf;
	ureq->pcifunc[0] = nic->pcifunc;
	ureq->channel[0] = nic->hw.tx_chan_base;

	ureq->dest_type[0] = NIX_RX_RSS;
	ureq->rq_rss_index[0] = 0;
	memcpy(&ureq->hdr, &grp_update_req->hdr, sizeof(struct mbox_msghdr));
	memcpy(grp_update_req, ureq, sizeof(struct nix_mcast_grp_update_req));

	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
		goto error;
	}

	mutex_unlock(&nic->mbox.lock);
	req->op = NIX_RX_ACTIONOP_MCAST;
	req->index = grp_index;
	node->mcast_grp_idx = grp_index;
	return 0;

error:
	mutex_unlock(&nic->mbox.lock);
	return rc;
}
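
/* Translate the flower action list into an NPC install request. Roughly,
 * the switch below handles: drop, accept, redirect to a VF/representor on
 * the same PF, VLAN pop, CN10K ingress policing, skbedit mark, RX queue
 * mapping and ingress mirroring (via a NIX multicast group). Only a single
 * police action per rule is accepted.
 */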
static int otx2_tc_parse_actions(struct otx2_nic *nic,
				 struct flow_action *flow_action,
				 struct npc_install_flow_req *req,
				 struct flow_cls_offload *f,
				 struct otx2_tc_flow *node)
{
	struct nix_mcast_grp_update_req dummy_grp_update_req = { 0 };
	struct netlink_ext_ack *extack = f->common.extack;
	bool pps = false, mcast = false;
	struct flow_action_entry *act;
	struct net_device *target;
	struct otx2_nic *priv;
	struct rep_dev *rdev;
	u32 burst, mark = 0;
	u8 nr_police = 0;
	u8 num_intf = 1;
	int err, i;
	u64 rate;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
		return -EINVAL;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			req->op = NIX_RX_ACTIONOP_DROP;
			return 0;
		case FLOW_ACTION_ACCEPT:
			req->op = NIX_RX_ACTION_DEFAULT;
			return 0;
		case FLOW_ACTION_REDIRECT_INGRESS:
			target = act->dev;
			if (target->dev.parent) {
				priv = netdev_priv(target);
				if (rvu_get_pf(nic->pdev, nic->pcifunc) !=
				    rvu_get_pf(nic->pdev, priv->pcifunc)) {
					NL_SET_ERR_MSG_MOD(extack,
							   "can't redirect to other pf/vf");
					return -EOPNOTSUPP;
				}
				req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
			} else {
				rdev = netdev_priv(target);
				req->vf = rdev->pcifunc & RVU_PFVF_FUNC_MASK;
			}

			/* if op is already set, avoid overwriting it */
			if (!req->op)
				req->op = NIX_RX_ACTION_DEFAULT;
			break;

		case FLOW_ACTION_VLAN_POP:
			req->vtag0_valid = true;
			/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
			req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
			break;
		case FLOW_ACTION_POLICE:
			/* Ingress ratelimiting is not supported on OcteonTx2 */
			if (is_dev_otx2(nic->pdev)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Ingress policing not supported on this platform");
				return -EOPNOTSUPP;
			}

			err = otx2_policer_validate(flow_action, act, extack);
			if (err)
				return err;

			if (act->police.rate_bytes_ps > 0) {
				rate = act->police.rate_bytes_ps * 8;
				burst = act->police.burst;
			} else if (act->police.rate_pkt_ps > 0) {
				/* The algorithm used to calculate rate
				 * mantissa, exponent values for a given token
				 * rate (token can be byte or packet) requires
				 * token rate to be multiplied by 8.
				 */
				rate = act->police.rate_pkt_ps * 8;
				burst = act->police.burst_pkt;
				pps = true;
			}
			nr_police++;
			break;
		case FLOW_ACTION_MARK:
			if (act->mark & ~OTX2_RX_MATCH_ID_MASK) {
				NL_SET_ERR_MSG_MOD(extack, "Bad flow mark, only 16 bit supported");
				return -EOPNOTSUPP;
			}
			mark = act->mark;
			req->match_id = mark & OTX2_RX_MATCH_ID_MASK;
			req->op = NIX_RX_ACTION_DEFAULT;
			nic->flags |= OTX2_FLAG_TC_MARK_ENABLED;
			refcount_inc(&nic->flow_cfg->mark_flows);
			break;

		case FLOW_ACTION_RX_QUEUE_MAPPING:
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = act->rx_queue;
			break;

		case FLOW_ACTION_MIRRED_INGRESS:
			target = act->dev;
			priv = netdev_priv(target);
			dummy_grp_update_req.pcifunc[num_intf] = priv->pcifunc;
			dummy_grp_update_req.channel[num_intf] = priv->hw.tx_chan_base;
			dummy_grp_update_req.dest_type[num_intf] = NIX_RX_RSS;
			dummy_grp_update_req.rq_rss_index[num_intf] = 0;
			mcast = true;
			num_intf++;
			break;

		default:
			return -EOPNOTSUPP;
		}
	}

	if (mcast) {
		err = otx2_tc_update_mcast(nic, req, extack, node,
					   &dummy_grp_update_req,
					   num_intf);
		if (err)
			return err;
	}

	if (nr_police > 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "rate limit police offload requires a single action");
		return -EOPNOTSUPP;
	}

	if (nr_police)
		return otx2_tc_act_set_police(nic, node, f, rate, burst,
					      mark, req, pps);

	return 0;
}
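
/* Helper for both outer VLAN and inner (QinQ) CVLAN matches. The 16-bit
 * TCI written into the flow spec follows the usual 802.1Q layout, e.g.
 * (illustrative values) vlan_id 100, dei 0, priority 5 gives
 * 100 | (0 << 12) | (5 << 13) = 0xa064.
 */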
static int otx2_tc_process_vlan(struct otx2_nic *nic, struct flow_msg *flow_spec,
				struct flow_msg *flow_mask, struct flow_rule *rule,
				struct npc_install_flow_req *req, bool is_inner)
{
	struct flow_match_vlan match;
	u16 vlan_tci, vlan_tci_mask;

	if (is_inner)
		flow_rule_match_cvlan(rule, &match);
	else
		flow_rule_match_vlan(rule, &match);

	if (!eth_type_vlan(match.key->vlan_tpid)) {
		netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
			   ntohs(match.key->vlan_tpid));
		return -EOPNOTSUPP;
	}

	if (!match.mask->vlan_id) {
		struct flow_action_entry *act;
		int i;

		flow_action_for_each(i, act, &rule->action) {
			if (act->id == FLOW_ACTION_DROP) {
				netdev_err(nic->netdev,
					   "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n",
					   ntohs(match.key->vlan_tpid), match.key->vlan_id);
				return -EOPNOTSUPP;
			}
		}
	}

	if (match.mask->vlan_id ||
	    match.mask->vlan_dei ||
	    match.mask->vlan_priority) {
		vlan_tci = match.key->vlan_id |
			   match.key->vlan_dei << 12 |
			   match.key->vlan_priority << 13;

		vlan_tci_mask = match.mask->vlan_id |
				match.mask->vlan_dei << 12 |
				match.mask->vlan_priority << 13;
		if (is_inner) {
			flow_spec->vlan_itci = htons(vlan_tci);
			flow_mask->vlan_itci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_INNER_VID);
		} else {
			flow_spec->vlan_tci = htons(vlan_tci);
			flow_mask->vlan_tci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}
	}

	return 0;
}

static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
				struct flow_cls_offload *f,
				struct npc_install_flow_req *req)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_msg *flow_spec = &req->packet;
	struct flow_msg *flow_mask = &req->mask;
	struct flow_dissector *dissector;
	struct flow_rule *rule;
	u8 ip_proto = 0;

	rule = flow_cls_offload_flow_rule(f);
	dissector = rule->match.dissector;

	if ((dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_IPSEC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
		netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		/* All EtherTypes can be matched, no hw limitation */
		flow_spec->etype = match.key->n_proto;
		flow_mask->etype = match.mask->n_proto;
		req->features |= BIT_ULL(NPC_ETYPE);

		if (match.mask->ip_proto &&
		    (match.key->ip_proto != IPPROTO_TCP &&
		     match.key->ip_proto != IPPROTO_UDP &&
		     match.key->ip_proto != IPPROTO_SCTP &&
		     match.key->ip_proto != IPPROTO_ICMP &&
		     match.key->ip_proto != IPPROTO_ESP &&
		     match.key->ip_proto != IPPROTO_AH &&
		     match.key->ip_proto != IPPROTO_ICMPV6)) {
			netdev_info(nic->netdev,
				    "ip_proto=0x%x not supported\n",
				    match.key->ip_proto);
			return -EOPNOTSUPP;
		}
		if (match.mask->ip_proto)
			ip_proto = match.key->ip_proto;

		if (ip_proto == IPPROTO_UDP)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (ip_proto == IPPROTO_TCP)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else if (ip_proto == IPPROTO_SCTP)
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		else if (ip_proto == IPPROTO_ICMP)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
		else if (ip_proto == IPPROTO_ICMPV6)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
		else if (ip_proto == IPPROTO_ESP)
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		else if (ip_proto == IPPROTO_AH)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;
		u32 val;

		flow_rule_match_control(rule, &match);

		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			val = match.key->flags & FLOW_DIS_IS_FRAGMENT;
			if (ntohs(flow_spec->etype) == ETH_P_IP) {
				flow_spec->ip_flag = val ? IPV4_FLAG_MORE : 0;
				flow_mask->ip_flag = IPV4_FLAG_MORE;
				req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
			} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
				flow_spec->next_header = val ?
							 IPPROTO_FRAGMENT : 0;
				flow_mask->next_header = 0xff;
				req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
			} else {
				NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 or IPv6");
				return -EOPNOTSUPP;
			}
		}

		if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT,
						     match.mask->flags, extack))
			return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		if (!is_zero_ether_addr(match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
			return -EOPNOTSUPP;
		}

		if (!is_zero_ether_addr(match.mask->dst)) {
			ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
			ether_addr_copy(flow_mask->dmac,
					(u8 *)&match.mask->dst);
			req->features |= BIT_ULL(NPC_DMAC);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPSEC)) {
		struct flow_match_ipsec match;

		flow_rule_match_ipsec(rule, &match);
		if (!match.mask->spi) {
			NL_SET_ERR_MSG_MOD(extack, "spi index not specified");
			return -EOPNOTSUPP;
		}
		if (ip_proto != IPPROTO_ESP &&
		    ip_proto != IPPROTO_AH) {
			NL_SET_ERR_MSG_MOD(extack,
					   "SPI index is valid only for ESP/AH proto");
			return -EOPNOTSUPP;
		}

		flow_spec->spi = match.key->spi;
		flow_mask->spi = match.mask->spi;
		req->features |= BIT_ULL(NPC_IPSEC_SPI);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
		    match.mask->tos) {
			NL_SET_ERR_MSG_MOD(extack, "tos not supported");
			return -EOPNOTSUPP;
		}
		if (match.mask->ttl) {
			NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
			return -EOPNOTSUPP;
		}
		flow_spec->tos = match.key->tos;
		flow_mask->tos = match.mask->tos;
		req->features |= BIT_ULL(NPC_TOS);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		int ret;

		ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, false);
		if (ret)
			return ret;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		int ret;

		ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, true);
		if (ret)
			return ret;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);

		flow_spec->ip4dst = match.key->dst;
		flow_mask->ip4dst = match.mask->dst;
		req->features |= BIT_ULL(NPC_DIP_IPV4);

		flow_spec->ip4src = match.key->src;
		flow_mask->ip4src = match.mask->src;
		req->features |= BIT_ULL(NPC_SIP_IPV4);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);

		if (ipv6_addr_loopback(&match.key->dst) ||
		    ipv6_addr_loopback(&match.key->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow matching IPv6 loopback addr not supported");
			return -EOPNOTSUPP;
		}

		if (!ipv6_addr_any(&match.mask->dst)) {
			memcpy(&flow_spec->ip6dst,
			       (struct in6_addr *)&match.key->dst,
			       sizeof(flow_spec->ip6dst));
			memcpy(&flow_mask->ip6dst,
			       (struct in6_addr *)&match.mask->dst,
			       sizeof(flow_spec->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		if (!ipv6_addr_any(&match.mask->src)) {
			memcpy(&flow_spec->ip6src,
			       (struct in6_addr *)&match.key->src,
			       sizeof(flow_spec->ip6src));
			memcpy(&flow_mask->ip6src,
			       (struct in6_addr *)&match.mask->src,
			       sizeof(flow_spec->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);

		flow_spec->dport = match.key->dst;
		flow_mask->dport = match.mask->dst;

		if (flow_mask->dport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}

		flow_spec->sport = match.key->src;
		flow_mask->sport = match.mask->src;

		if (flow_mask->sport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp match;

		flow_rule_match_tcp(rule, &match);

		flow_spec->tcp_flags = match.key->flags;
		flow_mask->tcp_flags = match.mask->flags;
		req->features |= BIT_ULL(NPC_TCP_FLAGS);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_match_mpls match;
		u8 bit;

		flow_rule_match_mpls(rule, &match);

		if (match.mask->used_lses & OTX2_UNSUPP_LSE_DEPTH) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported LSE depth for MPLS match offload");
			return -EOPNOTSUPP;
		}

		for_each_set_bit(bit, (unsigned long *)&match.mask->used_lses,
				 FLOW_DIS_MPLS_MAX) {
			/* check if any of the fields LABEL,TC,BOS are set */
			if (*((u32 *)&match.mask->ls[bit]) &
			    OTX2_FLOWER_MASK_MPLS_NON_TTL) {
				/* Hardware will capture 4 byte MPLS header into
				 * two fields NPC_MPLSX_LBTCBOS and NPC_MPLSX_TTL.
				 * Derive the associated NPC key based on header
				 * index and offset.
				 */

				req->features |= BIT_ULL(NPC_MPLS1_LBTCBOS +
							 2 * bit);
				flow_spec->mpls_lse[bit] =
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
						   match.key->ls[bit].mpls_label) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
						   match.key->ls[bit].mpls_tc) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
						   match.key->ls[bit].mpls_bos);

				flow_mask->mpls_lse[bit] =
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
						   match.mask->ls[bit].mpls_label) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
						   match.mask->ls[bit].mpls_tc) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
						   match.mask->ls[bit].mpls_bos);
			}

			if (match.mask->ls[bit].mpls_ttl) {
				req->features |= BIT_ULL(NPC_MPLS1_TTL +
							 2 * bit);
				flow_spec->mpls_lse[bit] |=
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
						   match.key->ls[bit].mpls_ttl);
				flow_mask->mpls_lse[bit] |=
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
						   match.mask->ls[bit].mpls_ttl);
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_match_icmp match;

		flow_rule_match_icmp(rule, &match);

		flow_spec->icmp_type = match.key->type;
		flow_mask->icmp_type = match.mask->type;
		req->features |= BIT_ULL(NPC_TYPE_ICMP);

		flow_spec->icmp_code = match.key->code;
		flow_mask->icmp_code = match.mask->code;
		req->features |= BIT_ULL(NPC_CODE_ICMP);
	}
	return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}

static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_tc_flow *iter, *tmp;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
}

static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg,
							unsigned long cookie)
{
	struct otx2_tc_flow *tmp;

	list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}

	return NULL;
}

static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg,
						       int index)
{
	struct otx2_tc_flow *tmp;
	int i = 0;

	list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
		if (i == index)
			return tmp;
		i++;
	}

	return NULL;
}

static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg,
				       struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node == tmp) {
			list_del(&node->list);
			return;
		}
	}
}

static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg,
				    struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;
	int index = 0;

	/* If the flow list is empty then add the new node */
	if (list_empty(&flow_cfg->flow_list_tc)) {
		list_add(&node->list, &flow_cfg->flow_list_tc);
		return index;
	}

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node->prio < tmp->prio)
			break;
		index++;
	}

	list_add(&node->list, pos->prev);
	return index;
}

static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req)
{
	struct npc_install_flow_req *tmp_req;
	int err;

	mutex_lock(&nic->mbox.lock);
	tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!tmp_req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	memcpy(tmp_req, req, sizeof(struct npc_install_flow_req));
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	mutex_unlock(&nic->mbox.lock);
	return 0;
}

static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val)
{
	struct npc_delete_flow_rsp *rsp;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
			   entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	if (cntr_val) {
		rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								      0, &req->hdr);
		if (IS_ERR(rsp)) {
			netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n",
				   entry);
			mutex_unlock(&nic->mbox.lock);
			return -EFAULT;
		}

		*cntr_val = rsp->cntr_val;
	}

	mutex_unlock(&nic->mbox.lock);
	return 0;
}

static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic,
					     struct otx2_flow_config *flow_cfg,
					     struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;
	int i = 0, index = 0;
	u16 cntr_val = 0;

	/* Find and delete the entry from the list, then re-install all
	 * the entries that preceded it at the next higher MCAM indexes.
	 */
	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node == tmp) {
			list_del(&tmp->list);
			break;
		}

		otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
		tmp->entry++;
		tmp->req.entry = tmp->entry;
		tmp->req.cntr_val = cntr_val;
		index++;
	}

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		if (i == index)
			break;

		tmp = list_entry(pos, struct otx2_tc_flow, list);
		otx2_add_mcam_flow_entry(nic, &tmp->req);
		i++;
	}

	return 0;
}
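
/* Illustrative example of the reshuffle done below (numbers made up):
 * with max_flows = 16 and three rules already offloaded, inserting a new
 * rule whose tc priority places it at list position 2 gives
 * mcam_idx = 16 - 3 - 1 = 12; the two higher-priority rules are deleted
 * from hardware and re-installed at flow_ent[12] and flow_ent[13], and the
 * caller then installs the new rule at flow_ent[14] (the returned index),
 * while the remaining lower-priority rule keeps its entry.
 */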
static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic,
					     struct otx2_flow_config *flow_cfg,
					     struct otx2_tc_flow *node)
{
	int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1;
	struct otx2_tc_flow *tmp;
	int list_idx, i;
	u16 cntr_val = 0;

	/* Find the index of the entry (list_idx) whose priority is
	 * greater than the new entry, then re-install all entries from
	 * the beginning of the list up to list_idx at higher MCAM indexes.
	 */
	list_idx = otx2_tc_add_to_flow_list(flow_cfg, node);
	for (i = 0; i < list_idx; i++) {
		tmp = otx2_tc_get_entry_by_index(flow_cfg, i);
		if (!tmp)
			return -ENOMEM;

		otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
		tmp->entry = flow_cfg->flow_ent[mcam_idx];
		tmp->req.entry = tmp->entry;
		tmp->req.cntr_val = cntr_val;
		otx2_add_mcam_flow_entry(nic, &tmp->req);
		mcam_idx++;
	}

	return mcam_idx;
}

static int otx2_tc_update_mcam_table(struct otx2_nic *nic,
				     struct otx2_flow_config *flow_cfg,
				     struct otx2_tc_flow *node,
				     bool add_req)
{
	if (add_req)
		return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node);

	return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node);
}

static int otx2_tc_del_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct nix_mcast_grp_destroy_req *grp_destroy_req;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
	if (!flow_node) {
		netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
			   tc_flow_cmd->cookie);
		return -EINVAL;
	}

	/* Disable the TC MARK flag if there are no rules left with a
	 * skbedit mark action.
	 */
	if (flow_node->req.match_id)
		if (!refcount_dec_and_test(&flow_cfg->mark_flows))
			nic->flags &= ~OTX2_FLAG_TC_MARK_ENABLED;

	if (flow_node->is_act_police) {
		__clear_bit(flow_node->rq, &nic->rq_bmap);

		if (nic->flags & OTX2_FLAG_INTF_DOWN)
			goto free_mcam_flow;

		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
						 flow_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   flow_node->rq, flow_node->leaf_profile);

		err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   flow_node->leaf_profile);

		mutex_unlock(&nic->mbox.lock);
	}
	/* Remove the multicast/mirror related nodes */
	if (flow_node->mcast_grp_idx != MCAST_INVALID_GRP) {
		mutex_lock(&nic->mbox.lock);
		grp_destroy_req = otx2_mbox_alloc_msg_nix_mcast_grp_destroy(&nic->mbox);
		grp_destroy_req->mcast_grp_idx = flow_node->mcast_grp_idx;
		otx2_sync_mbox_msg(&nic->mbox);
		mutex_unlock(&nic->mbox.lock);
	}

free_mcam_flow:
	otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
	otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
	kfree_rcu(flow_node, rcu);
	flow_cfg->nr_flows--;
	return 0;
}
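
/* Install a flower rule. Roughly: translate matches and actions into an
 * npc_install_flow_req (otx2_tc_prepare_flow()), drop any existing rule
 * with the same cookie, reshuffle already-offloaded rules so the chosen
 * MCAM slot respects tc priority (otx2_tc_update_mcam_table()), then send
 * the install request to the AF over the mailbox.
 */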
static int otx2_tc_add_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_flow *new_node, *old_node;
	struct npc_install_flow_req *req, dummy;
	int rc, err, mcam_idx;

	if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
		return -ENOMEM;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	if (flow_cfg->nr_flows == flow_cfg->max_flows) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Free MCAM entry not available to add the flow");
		return -ENOMEM;
	}

	/* allocate memory for the new flow and its node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node)
		return -ENOMEM;
	spin_lock_init(&new_node->lock);
	new_node->cookie = tc_flow_cmd->cookie;
	new_node->prio = tc_flow_cmd->common.prio;
	new_node->mcast_grp_idx = MCAST_INVALID_GRP;

	memset(&dummy, 0, sizeof(struct npc_install_flow_req));

	rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
	if (rc) {
		kfree_rcu(new_node, rcu);
		return rc;
	}

	/* If a flow exists with the same cookie, delete it */
	old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
	if (old_node)
		otx2_tc_del_flow(nic, tc_flow_cmd);

	mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true);
	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		rc = -ENOMEM;
		goto free_leaf;
	}

	memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
	memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
	req->channel = nic->hw.rx_chan_base;
	req->entry = flow_cfg->flow_ent[mcam_idx];
	req->intf = NIX_INTF_RX;
	req->vf = nic->pcifunc;
	req->set_cntr = 1;
	new_node->entry = req->entry;

	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
		mutex_unlock(&nic->mbox.lock);
		goto free_leaf;
	}

	mutex_unlock(&nic->mbox.lock);
	memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req));

	flow_cfg->nr_flows++;
	return 0;

free_leaf:
	otx2_tc_del_from_flow_list(flow_cfg, new_node);
	kfree_rcu(new_node, rcu);
	if (new_node->is_act_police) {
		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
						 new_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   new_node->rq, new_node->leaf_profile);
		err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   new_node->leaf_profile);

		__clear_bit(new_node->rq, &nic->rq_bmap);

		mutex_unlock(&nic->mbox.lock);
	}

	return rc;
}

static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
				  struct flow_cls_offload *tc_flow_cmd)
{
	struct npc_mcam_get_stats_req *req;
	struct npc_mcam_get_stats_rsp *rsp;
	struct otx2_tc_flow_stats *stats;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie);
	if (!flow_node) {
		netdev_info(nic->netdev, "tc flow not found for cookie %lx",
			    tc_flow_cmd->cookie);
		return -EINVAL;
	}

	mutex_lock(&nic->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_node->entry;

	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&nic->mbox.lock);
		return PTR_ERR(rsp);
	}

	mutex_unlock(&nic->mbox.lock);

	if (!rsp->stat_ena)
		return -EINVAL;

	stats = &flow_node->stats;

	spin_lock(&flow_node->lock);
	flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);
	stats->pkts = rsp->stat;
	spin_unlock(&flow_node->lock);

	return 0;
}

int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
			     struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return otx2_tc_add_flow(nic, cls_flower);
	case FLOW_CLS_DESTROY:
		return otx2_tc_del_flow(nic, cls_flower);
	case FLOW_CLS_STATS:
		return otx2_tc_get_flow_stats(nic, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(otx2_setup_tc_cls_flower);
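
/* Usage sketch (illustrative only; exact syntax depends on the iproute2
 * version in use). A flower rule that reaches otx2_tc_add_flow() could be
 * added with something like:
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress protocol ip flower skip_sw \
 *           dst_ip 10.0.0.2 ip_proto tcp dst_port 80 action drop
 *
 * Note that flower offload is rejected while NTUPLE filters are enabled
 * (see otx2_setup_tc_block_ingress_cb() below).
 */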

static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
					    struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	u64 rate;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one ingress MATCHALL ratelimiter can be offloaded");
		return -ENOMEM;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		/* Ingress ratelimiting is not supported on OcteonTx2 */
		if (is_dev_otx2(nic->pdev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Ingress policing not supported on this platform");
			return -EOPNOTSUPP;
		}

		err = cn10k_alloc_matchall_ipolicer(nic);
		if (err)
			return err;

		/* Convert to bits per second */
		rate = entry->police.rate_bytes_ps * 8;
		err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action supported with Ingress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = cn10k_free_matchall_ipolicer(nic);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
	return err;
}

static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_ingress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;
	bool ntuple;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	ntuple = nic->netdev->features & NETIF_F_NTUPLE;
	switch (type) {
	case TC_SETUP_CLSFLOWER:
		if (ntuple) {
			netdev_warn(nic->netdev,
				    "Can't install TC flower offload rule when NTUPLE is active");
			return -EOPNOTSUPP;
		}

		return otx2_setup_tc_cls_flower(nic, type_data);
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_ingress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
					 struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_egress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_egress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_egress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static LIST_HEAD(otx2_block_cb_list);

static int otx2_setup_tc_block(struct net_device *netdev,
			       struct flow_block_offload *f)
{
	struct otx2_nic *nic = netdev_priv(netdev);
	flow_setup_cb_t *cb;
	bool ingress;

	if (f->block_shared)
		return -EOPNOTSUPP;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = otx2_setup_tc_block_ingress_cb;
		ingress = true;
	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = otx2_setup_tc_block_egress_cb;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
					  nic, nic, ingress);
}

int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return otx2_setup_tc_block(netdev, type_data);
	case TC_SETUP_QDISC_HTB:
		return otx2_setup_tc_htb(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(otx2_setup_tc);

int otx2_init_tc(struct otx2_nic *nic)
{
	/* Exclude receive queue 0 from being used for police action */
	set_bit(0, &nic->rq_bmap);

	if (!nic->flow_cfg) {
		netdev_err(nic->netdev,
			   "Can't init TC, nic->flow_cfg is not setup\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_init_tc);

void otx2_shutdown_tc(struct otx2_nic *nic)
{
	otx2_destroy_tc_flow_list(nic);
}
EXPORT_SYMBOL(otx2_shutdown_tc);

static void otx2_tc_config_ingress_rule(struct otx2_nic *nic,
					struct otx2_tc_flow *node)
{
	struct npc_install_flow_req *req;

	if (otx2_tc_act_set_hw_police(nic, node))
		return;

	mutex_lock(&nic->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!req)
		goto err;

	memcpy(req, &node->req, sizeof(struct npc_install_flow_req));

	if (otx2_sync_mbox_msg(&nic->mbox))
		netdev_err(nic->netdev,
			   "Failed to install MCAM flow entry for ingress rule");
err:
	mutex_unlock(&nic->mbox.lock);
}

void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic)
{
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_flow *node;

	/* If any ingress policer rules exist for the interface then
	 * apply those rules. Ingress policer rules depend on bandwidth
	 * profiles linked to the receive queues. Since no receive queues
	 * exist when the interface is down, ingress policer rules are stored
	 * and configured in hardware after all receive queues are allocated
	 * in otx2_open.
	 */
	list_for_each_entry(node, &flow_cfg->flow_list_tc, list) {
		if (node->is_act_police)
			otx2_tc_config_ingress_rule(nic, node);
	}
}
EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules);