// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitfield.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/ipv6.h>

#include "cn10k.h"
#include "otx2_common.h"
#include "qos.h"

#define CN10K_MAX_BURST_MANTISSA	0x7FFFULL
#define CN10K_MAX_BURST_SIZE		8453888ULL

#define CN10K_TLX_BURST_MANTISSA	GENMASK_ULL(43, 29)
#define CN10K_TLX_BURST_EXPONENT	GENMASK_ULL(47, 44)

#define OTX2_UNSUPP_LSE_DEPTH		GENMASK(6, 4)

#define MCAST_INVALID_GRP		(-1U)

struct otx2_tc_flow_stats {
	u64 bytes;
	u64 pkts;
	u64 used;
};

struct otx2_tc_flow {
	struct list_head		list;
	unsigned long			cookie;
	struct rcu_head			rcu;
	struct otx2_tc_flow_stats	stats;
	spinlock_t			lock; /* lock for stats */
	u16				rq;
	u16				entry;
	u16				leaf_profile;
	bool				is_act_police;
	u32				prio;
	struct npc_install_flow_req	req;
	u32				mcast_grp_idx;
	u64				rate;
	u32				burst;
	bool				is_pps;
};

static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
				      u32 *burst_exp, u32 *burst_mantissa)
{
	int max_burst, max_mantissa;
	unsigned int tmp;

	if (is_dev_otx2(nic->pdev)) {
		max_burst = MAX_BURST_SIZE;
		max_mantissa = MAX_BURST_MANTISSA;
	} else {
		max_burst = CN10K_MAX_BURST_SIZE;
		max_mantissa = CN10K_MAX_BURST_MANTISSA;
	}

	/* Burst is calculated as
	 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
	 * Max supported burst size is 130,816 bytes.
	 */
	burst = min_t(u32, burst, max_burst);
	if (burst) {
		*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
		tmp = burst - rounddown_pow_of_two(burst);
		if (burst < max_mantissa)
			*burst_mantissa = tmp * 2;
		else
			*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
	} else {
		*burst_exp = MAX_BURST_EXPONENT;
		*burst_mantissa = max_mantissa;
	}
}
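
/* Illustrative example of the (exponent, mantissa) encoding used by the
 * burst and rate helpers in this file (a sketch of the arithmetic only,
 * not a hardware specification):
 *
 *   burst = 100000 bytes: ilog2(100000) = 16, so burst_exp = 15 and
 *   tmp = 100000 - 65536 = 34464, giving burst_mantissa = 34464 >> 8 = 134.
 *   Hardware then realises ((256 + 134) << (1 + 15)) / 256 = 99840 bytes,
 *   i.e. the requested burst rounded down to the nearest representable value.
 *
 * otx2_get_egress_rate_cfg() below applies the same scheme to a rate in Mbps.
 */
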
static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
				     u32 *mantissa, u32 *div_exp)
{
	u64 tmp;

	/* Rate calculation by hardware
	 *
	 * PIR_ADD = ((256 + mantissa) << exp) / 256
	 * rate = (2 * PIR_ADD) / ( 1 << div_exp)
	 * The resultant rate is in Mbps.
	 */

	/* 2Mbps to 100Gbps can be expressed with div_exp = 0.
	 * Setting this to '0' will ease the calculation of
	 * exponent and mantissa.
	 */
	*div_exp = 0;

	if (maxrate) {
		*exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
		tmp = maxrate - rounddown_pow_of_two(maxrate);
		if (maxrate < MAX_RATE_MANTISSA)
			*mantissa = tmp * 2;
		else
			*mantissa = tmp / (1ULL << (*exp - 7));
	} else {
		/* Instead of disabling rate limiting, set all values to max */
		*exp = MAX_RATE_EXPONENT;
		*mantissa = MAX_RATE_MANTISSA;
	}
}

u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
				u64 maxrate, u32 burst)
{
	u32 burst_exp, burst_mantissa;
	u32 exp, mantissa, div_exp;
	u64 regval = 0;

	/* Get exponent and mantissa values from the desired rate */
	otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
	otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);

	if (is_dev_otx2(nic->pdev)) {
		regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
			 FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	} else {
		regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
			 FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	}

	return regval;
}

static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
					 u32 burst, u64 maxrate)
{
	struct otx2_hw *hw = &nic->hw;
	struct nix_txschq_config *req;
	int txschq, err;

	/* All SQs share the same TL4, so pick the first scheduler */
	txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->lvl = NIX_TXSCH_LVL_TL4;
	req->num_regs = 1;
	req->reg[0] = NIX_AF_TL4X_PIR(txschq);
	req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);

	err = otx2_sync_mbox_msg(&nic->mbox);
	mutex_unlock(&nic->mbox.lock);
	return err;
}

static int otx2_tc_validate_flow(struct otx2_nic *nic,
				 struct flow_action *actions,
				 struct netlink_ext_ack *extack)
{
	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload supports only 1 policing action");
		return -EINVAL;
	}
	return 0;
}

static int otx2_policer_validate(const struct flow_action *action,
				 const struct flow_action_entry *act,
				 struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}
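
/* Example (illustrative only; the interface name is hypothetical) of a rule
 * that reaches the egress MATCHALL handler below:
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 egress matchall skip_sw \
 *           action police rate 1gbit burst 64kb
 *
 * A single police action is accepted and its rate/burst are programmed as a
 * PIR on the TL4 scheduler queue shared by all send queues.
 */
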
static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one Egress MATCHALL ratelimiter can be offloaded");
		return -ENOMEM;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		err = otx2_policer_validate(&cls->rule->action, entry, extack);
		if (err)
			return err;

		if (entry->police.rate_pkt_ps) {
			NL_SET_ERR_MSG_MOD(extack, "QoS offload does not support packets per second");
			return -EOPNOTSUPP;
		}
		err = otx2_set_matchall_egress_rate(nic, entry->police.burst,
						    otx2_convert_rate(entry->police.rate_bytes_ps));
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action is supported with Egress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = otx2_set_matchall_egress_rate(nic, 0, 0);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
	return err;
}

static int otx2_tc_act_set_hw_police(struct otx2_nic *nic,
				     struct otx2_tc_flow *node)
{
	int rc;

	mutex_lock(&nic->mbox.lock);

	rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
	if (rc) {
		mutex_unlock(&nic->mbox.lock);
		return rc;
	}

	rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile,
				     node->burst, node->rate, node->is_pps);
	if (rc)
		goto free_leaf;

	rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true);
	if (rc)
		goto free_leaf;

	mutex_unlock(&nic->mbox.lock);

	return 0;

free_leaf:
	if (cn10k_free_leaf_profile(nic, node->leaf_profile))
		netdev_err(nic->netdev,
			   "Unable to free leaf bandwidth profile(%d)\n",
			   node->leaf_profile);
	mutex_unlock(&nic->mbox.lock);
	return rc;
}

static int otx2_tc_act_set_police(struct otx2_nic *nic,
				  struct otx2_tc_flow *node,
				  struct flow_cls_offload *f,
				  u64 rate, u32 burst, u32 mark,
				  struct npc_install_flow_req *req, bool pps)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct otx2_hw *hw = &nic->hw;
	int rq_idx, rc;

	rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
	if (rq_idx >= hw->rx_queues) {
		NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
		return -EINVAL;
	}

	req->match_id = mark & 0xFFFFULL;
	req->index = rq_idx;
	req->op = NIX_RX_ACTIONOP_UCAST;

	node->is_act_police = true;
	node->rq = rq_idx;
	node->burst = burst;
	node->rate = rate;
	node->is_pps = pps;

	rc = otx2_tc_act_set_hw_police(nic, node);
	if (!rc)
		set_bit(rq_idx, &nic->rq_bmap);

	return rc;
}
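
/* Example (illustrative only; names and addresses are hypothetical) of a
 * flower rule whose police action is handled by otx2_tc_act_set_police()
 * above. Ingress policing is offloaded only on CN10K:
 *
 *   tc filter add dev eth0 ingress protocol ip flower skip_sw \
 *           dst_ip 192.168.1.10 ip_proto udp dst_port 4789 \
 *           action police rate 100mbit burst 32kb conform-exceed drop/pipe
 *
 * The matching traffic is steered to a free RQ and that RQ is mapped to a
 * leaf bandwidth profile carrying the requested rate and burst.
 */
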
static int otx2_tc_update_mcast(struct otx2_nic *nic,
				struct npc_install_flow_req *req,
				struct netlink_ext_ack *extack,
				struct otx2_tc_flow *node,
				struct nix_mcast_grp_update_req *ureq,
				u8 num_intf)
{
	struct nix_mcast_grp_update_req *grp_update_req;
	struct nix_mcast_grp_create_req *creq;
	struct nix_mcast_grp_create_rsp *crsp;
	u32 grp_index;
	int rc;

	mutex_lock(&nic->mbox.lock);
	creq = otx2_mbox_alloc_msg_nix_mcast_grp_create(&nic->mbox);
	if (!creq) {
		rc = -ENOMEM;
		goto error;
	}

	creq->dir = NIX_MCAST_INGRESS;
	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create multicast group");
		goto error;
	}

	crsp = (struct nix_mcast_grp_create_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								    0,
								    &creq->hdr);
	if (IS_ERR(crsp)) {
		rc = PTR_ERR(crsp);
		goto error;
	}

	grp_index = crsp->mcast_grp_idx;
	grp_update_req = otx2_mbox_alloc_msg_nix_mcast_grp_update(&nic->mbox);
	if (!grp_update_req) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
		rc = -ENOMEM;
		goto error;
	}

	ureq->op = NIX_MCAST_OP_ADD_ENTRY;
	ureq->mcast_grp_idx = grp_index;
	ureq->num_mce_entry = num_intf;
	ureq->pcifunc[0] = nic->pcifunc;
	ureq->channel[0] = nic->hw.tx_chan_base;

	ureq->dest_type[0] = NIX_RX_RSS;
	ureq->rq_rss_index[0] = 0;
	memcpy(&ureq->hdr, &grp_update_req->hdr, sizeof(struct mbox_msghdr));
	memcpy(grp_update_req, ureq, sizeof(struct nix_mcast_grp_update_req));

	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
		goto error;
	}

	mutex_unlock(&nic->mbox.lock);
	req->op = NIX_RX_ACTIONOP_MCAST;
	req->index = grp_index;
	node->mcast_grp_idx = grp_index;
	return 0;

error:
	mutex_unlock(&nic->mbox.lock);
	return rc;
}
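
/* Example (illustrative only; device names are hypothetical, and the mirror
 * target is assumed to be another RVU PF/VF netdev) of a mirror rule that
 * uses the multicast group created above:
 *
 *   tc filter add dev eth0 ingress flower skip_sw dst_mac 02:00:00:00:00:01 \
 *           action mirred ingress mirror dev eth1
 *
 * Each mirror target adds one MCE entry to the NIX multicast group and the
 * MCAM rule's receive action becomes NIX_RX_ACTIONOP_MCAST.
 */
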
static int otx2_tc_parse_actions(struct otx2_nic *nic,
				 struct flow_action *flow_action,
				 struct npc_install_flow_req *req,
				 struct flow_cls_offload *f,
				 struct otx2_tc_flow *node)
{
	struct nix_mcast_grp_update_req dummy_grp_update_req = { 0 };
	struct netlink_ext_ack *extack = f->common.extack;
	bool pps = false, mcast = false;
	struct flow_action_entry *act;
	struct net_device *target;
	struct otx2_nic *priv;
	struct rep_dev *rdev;
	u32 burst, mark = 0;
	u8 nr_police = 0;
	u8 num_intf = 1;
	int err, i;
	u64 rate;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
		return -EINVAL;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			req->op = NIX_RX_ACTIONOP_DROP;
			return 0;
		case FLOW_ACTION_ACCEPT:
			req->op = NIX_RX_ACTION_DEFAULT;
			return 0;
		case FLOW_ACTION_REDIRECT_INGRESS:
			target = act->dev;
			if (target->dev.parent) {
				priv = netdev_priv(target);
				if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
					NL_SET_ERR_MSG_MOD(extack,
							   "can't redirect to other pf/vf");
					return -EOPNOTSUPP;
				}
				req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
			} else {
				rdev = netdev_priv(target);
				req->vf = rdev->pcifunc & RVU_PFVF_FUNC_MASK;
			}

			/* if op is already set; avoid overwriting the same */
			if (!req->op)
				req->op = NIX_RX_ACTION_DEFAULT;
			break;

		case FLOW_ACTION_VLAN_POP:
			req->vtag0_valid = true;
			/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
			req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
			break;
		case FLOW_ACTION_POLICE:
			/* Ingress ratelimiting is not supported on OcteonTx2 */
			if (is_dev_otx2(nic->pdev)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Ingress policing not supported on this platform");
				return -EOPNOTSUPP;
			}

			err = otx2_policer_validate(flow_action, act, extack);
			if (err)
				return err;

			if (act->police.rate_bytes_ps > 0) {
				rate = act->police.rate_bytes_ps * 8;
				burst = act->police.burst;
			} else if (act->police.rate_pkt_ps > 0) {
				/* The algorithm used to calculate rate
				 * mantissa, exponent values for a given token
				 * rate (token can be byte or packet) requires
				 * token rate to be multiplied by 8.
				 */
				rate = act->police.rate_pkt_ps * 8;
				burst = act->police.burst_pkt;
				pps = true;
			}
			nr_police++;
			break;
		case FLOW_ACTION_MARK:
			if (act->mark & ~OTX2_RX_MATCH_ID_MASK) {
				NL_SET_ERR_MSG_MOD(extack, "Bad flow mark, only 16 bit supported");
				return -EOPNOTSUPP;
			}
			mark = act->mark;
			req->match_id = mark & OTX2_RX_MATCH_ID_MASK;
			req->op = NIX_RX_ACTION_DEFAULT;
			nic->flags |= OTX2_FLAG_TC_MARK_ENABLED;
			refcount_inc(&nic->flow_cfg->mark_flows);
			break;

		case FLOW_ACTION_RX_QUEUE_MAPPING:
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = act->rx_queue;
			break;

		case FLOW_ACTION_MIRRED_INGRESS:
			target = act->dev;
			priv = netdev_priv(target);
			dummy_grp_update_req.pcifunc[num_intf] = priv->pcifunc;
			dummy_grp_update_req.channel[num_intf] = priv->hw.tx_chan_base;
			dummy_grp_update_req.dest_type[num_intf] = NIX_RX_RSS;
			dummy_grp_update_req.rq_rss_index[num_intf] = 0;
			mcast = true;
			num_intf++;
			break;

		default:
			return -EOPNOTSUPP;
		}
	}

	if (mcast) {
		err = otx2_tc_update_mcast(nic, req, extack, node,
					   &dummy_grp_update_req,
					   num_intf);
		if (err)
			return err;
	}

	if (nr_police > 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "rate limit police offload requires a single action");
		return -EOPNOTSUPP;
	}

	if (nr_police)
		return otx2_tc_act_set_police(nic, node, f, rate, burst,
					      mark, req, pps);

	return 0;
}
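
/* Example (illustrative only) of an outer VLAN match handled by
 * otx2_tc_process_vlan() below:
 *
 *   tc filter add dev eth0 ingress protocol 802.1q flower skip_sw \
 *           vlan_id 100 vlan_prio 3 action drop
 *
 * vlan_id, vlan_dei and vlan_prio are packed into a 16-bit TCI and matched
 * through NPC_OUTER_VID (NPC_INNER_VID is used for cvlan matches).
 */
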
static int otx2_tc_process_vlan(struct otx2_nic *nic, struct flow_msg *flow_spec,
				struct flow_msg *flow_mask, struct flow_rule *rule,
				struct npc_install_flow_req *req, bool is_inner)
{
	struct flow_match_vlan match;
	u16 vlan_tci, vlan_tci_mask;

	if (is_inner)
		flow_rule_match_cvlan(rule, &match);
	else
		flow_rule_match_vlan(rule, &match);

	if (!eth_type_vlan(match.key->vlan_tpid)) {
		netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
			   ntohs(match.key->vlan_tpid));
		return -EOPNOTSUPP;
	}

	if (!match.mask->vlan_id) {
		struct flow_action_entry *act;
		int i;

		flow_action_for_each(i, act, &rule->action) {
			if (act->id == FLOW_ACTION_DROP) {
				netdev_err(nic->netdev,
					   "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n",
					   ntohs(match.key->vlan_tpid), match.key->vlan_id);
				return -EOPNOTSUPP;
			}
		}
	}

	if (match.mask->vlan_id ||
	    match.mask->vlan_dei ||
	    match.mask->vlan_priority) {
		vlan_tci = match.key->vlan_id |
			   match.key->vlan_dei << 12 |
			   match.key->vlan_priority << 13;

		vlan_tci_mask = match.mask->vlan_id |
				match.mask->vlan_dei << 12 |
				match.mask->vlan_priority << 13;
		if (is_inner) {
			flow_spec->vlan_itci = htons(vlan_tci);
			flow_mask->vlan_itci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_INNER_VID);
		} else {
			flow_spec->vlan_tci = htons(vlan_tci);
			flow_mask->vlan_tci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}
	}

	return 0;
}

static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
				struct flow_cls_offload *f,
				struct npc_install_flow_req *req)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_msg *flow_spec = &req->packet;
	struct flow_msg *flow_mask = &req->mask;
	struct flow_dissector *dissector;
	struct flow_rule *rule;
	u8 ip_proto = 0;

	rule = flow_cls_offload_flow_rule(f);
	dissector = rule->match.dissector;

	if ((dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPSEC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
		netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		/* All EtherTypes can be matched, no hw limitation */
		flow_spec->etype = match.key->n_proto;
		flow_mask->etype = match.mask->n_proto;
		req->features |= BIT_ULL(NPC_ETYPE);

		if (match.mask->ip_proto &&
		    (match.key->ip_proto != IPPROTO_TCP &&
		     match.key->ip_proto != IPPROTO_UDP &&
		     match.key->ip_proto != IPPROTO_SCTP &&
		     match.key->ip_proto != IPPROTO_ICMP &&
		     match.key->ip_proto != IPPROTO_ESP &&
		     match.key->ip_proto != IPPROTO_AH &&
		     match.key->ip_proto != IPPROTO_ICMPV6)) {
			netdev_info(nic->netdev,
				    "ip_proto=0x%x not supported\n",
				    match.key->ip_proto);
			return -EOPNOTSUPP;
		}
		if (match.mask->ip_proto)
			ip_proto = match.key->ip_proto;

		if (ip_proto == IPPROTO_UDP)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (ip_proto == IPPROTO_TCP)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else if (ip_proto == IPPROTO_SCTP)
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		else if (ip_proto == IPPROTO_ICMP)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
		else if (ip_proto == IPPROTO_ICMPV6)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
		else if (ip_proto == IPPROTO_ESP)
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		else if (ip_proto == IPPROTO_AH)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;
		u32 val;

		flow_rule_match_control(rule, &match);

		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			val = match.key->flags & FLOW_DIS_IS_FRAGMENT;
			if (ntohs(flow_spec->etype) == ETH_P_IP) {
				flow_spec->ip_flag = val ? IPV4_FLAG_MORE : 0;
				flow_mask->ip_flag = IPV4_FLAG_MORE;
				req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
			} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
				flow_spec->next_header = val ?
							 IPPROTO_FRAGMENT : 0;
				flow_mask->next_header = 0xff;
				req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
			} else {
				NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 or IPv6");
				return -EOPNOTSUPP;
			}
		}

		if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT,
						     match.mask->flags, extack))
			return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		if (!is_zero_ether_addr(match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
			return -EOPNOTSUPP;
		}

		if (!is_zero_ether_addr(match.mask->dst)) {
			ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
			ether_addr_copy(flow_mask->dmac,
					(u8 *)&match.mask->dst);
			req->features |= BIT_ULL(NPC_DMAC);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPSEC)) {
		struct flow_match_ipsec match;

		flow_rule_match_ipsec(rule, &match);
		if (!match.mask->spi) {
			NL_SET_ERR_MSG_MOD(extack, "spi index not specified");
			return -EOPNOTSUPP;
		}
		if (ip_proto != IPPROTO_ESP &&
		    ip_proto != IPPROTO_AH) {
			NL_SET_ERR_MSG_MOD(extack,
					   "SPI index is valid only for ESP/AH proto");
			return -EOPNOTSUPP;
		}

		flow_spec->spi = match.key->spi;
		flow_mask->spi = match.mask->spi;
		req->features |= BIT_ULL(NPC_IPSEC_SPI);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
		    match.mask->tos) {
			NL_SET_ERR_MSG_MOD(extack, "tos not supported");
			return -EOPNOTSUPP;
		}
		if (match.mask->ttl) {
			NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
			return -EOPNOTSUPP;
		}
		flow_spec->tos = match.key->tos;
		flow_mask->tos = match.mask->tos;
		req->features |= BIT_ULL(NPC_TOS);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		int ret;

		ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, false);
		if (ret)
			return ret;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		int ret;

		ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, true);
		if (ret)
			return ret;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);

		flow_spec->ip4dst = match.key->dst;
		flow_mask->ip4dst = match.mask->dst;
		req->features |= BIT_ULL(NPC_DIP_IPV4);

		flow_spec->ip4src = match.key->src;
		flow_mask->ip4src = match.mask->src;
		req->features |= BIT_ULL(NPC_SIP_IPV4);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);

		if (ipv6_addr_loopback(&match.key->dst) ||
		    ipv6_addr_loopback(&match.key->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow matching IPv6 loopback addr not supported");
			return -EOPNOTSUPP;
		}

		if (!ipv6_addr_any(&match.mask->dst)) {
			memcpy(&flow_spec->ip6dst,
			       (struct in6_addr *)&match.key->dst,
			       sizeof(flow_spec->ip6dst));
			memcpy(&flow_mask->ip6dst,
			       (struct in6_addr *)&match.mask->dst,
			       sizeof(flow_spec->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		if (!ipv6_addr_any(&match.mask->src)) {
			memcpy(&flow_spec->ip6src,
			       (struct in6_addr *)&match.key->src,
			       sizeof(flow_spec->ip6src));
			memcpy(&flow_mask->ip6src,
			       (struct in6_addr *)&match.mask->src,
			       sizeof(flow_spec->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);

		flow_spec->dport = match.key->dst;
		flow_mask->dport = match.mask->dst;

		if (flow_mask->dport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}

		flow_spec->sport = match.key->src;
		flow_mask->sport = match.mask->src;

		if (flow_mask->sport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp match;

		flow_rule_match_tcp(rule, &match);

		flow_spec->tcp_flags = match.key->flags;
		flow_mask->tcp_flags = match.mask->flags;
		req->features |= BIT_ULL(NPC_TCP_FLAGS);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_match_mpls match;
		u8 bit;

		flow_rule_match_mpls(rule, &match);

		if (match.mask->used_lses & OTX2_UNSUPP_LSE_DEPTH) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported LSE depth for MPLS match offload");
			return -EOPNOTSUPP;
		}

		for_each_set_bit(bit, (unsigned long *)&match.mask->used_lses,
				 FLOW_DIS_MPLS_MAX) {
			/* check if any of the fields LABEL,TC,BOS are set */
			if (*((u32 *)&match.mask->ls[bit]) &
			    OTX2_FLOWER_MASK_MPLS_NON_TTL) {
				/* Hardware will capture 4 byte MPLS header into
				 * two fields NPC_MPLSX_LBTCBOS and NPC_MPLSX_TTL.
				 * Derive the associated NPC key based on header
				 * index and offset.
				 */
				req->features |= BIT_ULL(NPC_MPLS1_LBTCBOS +
							 2 * bit);
				flow_spec->mpls_lse[bit] =
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
						   match.key->ls[bit].mpls_label) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
						   match.key->ls[bit].mpls_tc) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
						   match.key->ls[bit].mpls_bos);

				flow_mask->mpls_lse[bit] =
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
						   match.mask->ls[bit].mpls_label) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
						   match.mask->ls[bit].mpls_tc) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
						   match.mask->ls[bit].mpls_bos);
			}

			if (match.mask->ls[bit].mpls_ttl) {
				req->features |= BIT_ULL(NPC_MPLS1_TTL +
							 2 * bit);
				flow_spec->mpls_lse[bit] |=
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
						   match.key->ls[bit].mpls_ttl);
				flow_mask->mpls_lse[bit] |=
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
						   match.mask->ls[bit].mpls_ttl);
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_match_icmp match;

		flow_rule_match_icmp(rule, &match);

		flow_spec->icmp_type = match.key->type;
		flow_mask->icmp_type = match.mask->type;
		req->features |= BIT_ULL(NPC_TYPE_ICMP);

		flow_spec->icmp_code = match.key->code;
		flow_mask->icmp_code = match.mask->code;
		req->features |= BIT_ULL(NPC_CODE_ICMP);
	}
	return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}

static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_tc_flow *iter, *tmp;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
}

static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg,
							unsigned long cookie)
{
	struct otx2_tc_flow *tmp;

	list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}

	return NULL;
}

static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg,
						       int index)
{
	struct otx2_tc_flow *tmp;
	int i = 0;

	list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
		if (i == index)
			return tmp;
		i++;
	}

	return NULL;
}

static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg,
				       struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node == tmp) {
			list_del(&node->list);
			return;
		}
	}
}
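
/* The tc flow list is kept sorted by ascending tc priority. The helper below
 * returns the position at which the new node was inserted; the MCAM update
 * helpers use that index to decide how many existing entries need to be
 * re-installed at different MCAM indexes.
 */
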
static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg,
				    struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;
	int index = 0;

	/* If the flow list is empty then add the new node */
	if (list_empty(&flow_cfg->flow_list_tc)) {
		list_add(&node->list, &flow_cfg->flow_list_tc);
		return index;
	}

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node->prio < tmp->prio)
			break;
		index++;
	}

	list_add(&node->list, pos->prev);
	return index;
}

static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req)
{
	struct npc_install_flow_req *tmp_req;
	int err;

	mutex_lock(&nic->mbox.lock);
	tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!tmp_req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	memcpy(tmp_req, req, sizeof(struct npc_install_flow_req));
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	mutex_unlock(&nic->mbox.lock);
	return 0;
}

static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val)
{
	struct npc_delete_flow_rsp *rsp;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
			   entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	if (cntr_val) {
		rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								      0, &req->hdr);
		if (IS_ERR(rsp)) {
			netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n",
				   entry);
			mutex_unlock(&nic->mbox.lock);
			return -EFAULT;
		}

		*cntr_val = rsp->cntr_val;
	}

	mutex_unlock(&nic->mbox.lock);
	return 0;
}

static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic,
					     struct otx2_flow_config *flow_cfg,
					     struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;
	int i = 0, index = 0;
	u16 cntr_val = 0;

	/* Find and delete the entry from the list and re-install
	 * all the entries from beginning to the index of the
	 * deleted entry to higher mcam indexes.
	 */
	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node == tmp) {
			list_del(&tmp->list);
			break;
		}

		otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
		tmp->entry++;
		tmp->req.entry = tmp->entry;
		tmp->req.cntr_val = cntr_val;
		index++;
	}

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		if (i == index)
			break;

		tmp = list_entry(pos, struct otx2_tc_flow, list);
		otx2_add_mcam_flow_entry(nic, &tmp->req);
		i++;
	}

	return 0;
}

static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic,
					     struct otx2_flow_config *flow_cfg,
					     struct otx2_tc_flow *node)
{
	int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1;
	struct otx2_tc_flow *tmp;
	int list_idx, i;
	u16 cntr_val = 0;

	/* Find the index of the entry(list_idx) whose priority
	 * is greater than the new entry and re-install all
	 * the entries from beginning to list_idx to higher
	 * mcam indexes.
	 */
	list_idx = otx2_tc_add_to_flow_list(flow_cfg, node);
	for (i = 0; i < list_idx; i++) {
		tmp = otx2_tc_get_entry_by_index(flow_cfg, i);
		if (!tmp)
			return -ENOMEM;

		otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
		tmp->entry = flow_cfg->flow_ent[mcam_idx];
		tmp->req.entry = tmp->entry;
		tmp->req.cntr_val = cntr_val;
		otx2_add_mcam_flow_entry(nic, &tmp->req);
		mcam_idx++;
	}

	return mcam_idx;
}

static int otx2_tc_update_mcam_table(struct otx2_nic *nic,
				     struct otx2_flow_config *flow_cfg,
				     struct otx2_tc_flow *node,
				     bool add_req)
{
	if (add_req)
		return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node);

	return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node);
}

static int otx2_tc_del_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct nix_mcast_grp_destroy_req *grp_destroy_req;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
	if (!flow_node) {
		netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
			   tc_flow_cmd->cookie);
		return -EINVAL;
	}

	/* Disable TC MARK flag if there are no rules with skbedit mark action */
	if (flow_node->req.match_id)
		if (!refcount_dec_and_test(&flow_cfg->mark_flows))
			nic->flags &= ~OTX2_FLAG_TC_MARK_ENABLED;

	if (flow_node->is_act_police) {
		__clear_bit(flow_node->rq, &nic->rq_bmap);

		if (nic->flags & OTX2_FLAG_INTF_DOWN)
			goto free_mcam_flow;

		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
						 flow_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   flow_node->rq, flow_node->leaf_profile);

		err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   flow_node->leaf_profile);

		mutex_unlock(&nic->mbox.lock);
	}
	/* Remove the multicast/mirror related nodes */
	if (flow_node->mcast_grp_idx != MCAST_INVALID_GRP) {
		mutex_lock(&nic->mbox.lock);
		grp_destroy_req = otx2_mbox_alloc_msg_nix_mcast_grp_destroy(&nic->mbox);
		grp_destroy_req->mcast_grp_idx = flow_node->mcast_grp_idx;
		otx2_sync_mbox_msg(&nic->mbox);
		mutex_unlock(&nic->mbox.lock);
	}

free_mcam_flow:
	otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
	otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
	kfree_rcu(flow_node, rcu);
	flow_cfg->nr_flows--;
	return 0;
}
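
/* Install a tc flower rule: the matches/actions are first parsed into a
 * local npc_install_flow_req, any existing rule with the same cookie is
 * deleted, an MCAM entry matching the rule's priority ordering is picked,
 * and the final request is sent to the AF mailbox.
 */
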
static int otx2_tc_add_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_flow *new_node, *old_node;
	struct npc_install_flow_req *req, dummy;
	int rc, err, mcam_idx;

	if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
		return -ENOMEM;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	if (flow_cfg->nr_flows == flow_cfg->max_flows) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Free MCAM entry not available to add the flow");
		return -ENOMEM;
	}

	/* allocate memory for the new flow and its node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node)
		return -ENOMEM;
	spin_lock_init(&new_node->lock);
	new_node->cookie = tc_flow_cmd->cookie;
	new_node->prio = tc_flow_cmd->common.prio;
	new_node->mcast_grp_idx = MCAST_INVALID_GRP;

	memset(&dummy, 0, sizeof(struct npc_install_flow_req));

	rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
	if (rc) {
		kfree_rcu(new_node, rcu);
		return rc;
	}

	/* If a flow exists with the same cookie, delete it */
	old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
	if (old_node)
		otx2_tc_del_flow(nic, tc_flow_cmd);

	mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true);
	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		rc = -ENOMEM;
		goto free_leaf;
	}

	memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
	memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
	req->channel = nic->hw.rx_chan_base;
	req->entry = flow_cfg->flow_ent[mcam_idx];
	req->intf = NIX_INTF_RX;
	req->vf = nic->pcifunc;
	req->set_cntr = 1;
	new_node->entry = req->entry;

	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
		mutex_unlock(&nic->mbox.lock);
		goto free_leaf;
	}

	mutex_unlock(&nic->mbox.lock);
	memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req));

	flow_cfg->nr_flows++;
	return 0;

free_leaf:
	otx2_tc_del_from_flow_list(flow_cfg, new_node);
	if (new_node->is_act_police) {
		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
						 new_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   new_node->rq, new_node->leaf_profile);
		err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   new_node->leaf_profile);

		__clear_bit(new_node->rq, &nic->rq_bmap);

		mutex_unlock(&nic->mbox.lock);
	}
	kfree_rcu(new_node, rcu);

	return rc;
}
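
/* MCAM entry counters provide a packet count only, so byte and drop
 * statistics are reported to tc as zero; the per-node stats cache keeps the
 * last hardware reading so that only the delta since the previous query is
 * reported.
 */
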
static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
				  struct flow_cls_offload *tc_flow_cmd)
{
	struct npc_mcam_get_stats_req *req;
	struct npc_mcam_get_stats_rsp *rsp;
	struct otx2_tc_flow_stats *stats;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie);
	if (!flow_node) {
		netdev_info(nic->netdev, "tc flow not found for cookie %lx",
			    tc_flow_cmd->cookie);
		return -EINVAL;
	}

	mutex_lock(&nic->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_node->entry;

	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp
		(&nic->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&nic->mbox.lock);
		return PTR_ERR(rsp);
	}

	mutex_unlock(&nic->mbox.lock);

	if (!rsp->stat_ena)
		return -EINVAL;

	stats = &flow_node->stats;

	spin_lock(&flow_node->lock);
	flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);
	stats->pkts = rsp->stat;
	spin_unlock(&flow_node->lock);

	return 0;
}

int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
			     struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return otx2_tc_add_flow(nic, cls_flower);
	case FLOW_CLS_DESTROY:
		return otx2_tc_del_flow(nic, cls_flower);
	case FLOW_CLS_STATS:
		return otx2_tc_get_flow_stats(nic, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(otx2_setup_tc_cls_flower);

static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
					    struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	u64 rate;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one ingress MATCHALL ratelimiter can be offloaded");
		return -ENOMEM;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		/* Ingress ratelimiting is not supported on OcteonTx2 */
		if (is_dev_otx2(nic->pdev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Ingress policing not supported on this platform");
			return -EOPNOTSUPP;
		}

		err = cn10k_alloc_matchall_ipolicer(nic);
		if (err)
			return err;

		/* Convert to bits per second */
		rate = entry->police.rate_bytes_ps * 8;
		err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action supported with Ingress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = cn10k_free_matchall_ipolicer(nic);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
	return err;
}

static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_ingress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;
	bool ntuple;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	ntuple = nic->netdev->features & NETIF_F_NTUPLE;
	switch (type) {
	case TC_SETUP_CLSFLOWER:
		if (ntuple) {
			netdev_warn(nic->netdev,
				    "Can't install TC flower offload rule when NTUPLE is active");
			return -EOPNOTSUPP;
		}

		return otx2_setup_tc_cls_flower(nic, type_data);
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_ingress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
					 struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_egress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_egress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_egress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static LIST_HEAD(otx2_block_cb_list);

static int otx2_setup_tc_block(struct net_device *netdev,
			       struct flow_block_offload *f)
{
	struct otx2_nic *nic = netdev_priv(netdev);
	flow_setup_cb_t *cb;
	bool ingress;

	if (f->block_shared)
		return -EOPNOTSUPP;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = otx2_setup_tc_block_ingress_cb;
		ingress = true;
	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = otx2_setup_tc_block_egress_cb;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
					  nic, nic, ingress);
}

int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return otx2_setup_tc_block(netdev, type_data);
	case TC_SETUP_QDISC_HTB:
		return otx2_setup_tc_htb(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(otx2_setup_tc);

int otx2_init_tc(struct otx2_nic *nic)
{
	/* Exclude receive queue 0 from being used for police action */
	set_bit(0, &nic->rq_bmap);

	if (!nic->flow_cfg) {
		netdev_err(nic->netdev,
			   "Can't init TC, nic->flow_cfg is not setup\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_init_tc);

void otx2_shutdown_tc(struct otx2_nic *nic)
{
	otx2_destroy_tc_flow_list(nic);
}
EXPORT_SYMBOL(otx2_shutdown_tc);

static void otx2_tc_config_ingress_rule(struct otx2_nic *nic,
					struct otx2_tc_flow *node)
{
	struct npc_install_flow_req *req;

	if (otx2_tc_act_set_hw_police(nic, node))
		return;

	mutex_lock(&nic->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!req)
		goto err;

	memcpy(req, &node->req, sizeof(struct npc_install_flow_req));

	if (otx2_sync_mbox_msg(&nic->mbox))
		netdev_err(nic->netdev,
			   "Failed to install MCAM flow entry for ingress rule");
err:
	mutex_unlock(&nic->mbox.lock);
}

void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic)
{
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_flow *node;

	/* If any ingress policer rules exist for the interface then
	 * apply those rules. Ingress policer rules depend on bandwidth
	 * profiles linked to the receive queues. Since no receive queues
	 * exist when the interface is down, ingress policer rules are stored
	 * and configured in hardware after all receive queues are allocated
	 * in otx2_open.
	 */
	list_for_each_entry(node, &flow_cfg->flow_list_tc, list) {
		if (node->is_act_police)
			otx2_tc_config_ingress_rule(nic, node);
	}
}
EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules);