// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitfield.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/ipv6.h>

#include "cn10k.h"
#include "otx2_common.h"
#include "qos.h"

#define CN10K_MAX_BURST_MANTISSA	0x7FFFULL
#define CN10K_MAX_BURST_SIZE		8453888ULL

#define CN10K_TLX_BURST_MANTISSA	GENMASK_ULL(43, 29)
#define CN10K_TLX_BURST_EXPONENT	GENMASK_ULL(47, 44)

#define OTX2_UNSUPP_LSE_DEPTH		GENMASK(6, 4)

#define MCAST_INVALID_GRP		(-1U)

struct otx2_tc_flow_stats {
	u64 bytes;
	u64 pkts;
	u64 used;
};

struct otx2_tc_flow {
	struct list_head		list;
	unsigned long			cookie;
	struct rcu_head			rcu;
	struct otx2_tc_flow_stats	stats;
	spinlock_t			lock; /* lock for stats */
	u16				rq;
	u16				entry;
	u16				leaf_profile;
	bool				is_act_police;
	u32				prio;
	struct npc_install_flow_req	req;
	u32				mcast_grp_idx;
	u64				rate;
	u32				burst;
	bool				is_pps;
};

static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
				      u32 *burst_exp, u32 *burst_mantissa)
{
	int max_burst, max_mantissa;
	unsigned int tmp;

	if (is_dev_otx2(nic->pdev)) {
		max_burst = MAX_BURST_SIZE;
		max_mantissa = MAX_BURST_MANTISSA;
	} else {
		max_burst = CN10K_MAX_BURST_SIZE;
		max_mantissa = CN10K_MAX_BURST_MANTISSA;
	}

	/* Burst is calculated as
	 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
	 * Max supported burst size is 130,816 bytes.
	 */
	burst = min_t(u32, burst, max_burst);
	if (burst) {
		*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
		tmp = burst - rounddown_pow_of_two(burst);
		if (burst < max_mantissa)
			*burst_mantissa = tmp * 2;
		else
			*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
	} else {
		*burst_exp = MAX_BURST_EXPONENT;
		*burst_mantissa = max_mantissa;
	}
}

static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
				     u32 *mantissa, u32 *div_exp)
{
	u64 tmp;

	/* Rate calculation by hardware
	 *
	 * PIR_ADD = ((256 + mantissa) << exp) / 256
	 * rate = (2 * PIR_ADD) / ( 1 << div_exp)
	 * The resultant rate is in Mbps.
	 */

	/* 2Mbps to 100Gbps can be expressed with div_exp = 0.
	 * Setting this to '0' will ease the calculation of
	 * exponent and mantissa.
	 */
	*div_exp = 0;

	if (maxrate) {
		*exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
		tmp = maxrate - rounddown_pow_of_two(maxrate);
		if (maxrate < MAX_RATE_MANTISSA)
			*mantissa = tmp * 2;
		else
			*mantissa = tmp / (1ULL << (*exp - 7));
	} else {
		/* Instead of disabling rate limiting, set all values to max */
		*exp = MAX_RATE_EXPONENT;
		*mantissa = MAX_RATE_MANTISSA;
	}
}

u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
				u64 maxrate, u32 burst)
{
	u32 burst_exp, burst_mantissa;
	u32 exp, mantissa, div_exp;
	u64 regval = 0;

	/* Get exponent and mantissa values from the desired rate */
	otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
	otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);

	if (is_dev_otx2(nic->pdev)) {
		regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
			 FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	} else {
		regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
			 FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	}

	return regval;
}

static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
					 u32 burst, u64 maxrate)
{
	struct otx2_hw *hw = &nic->hw;
	struct nix_txschq_config *req;
	int txschq, err;

	/* All SQs share the same TL4, so pick the first scheduler */
	txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->lvl = NIX_TXSCH_LVL_TL4;
	req->num_regs = 1;
	req->reg[0] = NIX_AF_TL4X_PIR(txschq);
	req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);

	err = otx2_sync_mbox_msg(&nic->mbox);
	mutex_unlock(&nic->mbox.lock);
	return err;
}

static int otx2_tc_validate_flow(struct otx2_nic *nic,
				 struct flow_action *actions,
				 struct netlink_ext_ack *extack)
{
	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload supports only 1 policing action");
		return -EINVAL;
	}
	return 0;
}

static int otx2_policer_validate(const struct flow_action *action,
				 const struct flow_action_entry *act,
				 struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
action is not last"); 220 return -EOPNOTSUPP; 221 } 222 223 if (act->police.peakrate_bytes_ps || 224 act->police.avrate || act->police.overhead) { 225 NL_SET_ERR_MSG_MOD(extack, 226 "Offload not supported when peakrate/avrate/overhead is configured"); 227 return -EOPNOTSUPP; 228 } 229 230 return 0; 231 } 232 233 static int otx2_tc_egress_matchall_install(struct otx2_nic *nic, 234 struct tc_cls_matchall_offload *cls) 235 { 236 struct netlink_ext_ack *extack = cls->common.extack; 237 struct flow_action *actions = &cls->rule->action; 238 struct flow_action_entry *entry; 239 int err; 240 241 err = otx2_tc_validate_flow(nic, actions, extack); 242 if (err) 243 return err; 244 245 if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) { 246 NL_SET_ERR_MSG_MOD(extack, 247 "Only one Egress MATCHALL ratelimiter can be offloaded"); 248 return -ENOMEM; 249 } 250 251 entry = &cls->rule->action.entries[0]; 252 switch (entry->id) { 253 case FLOW_ACTION_POLICE: 254 err = otx2_policer_validate(&cls->rule->action, entry, extack); 255 if (err) 256 return err; 257 258 if (entry->police.rate_pkt_ps) { 259 NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second"); 260 return -EOPNOTSUPP; 261 } 262 err = otx2_set_matchall_egress_rate(nic, entry->police.burst, 263 otx2_convert_rate(entry->police.rate_bytes_ps)); 264 if (err) 265 return err; 266 nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; 267 break; 268 default: 269 NL_SET_ERR_MSG_MOD(extack, 270 "Only police action is supported with Egress MATCHALL offload"); 271 return -EOPNOTSUPP; 272 } 273 274 return 0; 275 } 276 277 static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic, 278 struct tc_cls_matchall_offload *cls) 279 { 280 struct netlink_ext_ack *extack = cls->common.extack; 281 int err; 282 283 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 284 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 285 return -EINVAL; 286 } 287 288 err = otx2_set_matchall_egress_rate(nic, 0, 0); 289 nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; 290 return err; 291 } 292 293 static int otx2_tc_act_set_hw_police(struct otx2_nic *nic, 294 struct otx2_tc_flow *node) 295 { 296 int rc; 297 298 mutex_lock(&nic->mbox.lock); 299 300 rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile); 301 if (rc) { 302 mutex_unlock(&nic->mbox.lock); 303 return rc; 304 } 305 306 rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, 307 node->burst, node->rate, node->is_pps); 308 if (rc) 309 goto free_leaf; 310 311 rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true); 312 if (rc) 313 goto free_leaf; 314 315 mutex_unlock(&nic->mbox.lock); 316 317 return 0; 318 319 free_leaf: 320 if (cn10k_free_leaf_profile(nic, node->leaf_profile)) 321 netdev_err(nic->netdev, 322 "Unable to free leaf bandwidth profile(%d)\n", 323 node->leaf_profile); 324 mutex_unlock(&nic->mbox.lock); 325 return rc; 326 } 327 328 static int otx2_tc_act_set_police(struct otx2_nic *nic, 329 struct otx2_tc_flow *node, 330 struct flow_cls_offload *f, 331 u64 rate, u32 burst, u32 mark, 332 struct npc_install_flow_req *req, bool pps) 333 { 334 struct netlink_ext_ack *extack = f->common.extack; 335 struct otx2_hw *hw = &nic->hw; 336 int rq_idx, rc; 337 338 rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues); 339 if (rq_idx >= hw->rx_queues) { 340 NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded"); 341 return -EINVAL; 342 } 343 344 req->match_id = mark & 0xFFFFULL; 345 req->index = rq_idx; 346 req->op = NIX_RX_ACTIONOP_UCAST; 347 348 node->is_act_police = true; 349 node->rq = 
	node->burst = burst;
	node->rate = rate;
	node->is_pps = pps;

	rc = otx2_tc_act_set_hw_police(nic, node);
	if (!rc)
		set_bit(rq_idx, &nic->rq_bmap);

	return rc;
}

static int otx2_tc_update_mcast(struct otx2_nic *nic,
				struct npc_install_flow_req *req,
				struct netlink_ext_ack *extack,
				struct otx2_tc_flow *node,
				struct nix_mcast_grp_update_req *ureq,
				u8 num_intf)
{
	struct nix_mcast_grp_update_req *grp_update_req;
	struct nix_mcast_grp_create_req *creq;
	struct nix_mcast_grp_create_rsp *crsp;
	u32 grp_index;
	int rc;

	mutex_lock(&nic->mbox.lock);
	creq = otx2_mbox_alloc_msg_nix_mcast_grp_create(&nic->mbox);
	if (!creq) {
		rc = -ENOMEM;
		goto error;
	}

	creq->dir = NIX_MCAST_INGRESS;
	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create multicast group");
		goto error;
	}

	crsp = (struct nix_mcast_grp_create_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								    0,
								    &creq->hdr);
	if (IS_ERR(crsp)) {
		rc = PTR_ERR(crsp);
		goto error;
	}

	grp_index = crsp->mcast_grp_idx;
	grp_update_req = otx2_mbox_alloc_msg_nix_mcast_grp_update(&nic->mbox);
	if (!grp_update_req) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
		rc = -ENOMEM;
		goto error;
	}

	ureq->op = NIX_MCAST_OP_ADD_ENTRY;
	ureq->mcast_grp_idx = grp_index;
	ureq->num_mce_entry = num_intf;
	ureq->pcifunc[0] = nic->pcifunc;
	ureq->channel[0] = nic->hw.tx_chan_base;

	ureq->dest_type[0] = NIX_RX_RSS;
	ureq->rq_rss_index[0] = 0;
	memcpy(&ureq->hdr, &grp_update_req->hdr, sizeof(struct mbox_msghdr));
	memcpy(grp_update_req, ureq, sizeof(struct nix_mcast_grp_update_req));

	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
		goto error;
	}

	mutex_unlock(&nic->mbox.lock);
	req->op = NIX_RX_ACTIONOP_MCAST;
	req->index = grp_index;
	node->mcast_grp_idx = grp_index;
	return 0;

error:
	mutex_unlock(&nic->mbox.lock);
	return rc;
}

static int otx2_tc_parse_actions(struct otx2_nic *nic,
				 struct flow_action *flow_action,
				 struct npc_install_flow_req *req,
				 struct flow_cls_offload *f,
				 struct otx2_tc_flow *node)
{
	struct nix_mcast_grp_update_req dummy_grp_update_req = { 0 };
	struct netlink_ext_ack *extack = f->common.extack;
	bool pps = false, mcast = false;
	struct flow_action_entry *act;
	struct net_device *target;
	struct otx2_nic *priv;
	u32 burst, mark = 0;
	u8 nr_police = 0;
	u8 num_intf = 1;
	int err, i;
	u64 rate;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
		return -EINVAL;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			req->op = NIX_RX_ACTIONOP_DROP;
			return 0;
		case FLOW_ACTION_ACCEPT:
			req->op = NIX_RX_ACTION_DEFAULT;
			return 0;
		case FLOW_ACTION_REDIRECT_INGRESS:
			target = act->dev;
			priv = netdev_priv(target);
			/* npc_install_flow_req doesn't support passing a target pcifunc */
			if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "can't redirect to other pf/vf");
				return -EOPNOTSUPP;
			}
			req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;

			/* if op is already set, avoid overwriting it */
			if (!req->op)
				req->op = NIX_RX_ACTION_DEFAULT;
			break;

		case FLOW_ACTION_VLAN_POP:
			req->vtag0_valid = true;
			/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
			req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
			break;
		case FLOW_ACTION_POLICE:
			/* Ingress ratelimiting is not supported on OcteonTx2 */
			if (is_dev_otx2(nic->pdev)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Ingress policing not supported on this platform");
				return -EOPNOTSUPP;
			}

			err = otx2_policer_validate(flow_action, act, extack);
			if (err)
				return err;

			if (act->police.rate_bytes_ps > 0) {
				rate = act->police.rate_bytes_ps * 8;
				burst = act->police.burst;
			} else if (act->police.rate_pkt_ps > 0) {
				/* The algorithm used to calculate rate
				 * mantissa, exponent values for a given token
				 * rate (token can be byte or packet) requires
				 * token rate to be multiplied by 8.
				 */
				rate = act->police.rate_pkt_ps * 8;
				burst = act->police.burst_pkt;
				pps = true;
			}
			nr_police++;
			break;
		case FLOW_ACTION_MARK:
			if (act->mark & ~OTX2_RX_MATCH_ID_MASK) {
				NL_SET_ERR_MSG_MOD(extack, "Bad flow mark, only 16 bits supported");
				return -EOPNOTSUPP;
			}
			mark = act->mark;
			req->match_id = mark & OTX2_RX_MATCH_ID_MASK;
			req->op = NIX_RX_ACTION_DEFAULT;
			nic->flags |= OTX2_FLAG_TC_MARK_ENABLED;
			refcount_inc(&nic->flow_cfg->mark_flows);
			break;

		case FLOW_ACTION_RX_QUEUE_MAPPING:
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = act->rx_queue;
			break;

		case FLOW_ACTION_MIRRED_INGRESS:
			target = act->dev;
			priv = netdev_priv(target);
			dummy_grp_update_req.pcifunc[num_intf] = priv->pcifunc;
			dummy_grp_update_req.channel[num_intf] = priv->hw.tx_chan_base;
			dummy_grp_update_req.dest_type[num_intf] = NIX_RX_RSS;
			dummy_grp_update_req.rq_rss_index[num_intf] = 0;
			mcast = true;
			num_intf++;
			break;

		default:
			return -EOPNOTSUPP;
		}
	}

	if (mcast) {
		err = otx2_tc_update_mcast(nic, req, extack, node,
					   &dummy_grp_update_req,
					   num_intf);
		if (err)
			return err;
	}

	if (nr_police > 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "rate limit police offload requires a single action");
		return -EOPNOTSUPP;
	}

	if (nr_police)
		return otx2_tc_act_set_police(nic, node, f, rate, burst,
					      mark, req, pps);

	return 0;
}

static int otx2_tc_process_vlan(struct otx2_nic *nic, struct flow_msg *flow_spec,
				struct flow_msg *flow_mask, struct flow_rule *rule,
				struct npc_install_flow_req *req, bool is_inner)
{
	struct flow_match_vlan match;
	u16 vlan_tci, vlan_tci_mask;

	if (is_inner)
		flow_rule_match_cvlan(rule, &match);
	else
		flow_rule_match_vlan(rule, &match);

	if (!eth_type_vlan(match.key->vlan_tpid)) {
		netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
			   ntohs(match.key->vlan_tpid));
		return -EOPNOTSUPP;
	}

	if (!match.mask->vlan_id) {
		struct flow_action_entry *act;
		int i;

		flow_action_for_each(i, act, &rule->action) {
			if (act->id == FLOW_ACTION_DROP) {
				netdev_err(nic->netdev,
					   "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n",
					   ntohs(match.key->vlan_tpid), match.key->vlan_id);
				return -EOPNOTSUPP;
			}
		}
	}

	if (match.mask->vlan_id ||
	    match.mask->vlan_dei ||
	    match.mask->vlan_priority) {
		vlan_tci = match.key->vlan_id |
			   match.key->vlan_dei << 12 |
			   match.key->vlan_priority << 13;

		vlan_tci_mask = match.mask->vlan_id |
				match.mask->vlan_dei << 12 |
				match.mask->vlan_priority << 13;
		if (is_inner) {
			flow_spec->vlan_itci = htons(vlan_tci);
			flow_mask->vlan_itci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_INNER_VID);
		} else {
			flow_spec->vlan_tci = htons(vlan_tci);
			flow_mask->vlan_tci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}
	}

	return 0;
}

static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
				struct flow_cls_offload *f,
				struct npc_install_flow_req *req)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_msg *flow_spec = &req->packet;
	struct flow_msg *flow_mask = &req->mask;
	struct flow_dissector *dissector;
	struct flow_rule *rule;
	u8 ip_proto = 0;

	rule = flow_cls_offload_flow_rule(f);
	dissector = rule->match.dissector;

	if ((dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_IPSEC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
		netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		/* All EtherTypes can be matched, no hw limitation */
		flow_spec->etype = match.key->n_proto;
		flow_mask->etype = match.mask->n_proto;
		req->features |= BIT_ULL(NPC_ETYPE);

		if (match.mask->ip_proto &&
		    (match.key->ip_proto != IPPROTO_TCP &&
		     match.key->ip_proto != IPPROTO_UDP &&
		     match.key->ip_proto != IPPROTO_SCTP &&
		     match.key->ip_proto != IPPROTO_ICMP &&
		     match.key->ip_proto != IPPROTO_ESP &&
		     match.key->ip_proto != IPPROTO_AH &&
		     match.key->ip_proto != IPPROTO_ICMPV6)) {
			netdev_info(nic->netdev,
				    "ip_proto=0x%x not supported\n",
				    match.key->ip_proto);
			return -EOPNOTSUPP;
		}
		if (match.mask->ip_proto)
			ip_proto = match.key->ip_proto;

		if (ip_proto == IPPROTO_UDP)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (ip_proto == IPPROTO_TCP)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else if (ip_proto == IPPROTO_SCTP)
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		else if (ip_proto == IPPROTO_ICMP)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
		else if (ip_proto == IPPROTO_ICMPV6)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
		else if (ip_proto == IPPROTO_ESP)
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		else if (ip_proto == IPPROTO_AH)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;
		u32 val;

		flow_rule_match_control(rule, &match);

		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			val = match.key->flags & FLOW_DIS_IS_FRAGMENT;
			if (ntohs(flow_spec->etype) == ETH_P_IP) {
				flow_spec->ip_flag = val ? IPV4_FLAG_MORE : 0;
				flow_mask->ip_flag = IPV4_FLAG_MORE;
				req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
			} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
				flow_spec->next_header = val ?
							 IPPROTO_FRAGMENT : 0;
				flow_mask->next_header = 0xff;
				req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
			} else {
				NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 or IPv6");
				return -EOPNOTSUPP;
			}
		}

		if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT,
						     match.mask->flags, extack))
			return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		if (!is_zero_ether_addr(match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
			return -EOPNOTSUPP;
		}

		if (!is_zero_ether_addr(match.mask->dst)) {
			ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
			ether_addr_copy(flow_mask->dmac,
					(u8 *)&match.mask->dst);
			req->features |= BIT_ULL(NPC_DMAC);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPSEC)) {
		struct flow_match_ipsec match;

		flow_rule_match_ipsec(rule, &match);
		if (!match.mask->spi) {
			NL_SET_ERR_MSG_MOD(extack, "spi index not specified");
			return -EOPNOTSUPP;
		}
		if (ip_proto != IPPROTO_ESP &&
		    ip_proto != IPPROTO_AH) {
			NL_SET_ERR_MSG_MOD(extack,
					   "SPI index is valid only for ESP/AH proto");
			return -EOPNOTSUPP;
		}

		flow_spec->spi = match.key->spi;
		flow_mask->spi = match.mask->spi;
		req->features |= BIT_ULL(NPC_IPSEC_SPI);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
		    match.mask->tos) {
			NL_SET_ERR_MSG_MOD(extack, "tos not supported");
			return -EOPNOTSUPP;
		}
		if (match.mask->ttl) {
			NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
			return -EOPNOTSUPP;
		}
		flow_spec->tos = match.key->tos;
		flow_mask->tos = match.mask->tos;
		req->features |= BIT_ULL(NPC_TOS);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		int ret;

		ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, false);
		if (ret)
			return ret;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		int ret;

		ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, true);
		if (ret)
			return ret;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);

		flow_spec->ip4dst = match.key->dst;
		flow_mask->ip4dst = match.mask->dst;
		req->features |= BIT_ULL(NPC_DIP_IPV4);

		flow_spec->ip4src = match.key->src;
		flow_mask->ip4src = match.mask->src;
		req->features |= BIT_ULL(NPC_SIP_IPV4);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);

		if (ipv6_addr_loopback(&match.key->dst) ||
		    ipv6_addr_loopback(&match.key->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow matching IPv6 loopback addr not supported");
			return -EOPNOTSUPP;
		}

		if (!ipv6_addr_any(&match.mask->dst)) {
			memcpy(&flow_spec->ip6dst,
			       (struct in6_addr *)&match.key->dst,
			       sizeof(flow_spec->ip6dst));
			memcpy(&flow_mask->ip6dst,
			       (struct in6_addr *)&match.mask->dst,
			       sizeof(flow_spec->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		if (!ipv6_addr_any(&match.mask->src)) {
			memcpy(&flow_spec->ip6src,
			       (struct in6_addr *)&match.key->src,
			       sizeof(flow_spec->ip6src));
			memcpy(&flow_mask->ip6src,
			       (struct in6_addr *)&match.mask->src,
			       sizeof(flow_spec->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);

		flow_spec->dport = match.key->dst;
		flow_mask->dport = match.mask->dst;

		if (flow_mask->dport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}

		flow_spec->sport = match.key->src;
		flow_mask->sport = match.mask->src;

		if (flow_mask->sport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp match;

		flow_rule_match_tcp(rule, &match);

		flow_spec->tcp_flags = match.key->flags;
		flow_mask->tcp_flags = match.mask->flags;
		req->features |= BIT_ULL(NPC_TCP_FLAGS);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_match_mpls match;
		u8 bit;

		flow_rule_match_mpls(rule, &match);

		if (match.mask->used_lses & OTX2_UNSUPP_LSE_DEPTH) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported LSE depth for MPLS match offload");
			return -EOPNOTSUPP;
		}

		for_each_set_bit(bit, (unsigned long *)&match.mask->used_lses,
				 FLOW_DIS_MPLS_MAX) {
			/* check if any of the fields LABEL, TC, BOS are set */
			if (*((u32 *)&match.mask->ls[bit]) &
			    OTX2_FLOWER_MASK_MPLS_NON_TTL) {
				/* Hardware will capture 4 byte MPLS header into
				 * two fields NPC_MPLSX_LBTCBOS and NPC_MPLSX_TTL.
				 * Derive the associated NPC key based on header
				 * index and offset.
				 */

				req->features |= BIT_ULL(NPC_MPLS1_LBTCBOS +
							 2 * bit);
				flow_spec->mpls_lse[bit] =
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
						   match.key->ls[bit].mpls_label) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
						   match.key->ls[bit].mpls_tc) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
						   match.key->ls[bit].mpls_bos);

				flow_mask->mpls_lse[bit] =
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
						   match.mask->ls[bit].mpls_label) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
						   match.mask->ls[bit].mpls_tc) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
						   match.mask->ls[bit].mpls_bos);
			}

			if (match.mask->ls[bit].mpls_ttl) {
				req->features |= BIT_ULL(NPC_MPLS1_TTL +
							 2 * bit);
				flow_spec->mpls_lse[bit] |=
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
						   match.key->ls[bit].mpls_ttl);
				flow_mask->mpls_lse[bit] |=
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
						   match.mask->ls[bit].mpls_ttl);
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_match_icmp match;

		flow_rule_match_icmp(rule, &match);

		flow_spec->icmp_type = match.key->type;
		flow_mask->icmp_type = match.mask->type;
		req->features |= BIT_ULL(NPC_TYPE_ICMP);

		flow_spec->icmp_code = match.key->code;
		flow_mask->icmp_code = match.mask->code;
		req->features |= BIT_ULL(NPC_CODE_ICMP);
	}
	return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}

static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_tc_flow *iter, *tmp;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
}

static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg,
							unsigned long cookie)
{
	struct otx2_tc_flow *tmp;

	list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}

	return NULL;
}

static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg,
						       int index)
{
	struct otx2_tc_flow *tmp;
	int i = 0;

	list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
		if (i == index)
			return tmp;
		i++;
	}

	return NULL;
}

static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg,
				       struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node == tmp) {
			list_del(&node->list);
			return;
		}
	}
}

static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg,
				    struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;
	int index = 0;

	/* If the flow list is empty then add the new node */
	if (list_empty(&flow_cfg->flow_list_tc)) {
		list_add(&node->list, &flow_cfg->flow_list_tc);
		return index;
	}

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node->prio < tmp->prio)
			break;
		index++;
	}

	list_add(&node->list, pos->prev);
	return index;
}

static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req)
{
	struct npc_install_flow_req *tmp_req;
	int err;

	mutex_lock(&nic->mbox.lock);
	tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!tmp_req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	memcpy(tmp_req, req, sizeof(struct npc_install_flow_req));
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	mutex_unlock(&nic->mbox.lock);
	return 0;
}

static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val)
{
	struct npc_delete_flow_rsp *rsp;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
			   entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	if (cntr_val) {
		rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								      0, &req->hdr);
		if (IS_ERR(rsp)) {
			netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n",
				   entry);
			mutex_unlock(&nic->mbox.lock);
			return -EFAULT;
		}

		*cntr_val = rsp->cntr_val;
	}

	mutex_unlock(&nic->mbox.lock);
	return 0;
}

static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic,
					     struct otx2_flow_config *flow_cfg,
					     struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;
	int i = 0, index = 0;
	u16 cntr_val = 0;

	/* Find and delete the entry from the list and re-install
	 * all the entries from beginning to the index of the
	 * deleted entry to higher mcam indexes.
	 */
	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node == tmp) {
			list_del(&tmp->list);
			break;
		}

		otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
		tmp->entry++;
		tmp->req.entry = tmp->entry;
		tmp->req.cntr_val = cntr_val;
		index++;
	}

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		if (i == index)
			break;

		tmp = list_entry(pos, struct otx2_tc_flow, list);
		otx2_add_mcam_flow_entry(nic, &tmp->req);
		i++;
	}

	return 0;
}

static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic,
					     struct otx2_flow_config *flow_cfg,
					     struct otx2_tc_flow *node)
{
	int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1;
	struct otx2_tc_flow *tmp;
	int list_idx, i;
	u16 cntr_val = 0;

	/* Find the index of the entry (list_idx) whose priority
	 * is greater than the new entry and re-install all
	 * the entries from beginning to list_idx to higher
	 * mcam indexes.
	 */
	list_idx = otx2_tc_add_to_flow_list(flow_cfg, node);
	for (i = 0; i < list_idx; i++) {
		tmp = otx2_tc_get_entry_by_index(flow_cfg, i);
		if (!tmp)
			return -ENOMEM;

		otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
		tmp->entry = flow_cfg->flow_ent[mcam_idx];
		tmp->req.entry = tmp->entry;
		tmp->req.cntr_val = cntr_val;
		otx2_add_mcam_flow_entry(nic, &tmp->req);
		mcam_idx++;
	}

	return mcam_idx;
}

static int otx2_tc_update_mcam_table(struct otx2_nic *nic,
				     struct otx2_flow_config *flow_cfg,
				     struct otx2_tc_flow *node,
				     bool add_req)
{
	if (add_req)
		return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node);

	return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node);
}

static int otx2_tc_del_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct nix_mcast_grp_destroy_req *grp_destroy_req;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
	if (!flow_node) {
		netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
			   tc_flow_cmd->cookie);
		return -EINVAL;
	}

	/* Disable TC MARK flag if there are no rules with skbedit mark action */
	if (flow_node->req.match_id)
		if (!refcount_dec_and_test(&flow_cfg->mark_flows))
			nic->flags &= ~OTX2_FLAG_TC_MARK_ENABLED;

	if (flow_node->is_act_police) {
		__clear_bit(flow_node->rq, &nic->rq_bmap);

		if (nic->flags & OTX2_FLAG_INTF_DOWN)
			goto free_mcam_flow;

		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
						 flow_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   flow_node->rq, flow_node->leaf_profile);

		err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   flow_node->leaf_profile);

		mutex_unlock(&nic->mbox.lock);
	}
	/* Remove the multicast/mirror related nodes */
	if (flow_node->mcast_grp_idx != MCAST_INVALID_GRP) {
		mutex_lock(&nic->mbox.lock);
		grp_destroy_req = otx2_mbox_alloc_msg_nix_mcast_grp_destroy(&nic->mbox);
		grp_destroy_req->mcast_grp_idx = flow_node->mcast_grp_idx;
		otx2_sync_mbox_msg(&nic->mbox);
		mutex_unlock(&nic->mbox.lock);
	}

free_mcam_flow:
	otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
	otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
	kfree_rcu(flow_node, rcu);
	flow_cfg->nr_flows--;
	return 0;
}

static int otx2_tc_add_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_flow *new_node, *old_node;
	struct npc_install_flow_req *req, dummy;
	int rc, err, mcam_idx;

	if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
		return -ENOMEM;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	if (flow_cfg->nr_flows == flow_cfg->max_flows) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Free MCAM entry not available to add the flow");
		return -ENOMEM;
	}

	/* allocate memory for the new flow and its node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node)
		return -ENOMEM;
	spin_lock_init(&new_node->lock);
	new_node->cookie = tc_flow_cmd->cookie;
	new_node->prio = tc_flow_cmd->common.prio;
	new_node->mcast_grp_idx = MCAST_INVALID_GRP;

	memset(&dummy, 0, sizeof(struct npc_install_flow_req));

	rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
	if (rc) {
		kfree_rcu(new_node, rcu);
		return rc;
	}

	/* If a flow exists with the same cookie, delete it */
	old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
	if (old_node)
		otx2_tc_del_flow(nic, tc_flow_cmd);

	mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true);
	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		rc = -ENOMEM;
		goto free_leaf;
	}

	memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
	memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
	req->channel = nic->hw.rx_chan_base;
	req->entry = flow_cfg->flow_ent[mcam_idx];
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	new_node->entry = req->entry;

	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
		mutex_unlock(&nic->mbox.lock);
		goto free_leaf;
	}

	mutex_unlock(&nic->mbox.lock);
	memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req));

	flow_cfg->nr_flows++;
	return 0;

free_leaf:
	otx2_tc_del_from_flow_list(flow_cfg, new_node);
	if (new_node->is_act_police) {
		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
						 new_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   new_node->rq, new_node->leaf_profile);
		err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   new_node->leaf_profile);

		__clear_bit(new_node->rq, &nic->rq_bmap);

		mutex_unlock(&nic->mbox.lock);
	}
	/* Free the node only after its policer state is no longer referenced */
	kfree_rcu(new_node, rcu);

	return rc;
}

static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
				  struct flow_cls_offload *tc_flow_cmd)
{
	struct npc_mcam_get_stats_req *req;
	struct npc_mcam_get_stats_rsp *rsp;
	struct otx2_tc_flow_stats *stats;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie);
	if (!flow_node) {
		netdev_info(nic->netdev, "tc flow not found for cookie %lx",
			    tc_flow_cmd->cookie);
		return -EINVAL;
	}

	mutex_lock(&nic->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_node->entry;

	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&nic->mbox.lock);
		return PTR_ERR(rsp);
	}

	mutex_unlock(&nic->mbox.lock);

	if (!rsp->stat_ena)
		return -EINVAL;

	stats = &flow_node->stats;

	spin_lock(&flow_node->lock);
	flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);
	stats->pkts = rsp->stat;
	spin_unlock(&flow_node->lock);

	return 0;
}

static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
				    struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return otx2_tc_add_flow(nic, cls_flower);
	case FLOW_CLS_DESTROY:
		return otx2_tc_del_flow(nic, cls_flower);
	case FLOW_CLS_STATS:
		return otx2_tc_get_flow_stats(nic, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
					    struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	u64 rate;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one ingress MATCHALL ratelimiter can be offloaded");
		return -ENOMEM;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		/* Ingress ratelimiting is not supported on OcteonTx2 */
		if (is_dev_otx2(nic->pdev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Ingress policing not supported on this platform");
			return -EOPNOTSUPP;
		}

		err = cn10k_alloc_matchall_ipolicer(nic);
		if (err)
			return err;

		/* Convert to bits per second */
		rate = entry->police.rate_bytes_ps * 8;
		err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action supported with Ingress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = cn10k_free_matchall_ipolicer(nic);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
	return err;
}

static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_ingress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;
	bool ntuple;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	ntuple = nic->netdev->features & NETIF_F_NTUPLE;
	switch (type) {
	case TC_SETUP_CLSFLOWER:
		if (ntuple) {
			netdev_warn(nic->netdev,
				    "Can't install TC flower offload rule when NTUPLE is active");
			return -EOPNOTSUPP;
		}

		return otx2_setup_tc_cls_flower(nic, type_data);
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_ingress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
					 struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_egress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_egress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_egress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static LIST_HEAD(otx2_block_cb_list);

static int otx2_setup_tc_block(struct net_device *netdev,
			       struct flow_block_offload *f)
{
	struct otx2_nic *nic = netdev_priv(netdev);
	flow_setup_cb_t *cb;
	bool ingress;

	if (f->block_shared)
		return -EOPNOTSUPP;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = otx2_setup_tc_block_ingress_cb;
		ingress = true;
	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = otx2_setup_tc_block_egress_cb;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
					  nic, nic, ingress);
}

int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return otx2_setup_tc_block(netdev, type_data);
	case TC_SETUP_QDISC_HTB:
		return otx2_setup_tc_htb(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(otx2_setup_tc);

int otx2_init_tc(struct otx2_nic *nic)
{
	/* Exclude receive queue 0 from being used for police action */
	set_bit(0, &nic->rq_bmap);

	if (!nic->flow_cfg) {
		netdev_err(nic->netdev,
			   "Can't init TC, nic->flow_cfg is not setup\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_init_tc);

void otx2_shutdown_tc(struct otx2_nic *nic)
{
	otx2_destroy_tc_flow_list(nic);
}
EXPORT_SYMBOL(otx2_shutdown_tc);

static void otx2_tc_config_ingress_rule(struct otx2_nic *nic,
					struct otx2_tc_flow *node)
{
	struct npc_install_flow_req *req;

	if (otx2_tc_act_set_hw_police(nic, node))
		return;

	mutex_lock(&nic->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!req)
		goto err;

	memcpy(req, &node->req, sizeof(struct npc_install_flow_req));

	if (otx2_sync_mbox_msg(&nic->mbox))
		netdev_err(nic->netdev,
			   "Failed to install MCAM flow entry for ingress rule");
err:
	mutex_unlock(&nic->mbox.lock);
}

void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic)
{
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_flow *node;

	/* If any ingress policer rules exist for the interface then
	 * apply those rules. Ingress policer rules depend on bandwidth
	 * profiles linked to the receive queues. Since no receive queues
	 * exist when the interface is down, ingress policer rules are stored
	 * and configured in hardware after all receive queues are allocated
	 * in otx2_open.
	 */
	list_for_each_entry(node, &flow_cfg->flow_list_tc, list) {
		if (node->is_act_police)
			otx2_tc_config_ingress_rule(nic, node);
	}
}
EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules);