/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_vlan.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

#define STATS_CHECK_PERIOD (HZ / 2)

struct ch_tc_pedit_fields pedits[] = {
        PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
        PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
        PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
        PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
        PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
        PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
        PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
        PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
        PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
        PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
        PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
        PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
        PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
        PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
        PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
        PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
        PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
        PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
};

static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
        struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);

        /* kzalloc() can fail; only initialize the lock on success */
        if (new)
                spin_lock_init(&new->lock);
        return new;
}

/* Must be called with either RTNL or rcu_read_lock */
static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
                                                   unsigned long flower_cookie)
{
        return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie,
                                      adap->flower_ht_params);
}
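/* Translate the flower match (flow dissector keys) supplied by TC into the
 * Chelsio filter specification: ethtype/protocol, IPv4/IPv6 addresses,
 * L4 ports, TOS and VLAN TCI, plus the ingress port of @dev.
 */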
static void cxgb4_process_flow_match(struct net_device *dev,
                                     struct tc_cls_flower_offload *cls,
                                     struct ch_filter_specification *fs)
{
        u16 addr_type = 0;

        if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(cls->dissector,
                                                  FLOW_DISSECTOR_KEY_CONTROL,
                                                  cls->key);

                addr_type = key->addr_type;
        }

        if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_dissector_key_basic *key =
                        skb_flow_dissector_target(cls->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  cls->key);
                struct flow_dissector_key_basic *mask =
                        skb_flow_dissector_target(cls->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  cls->mask);
                u16 ethtype_key = ntohs(key->n_proto);
                u16 ethtype_mask = ntohs(mask->n_proto);

                if (ethtype_key == ETH_P_ALL) {
                        ethtype_key = 0;
                        ethtype_mask = 0;
                }

                if (ethtype_key == ETH_P_IPV6)
                        fs->type = 1;

                fs->val.ethtype = ethtype_key;
                fs->mask.ethtype = ethtype_mask;
                fs->val.proto = key->ip_proto;
                fs->mask.proto = mask->ip_proto;
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_dissector_key_ipv4_addrs *key =
                        skb_flow_dissector_target(cls->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  cls->key);
                struct flow_dissector_key_ipv4_addrs *mask =
                        skb_flow_dissector_target(cls->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  cls->mask);
                fs->type = 0;
                memcpy(&fs->val.lip[0], &key->dst, sizeof(key->dst));
                memcpy(&fs->val.fip[0], &key->src, sizeof(key->src));
                memcpy(&fs->mask.lip[0], &mask->dst, sizeof(mask->dst));
                memcpy(&fs->mask.fip[0], &mask->src, sizeof(mask->src));

                /* also initialize nat_lip/fip to same values */
                memcpy(&fs->nat_lip[0], &key->dst, sizeof(key->dst));
                memcpy(&fs->nat_fip[0], &key->src, sizeof(key->src));
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_dissector_key_ipv6_addrs *key =
                        skb_flow_dissector_target(cls->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  cls->key);
                struct flow_dissector_key_ipv6_addrs *mask =
                        skb_flow_dissector_target(cls->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  cls->mask);

                fs->type = 1;
                memcpy(&fs->val.lip[0], key->dst.s6_addr, sizeof(key->dst));
                memcpy(&fs->val.fip[0], key->src.s6_addr, sizeof(key->src));
                memcpy(&fs->mask.lip[0], mask->dst.s6_addr, sizeof(mask->dst));
                memcpy(&fs->mask.fip[0], mask->src.s6_addr, sizeof(mask->src));

                /* also initialize nat_lip/fip to same values */
                memcpy(&fs->nat_lip[0], key->dst.s6_addr, sizeof(key->dst));
                memcpy(&fs->nat_fip[0], key->src.s6_addr, sizeof(key->src));
        }

        if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_dissector_key_ports *key, *mask;

                key = skb_flow_dissector_target(cls->dissector,
                                                FLOW_DISSECTOR_KEY_PORTS,
                                                cls->key);
                mask = skb_flow_dissector_target(cls->dissector,
                                                 FLOW_DISSECTOR_KEY_PORTS,
                                                 cls->mask);
                fs->val.lport = cpu_to_be16(key->dst);
                fs->mask.lport = cpu_to_be16(mask->dst);
                fs->val.fport = cpu_to_be16(key->src);
                fs->mask.fport = cpu_to_be16(mask->src);

                /* also initialize nat_lport/fport to same values */
                fs->nat_lport = cpu_to_be16(key->dst);
                fs->nat_fport = cpu_to_be16(key->src);
        }

        if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
                struct flow_dissector_key_ip *key, *mask;

                key = skb_flow_dissector_target(cls->dissector,
                                                FLOW_DISSECTOR_KEY_IP,
                                                cls->key);
                mask = skb_flow_dissector_target(cls->dissector,
                                                 FLOW_DISSECTOR_KEY_IP,
                                                 cls->mask);
                fs->val.tos = key->tos;
                fs->mask.tos = mask->tos;
        }

        if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_dissector_key_vlan *key, *mask;
                u16 vlan_tci, vlan_tci_mask;

                key = skb_flow_dissector_target(cls->dissector,
                                                FLOW_DISSECTOR_KEY_VLAN,
                                                cls->key);
                mask = skb_flow_dissector_target(cls->dissector,
                                                 FLOW_DISSECTOR_KEY_VLAN,
                                                 cls->mask);
                vlan_tci = key->vlan_id | (key->vlan_priority <<
                                           VLAN_PRIO_SHIFT);
                vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
                                                 VLAN_PRIO_SHIFT);
                fs->val.ivlan = vlan_tci;
                fs->mask.ivlan = vlan_tci_mask;

                /* Chelsio adapters use ivlan_vld bit to match vlan packets
                 * as 802.1Q. Also, when vlan tag is present in packets,
                 * ethtype match is used then to match on ethtype of inner
                 * header ie. the header following the vlan header.
                 * So, set the ivlan_vld based on ethtype info supplied by
                 * TC for vlan packets if its 802.1Q. And then reset the
                 * ethtype value else, hw will try to match the supplied
                 * ethtype value with ethtype of inner header.
                 */
                if (fs->val.ethtype == ETH_P_8021Q) {
                        fs->val.ivlan_vld = 1;
                        fs->mask.ivlan_vld = 1;
                        fs->val.ethtype = 0;
                        fs->mask.ethtype = 0;
                }
        }

        /* Match only packets coming from the ingress port where this
         * filter will be created.
         */
        fs->val.iport = netdev2pinfo(dev)->port_id;
        fs->mask.iport = ~0;
}
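/* Reject matches the hardware filter cannot express: any dissector key
 * outside the supported set, an IP key without an IPv4/IPv6 ethtype, or
 * a TTL match.
 */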
static int cxgb4_validate_flow_match(struct net_device *dev,
                                     struct tc_cls_flower_offload *cls)
{
        u16 ethtype_mask = 0;
        u16 ethtype_key = 0;

        if (cls->dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_VLAN) |
              BIT(FLOW_DISSECTOR_KEY_IP))) {
                netdev_warn(dev, "Unsupported key used: 0x%x\n",
                            cls->dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_dissector_key_basic *key =
                        skb_flow_dissector_target(cls->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  cls->key);
                struct flow_dissector_key_basic *mask =
                        skb_flow_dissector_target(cls->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  cls->mask);
                ethtype_key = ntohs(key->n_proto);
                ethtype_mask = ntohs(mask->n_proto);
        }

        if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
                u16 eth_ip_type = ethtype_key & ethtype_mask;
                struct flow_dissector_key_ip *mask;

                if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
                        netdev_err(dev, "IP Key supported only with IPv4/v6");
                        return -EINVAL;
                }

                mask = skb_flow_dissector_target(cls->dissector,
                                                 FLOW_DISSECTOR_KEY_IP,
                                                 cls->mask);
                if (mask->ttl) {
                        netdev_warn(dev, "ttl match unsupported for offload");
                        return -EOPNOTSUPP;
                }
        }

        return 0;
}

static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
                          u8 field)
{
        u32 set_val = val & ~mask;
        u32 offset = 0;
        u8 size = 1;
        int i;

        for (i = 0; i < ARRAY_SIZE(pedits); i++) {
                if (pedits[i].field == field) {
                        offset = pedits[i].offset;
                        size = pedits[i].size;
                        break;
                }
        }
        memcpy((u8 *)fs + offset, &set_val, size);
}
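/* Map a single pedit SET key (header type plus offset within that header)
 * onto the corresponding filter specification field via offload_pedit(),
 * flagging MAC rewrite or NAT mode as needed.
 */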
static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
                                u32 mask, u32 offset, u8 htype)
{
        switch (htype) {
        case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
                switch (offset) {
                case PEDIT_ETH_DMAC_31_0:
                        fs->newdmac = 1;
                        offload_pedit(fs, val, mask, ETH_DMAC_31_0);
                        break;
                case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
                        if (~mask & PEDIT_ETH_DMAC_MASK)
                                offload_pedit(fs, val, mask, ETH_DMAC_47_32);
                        else
                                offload_pedit(fs, val >> 16, mask >> 16,
                                              ETH_SMAC_15_0);
                        break;
                case PEDIT_ETH_SMAC_47_16:
                        fs->newsmac = 1;
                        offload_pedit(fs, val, mask, ETH_SMAC_47_16);
                }
                break;
        case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
                switch (offset) {
                case PEDIT_IP4_SRC:
                        offload_pedit(fs, val, mask, IP4_SRC);
                        break;
                case PEDIT_IP4_DST:
                        offload_pedit(fs, val, mask, IP4_DST);
                }
                fs->nat_mode = NAT_MODE_ALL;
                break;
        case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
                switch (offset) {
                case PEDIT_IP6_SRC_31_0:
                        offload_pedit(fs, val, mask, IP6_SRC_31_0);
                        break;
                case PEDIT_IP6_SRC_63_32:
                        offload_pedit(fs, val, mask, IP6_SRC_63_32);
                        break;
                case PEDIT_IP6_SRC_95_64:
                        offload_pedit(fs, val, mask, IP6_SRC_95_64);
                        break;
                case PEDIT_IP6_SRC_127_96:
                        offload_pedit(fs, val, mask, IP6_SRC_127_96);
                        break;
                case PEDIT_IP6_DST_31_0:
                        offload_pedit(fs, val, mask, IP6_DST_31_0);
                        break;
                case PEDIT_IP6_DST_63_32:
                        offload_pedit(fs, val, mask, IP6_DST_63_32);
                        break;
                case PEDIT_IP6_DST_95_64:
                        offload_pedit(fs, val, mask, IP6_DST_95_64);
                        break;
                case PEDIT_IP6_DST_127_96:
                        offload_pedit(fs, val, mask, IP6_DST_127_96);
                }
                fs->nat_mode = NAT_MODE_ALL;
                break;
        case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
                switch (offset) {
                case PEDIT_TCP_SPORT_DPORT:
                        if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
                                offload_pedit(fs, cpu_to_be32(val) >> 16,
                                              cpu_to_be32(mask) >> 16,
                                              TCP_SPORT);
                        else
                                offload_pedit(fs, cpu_to_be32(val),
                                              cpu_to_be32(mask), TCP_DPORT);
                }
                fs->nat_mode = NAT_MODE_ALL;
                break;
        case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
                switch (offset) {
                case PEDIT_UDP_SPORT_DPORT:
                        if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
                                offload_pedit(fs, cpu_to_be32(val) >> 16,
                                              cpu_to_be32(mask) >> 16,
                                              UDP_SPORT);
                        else
                                offload_pedit(fs, cpu_to_be32(val),
                                              cpu_to_be32(mask), UDP_DPORT);
                }
                fs->nat_mode = NAT_MODE_ALL;
        }
}
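/* Translate the TC actions attached to the flower rule into filter actions:
 * pass/drop, switch to another port of this adapter, VLAN pop/push/modify,
 * and pedit based header rewrites.
 */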
static void cxgb4_process_flow_actions(struct net_device *in,
                                       struct tc_cls_flower_offload *cls,
                                       struct ch_filter_specification *fs)
{
        const struct tc_action *a;
        LIST_HEAD(actions);

        tcf_exts_to_list(cls->exts, &actions);
        list_for_each_entry(a, &actions, list) {
                if (is_tcf_gact_ok(a)) {
                        fs->action = FILTER_PASS;
                } else if (is_tcf_gact_shot(a)) {
                        fs->action = FILTER_DROP;
                } else if (is_tcf_mirred_egress_redirect(a)) {
                        int ifindex = tcf_mirred_ifindex(a);
                        struct net_device *out = __dev_get_by_index(dev_net(in),
                                                                    ifindex);
                        struct port_info *pi = netdev_priv(out);

                        fs->action = FILTER_SWITCH;
                        fs->eport = pi->port_id;
                } else if (is_tcf_vlan(a)) {
                        u32 vlan_action = tcf_vlan_action(a);
                        u8 prio = tcf_vlan_push_prio(a);
                        u16 vid = tcf_vlan_push_vid(a);
                        u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;

                        switch (vlan_action) {
                        case TCA_VLAN_ACT_POP:
                                fs->newvlan |= VLAN_REMOVE;
                                break;
                        case TCA_VLAN_ACT_PUSH:
                                fs->newvlan |= VLAN_INSERT;
                                fs->vlan = vlan_tci;
                                break;
                        case TCA_VLAN_ACT_MODIFY:
                                fs->newvlan |= VLAN_REWRITE;
                                fs->vlan = vlan_tci;
                                break;
                        default:
                                break;
                        }
                } else if (is_tcf_pedit(a)) {
                        u32 mask, val, offset;
                        int nkeys, i;
                        u8 htype;

                        nkeys = tcf_pedit_nkeys(a);
                        for (i = 0; i < nkeys; i++) {
                                htype = tcf_pedit_htype(a, i);
                                mask = tcf_pedit_mask(a, i);
                                val = tcf_pedit_val(a, i);
                                offset = tcf_pedit_offset(a, i);

                                process_pedit_field(fs, val, mask, offset,
                                                    htype);
                        }
                }
        }
}

static bool valid_l4_mask(u32 mask)
{
        u16 hi, lo;

        /* Either the upper 16-bits (SPORT) OR the lower
         * 16-bits (DPORT) can be set, but NOT BOTH.
         */
        hi = (mask >> 16) & 0xFFFF;
        lo = mask & 0xFFFF;

        return hi && lo ? false : true;
}

static bool valid_pedit_action(struct net_device *dev,
                               const struct tc_action *a)
{
        u32 mask, offset;
        u8 cmd, htype;
        int nkeys, i;

        nkeys = tcf_pedit_nkeys(a);
        for (i = 0; i < nkeys; i++) {
                htype = tcf_pedit_htype(a, i);
                cmd = tcf_pedit_cmd(a, i);
                mask = tcf_pedit_mask(a, i);
                offset = tcf_pedit_offset(a, i);

                if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) {
                        netdev_err(dev, "%s: Unsupported pedit cmd\n",
                                   __func__);
                        return false;
                }

                switch (htype) {
                case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
                        switch (offset) {
                        case PEDIT_ETH_DMAC_31_0:
                        case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
                        case PEDIT_ETH_SMAC_47_16:
                                break;
                        default:
                                netdev_err(dev, "%s: Unsupported pedit field\n",
                                           __func__);
                                return false;
                        }
                        break;
                case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
                        switch (offset) {
                        case PEDIT_IP4_SRC:
                        case PEDIT_IP4_DST:
                                break;
                        default:
                                netdev_err(dev, "%s: Unsupported pedit field\n",
                                           __func__);
                                return false;
                        }
                        break;
                case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
                        switch (offset) {
                        case PEDIT_IP6_SRC_31_0:
                        case PEDIT_IP6_SRC_63_32:
                        case PEDIT_IP6_SRC_95_64:
                        case PEDIT_IP6_SRC_127_96:
                        case PEDIT_IP6_DST_31_0:
                        case PEDIT_IP6_DST_63_32:
                        case PEDIT_IP6_DST_95_64:
                        case PEDIT_IP6_DST_127_96:
                                break;
                        default:
                                netdev_err(dev, "%s: Unsupported pedit field\n",
                                           __func__);
                                return false;
                        }
                        break;
                case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
                        switch (offset) {
                        case PEDIT_TCP_SPORT_DPORT:
                                if (!valid_l4_mask(~mask)) {
                                        netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
                                                   __func__);
                                        return false;
                                }
                                break;
                        default:
                                netdev_err(dev, "%s: Unsupported pedit field\n",
                                           __func__);
                                return false;
                        }
                        break;
                case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
                        switch (offset) {
                        case PEDIT_UDP_SPORT_DPORT:
                                if (!valid_l4_mask(~mask)) {
                                        netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
                                                   __func__);
                                        return false;
                                }
                                break;
                        default:
                                netdev_err(dev, "%s: Unsupported pedit field\n",
                                           __func__);
                                return false;
                        }
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported pedit type\n",
                                   __func__);
                        return false;
                }
        }
        return true;
}
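/* Accept only action combinations the hardware can offload: redirect must
 * target a port on this adapter, VLAN push/modify must use 802.1Q, pedit
 * keys must be SET commands on supported fields, and pedit/VLAN rewrites
 * are only valid together with an egress redirect.
 */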
static int cxgb4_validate_flow_actions(struct net_device *dev,
                                       struct tc_cls_flower_offload *cls)
{
        const struct tc_action *a;
        bool act_redir = false;
        bool act_pedit = false;
        bool act_vlan = false;
        LIST_HEAD(actions);

        tcf_exts_to_list(cls->exts, &actions);
        list_for_each_entry(a, &actions, list) {
                if (is_tcf_gact_ok(a)) {
                        /* Do nothing */
                } else if (is_tcf_gact_shot(a)) {
                        /* Do nothing */
                } else if (is_tcf_mirred_egress_redirect(a)) {
                        struct adapter *adap = netdev2adap(dev);
                        struct net_device *n_dev;
                        unsigned int i, ifindex;
                        bool found = false;

                        ifindex = tcf_mirred_ifindex(a);
                        for_each_port(adap, i) {
                                n_dev = adap->port[i];
                                if (ifindex == n_dev->ifindex) {
                                        found = true;
                                        break;
                                }
                        }

                        /* If interface doesn't belong to our hw, then
                         * the provided output port is not valid
                         */
                        if (!found) {
                                netdev_err(dev, "%s: Out port invalid\n",
                                           __func__);
                                return -EINVAL;
                        }
                        act_redir = true;
                } else if (is_tcf_vlan(a)) {
                        u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
                        u32 vlan_action = tcf_vlan_action(a);

                        switch (vlan_action) {
                        case TCA_VLAN_ACT_POP:
                                break;
                        case TCA_VLAN_ACT_PUSH:
                        case TCA_VLAN_ACT_MODIFY:
                                if (proto != ETH_P_8021Q) {
                                        netdev_err(dev, "%s: Unsupported vlan proto\n",
                                                   __func__);
                                        return -EOPNOTSUPP;
                                }
                                break;
                        default:
                                netdev_err(dev, "%s: Unsupported vlan action\n",
                                           __func__);
                                return -EOPNOTSUPP;
                        }
                        act_vlan = true;
                } else if (is_tcf_pedit(a)) {
                        bool pedit_valid = valid_pedit_action(dev, a);

                        if (!pedit_valid)
                                return -EOPNOTSUPP;
                        act_pedit = true;
                } else {
                        netdev_err(dev, "%s: Unsupported action\n", __func__);
                        return -EOPNOTSUPP;
                }
        }

        if ((act_pedit || act_vlan) && !act_redir) {
                netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
                           __func__);
                return -EINVAL;
        }

        return 0;
}
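/* Offload a flower rule: validate the match and actions, build the filter
 * specification, pick a filter index (hash filters use index 0), program
 * the filter and wait for the firmware reply, then track the entry in the
 * cookie-keyed rhashtable.
 */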
int cxgb4_tc_flower_replace(struct net_device *dev,
                            struct tc_cls_flower_offload *cls)
{
        struct adapter *adap = netdev2adap(dev);
        struct ch_tc_flower_entry *ch_flower;
        struct ch_filter_specification *fs;
        struct filter_ctx ctx;
        int fidx;
        int ret;

        if (cxgb4_validate_flow_actions(dev, cls))
                return -EOPNOTSUPP;

        if (cxgb4_validate_flow_match(dev, cls))
                return -EOPNOTSUPP;

        ch_flower = allocate_flower_entry();
        if (!ch_flower) {
                netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
                return -ENOMEM;
        }

        fs = &ch_flower->fs;
        fs->hitcnts = 1;
        cxgb4_process_flow_match(dev, cls, fs);
        cxgb4_process_flow_actions(dev, cls, fs);

        fs->hash = is_filter_exact_match(adap, fs);
        if (fs->hash) {
                fidx = 0;
        } else {
                fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET);
                if (fidx < 0) {
                        netdev_err(dev, "%s: No fidx for offload.\n", __func__);
                        ret = -ENOMEM;
                        goto free_entry;
                }
        }

        init_completion(&ctx.completion);
        ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
        if (ret) {
                netdev_err(dev, "%s: filter creation err %d\n",
                           __func__, ret);
                goto free_entry;
        }

        /* Wait for reply */
        ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
        if (!ret) {
                ret = -ETIMEDOUT;
                goto free_entry;
        }

        ret = ctx.result;
        /* Check if hw returned error for filter creation */
        if (ret) {
                netdev_err(dev, "%s: filter creation err %d\n",
                           __func__, ret);
                goto free_entry;
        }

        ch_flower->tc_flower_cookie = cls->cookie;
        ch_flower->filter_id = ctx.tid;
        ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
                                     adap->flower_ht_params);
        if (ret)
                goto del_filter;

        return 0;

del_filter:
        cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);

free_entry:
        kfree(ch_flower);
        return ret;
}

int cxgb4_tc_flower_destroy(struct net_device *dev,
                            struct tc_cls_flower_offload *cls)
{
        struct adapter *adap = netdev2adap(dev);
        struct ch_tc_flower_entry *ch_flower;
        int ret;

        ch_flower = ch_flower_lookup(adap, cls->cookie);
        if (!ch_flower)
                return -ENOENT;

        ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
        if (ret)
                goto err;

        ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
                                     adap->flower_ht_params);
        if (ret) {
                netdev_err(dev, "Flow remove from rhashtable failed");
                goto err;
        }
        kfree_rcu(ch_flower, rcu);

err:
        return ret;
}

static void ch_flower_stats_handler(struct work_struct *work)
{
        struct adapter *adap = container_of(work, struct adapter,
                                            flower_stats_work);
        struct ch_tc_flower_entry *flower_entry;
        struct ch_tc_flower_stats *ofld_stats;
        struct rhashtable_iter iter;
        u64 packets;
        u64 bytes;
        int ret;

        rhashtable_walk_enter(&adap->flower_tbl, &iter);
        do {
                flower_entry = ERR_PTR(rhashtable_walk_start(&iter));
                if (IS_ERR(flower_entry))
                        goto walk_stop;

                while ((flower_entry = rhashtable_walk_next(&iter)) &&
                       !IS_ERR(flower_entry)) {
                        ret = cxgb4_get_filter_counters(adap->port[0],
                                                        flower_entry->filter_id,
                                                        &packets, &bytes,
                                                        flower_entry->fs.hash);
                        if (!ret) {
                                spin_lock(&flower_entry->lock);
                                ofld_stats = &flower_entry->stats;

                                if (ofld_stats->prev_packet_count != packets) {
                                        ofld_stats->prev_packet_count = packets;
                                        ofld_stats->last_used = jiffies;
                                }
                                spin_unlock(&flower_entry->lock);
                        }
                }
walk_stop:
                rhashtable_walk_stop(&iter);
        } while (flower_entry == ERR_PTR(-EAGAIN));
        rhashtable_walk_exit(&iter);
        mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

static void ch_flower_stats_cb(struct timer_list *t)
{
        struct adapter *adap = from_timer(adap, t, flower_stats_timer);

        schedule_work(&adap->flower_stats_work);
}
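/* Report hardware hit counters for a flower rule back to TC. Deltas since
 * the last readout are pushed via tcf_exts_stats_update() under the
 * per-entry lock.
 */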
int cxgb4_tc_flower_stats(struct net_device *dev,
                          struct tc_cls_flower_offload *cls)
{
        struct adapter *adap = netdev2adap(dev);
        struct ch_tc_flower_stats *ofld_stats;
        struct ch_tc_flower_entry *ch_flower;
        u64 packets;
        u64 bytes;
        int ret;

        ch_flower = ch_flower_lookup(adap, cls->cookie);
        if (!ch_flower) {
                ret = -ENOENT;
                goto err;
        }

        ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
                                        &packets, &bytes,
                                        ch_flower->fs.hash);
        if (ret < 0)
                goto err;

        spin_lock_bh(&ch_flower->lock);
        ofld_stats = &ch_flower->stats;
        if (ofld_stats->packet_count != packets) {
                if (ofld_stats->prev_packet_count != packets)
                        ofld_stats->last_used = jiffies;
                tcf_exts_stats_update(cls->exts, bytes - ofld_stats->byte_count,
                                      packets - ofld_stats->packet_count,
                                      ofld_stats->last_used);

                ofld_stats->packet_count = packets;
                ofld_stats->byte_count = bytes;
                ofld_stats->prev_packet_count = packets;
        }
        spin_unlock_bh(&ch_flower->lock);
        return 0;

err:
        return ret;
}

static const struct rhashtable_params cxgb4_tc_flower_ht_params = {
        .nelem_hint = 384,
        .head_offset = offsetof(struct ch_tc_flower_entry, node),
        .key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie),
        .key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie),
        .max_size = 524288,
        .min_size = 512,
        .automatic_shrinking = true
};

int cxgb4_init_tc_flower(struct adapter *adap)
{
        int ret;

        adap->flower_ht_params = cxgb4_tc_flower_ht_params;
        ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
        if (ret)
                return ret;

        INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
        timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
        mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
        return 0;
}

void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
        if (adap->flower_stats_timer.function)
                del_timer_sync(&adap->flower_stats_timer);
        cancel_work_sync(&adap->flower_stats_work);
        rhashtable_destroy(&adap->flower_tbl);
}