/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_vlan.h>

#include "cxgb4.h"
#include "cxgb4_tc_flower.h"

#define STATS_CHECK_PERIOD (HZ / 2)

struct ch_tc_pedit_fields pedits[] = {
	PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
	PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
	PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
	PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
	PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
	PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
	PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
	PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
	PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
	PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
	PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
	PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
	PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
	PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
};

static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
	struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);

	/* Guard against allocation failure before touching the entry */
	if (new)
		spin_lock_init(&new->lock);
	return new;
}

/* Must be called with either RTNL or rcu_read_lock */
static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
						   unsigned long flower_cookie)
{
	struct ch_tc_flower_entry *flower_entry;

	hash_for_each_possible_rcu(adap->flower_anymatch_tbl, flower_entry,
				   link, flower_cookie)
		if (flower_entry->tc_flower_cookie == flower_cookie)
			return flower_entry;
	return NULL;
}

static void cxgb4_process_flow_match(struct net_device *dev,
				     struct tc_cls_flower_offload *cls,
				     struct ch_filter_specification *fs)
{
	u16 addr_type = 0;

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  cls->key);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->mask);
		u16 ethtype_key = ntohs(key->n_proto);
		u16 ethtype_mask = ntohs(mask->n_proto);

		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}

		fs->val.ethtype = ethtype_key;
		fs->mask.ethtype = ethtype_mask;
		fs->val.proto = key->ip_proto;
		fs->mask.proto = mask->ip_proto;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  cls->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  cls->mask);
		fs->type = 0;
		memcpy(&fs->val.lip[0], &key->dst, sizeof(key->dst));
		memcpy(&fs->val.fip[0], &key->src, sizeof(key->src));
		memcpy(&fs->mask.lip[0], &mask->dst, sizeof(mask->dst));
		memcpy(&fs->mask.fip[0], &mask->src, sizeof(mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], &key->dst, sizeof(key->dst));
		memcpy(&fs->nat_fip[0], &key->src, sizeof(key->src));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  cls->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  cls->mask);

		fs->type = 1;
		memcpy(&fs->val.lip[0], key->dst.s6_addr, sizeof(key->dst));
		memcpy(&fs->val.fip[0], key->src.s6_addr, sizeof(key->src));
		memcpy(&fs->mask.lip[0], mask->dst.s6_addr, sizeof(mask->dst));
		memcpy(&fs->mask.fip[0], mask->src.s6_addr, sizeof(mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], key->dst.s6_addr, sizeof(key->dst));
		memcpy(&fs->nat_fip[0], key->src.s6_addr, sizeof(key->src));
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key, *mask;

		key = skb_flow_dissector_target(cls->dissector,
						FLOW_DISSECTOR_KEY_PORTS,
						cls->key);
		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_PORTS,
						 cls->mask);
		fs->val.lport = cpu_to_be16(key->dst);
		fs->mask.lport = cpu_to_be16(mask->dst);
		fs->val.fport = cpu_to_be16(key->src);
		fs->mask.fport = cpu_to_be16(mask->src);

		/* also initialize nat_lport/fport to same values */
		fs->nat_lport = cpu_to_be16(key->dst);
		fs->nat_fport = cpu_to_be16(key->src);
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_dissector_key_ip *key, *mask;

		key = skb_flow_dissector_target(cls->dissector,
						FLOW_DISSECTOR_KEY_IP,
						cls->key);
		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_IP,
						 cls->mask);
		fs->val.tos = key->tos;
		fs->mask.tos = mask->tos;
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key, *mask;
		u16 vlan_tci, vlan_tci_mask;

		key = skb_flow_dissector_target(cls->dissector,
						FLOW_DISSECTOR_KEY_VLAN,
						cls->key);
		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_VLAN,
						 cls->mask);
		vlan_tci = key->vlan_id | (key->vlan_priority <<
					   VLAN_PRIO_SHIFT);
		vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
						 VLAN_PRIO_SHIFT);
		fs->val.ivlan = cpu_to_be16(vlan_tci);
		fs->mask.ivlan = cpu_to_be16(vlan_tci_mask);

		/* Chelsio adapters use the ivlan_vld bit to match vlan
		 * packets as 802.1Q. Also, when a vlan tag is present in a
		 * packet, the ethtype match is then used to match on the
		 * ethtype of the inner header, i.e. the header following
		 * the vlan header. So, set ivlan_vld based on the ethtype
		 * info supplied by TC if it is 802.1Q, and then clear the
		 * ethtype value; otherwise, hw will try to match the
		 * supplied ethtype value against the ethtype of the inner
		 * header.
		 */
		if (fs->val.ethtype == ETH_P_8021Q) {
			fs->val.ivlan_vld = 1;
			fs->mask.ivlan_vld = 1;
			fs->val.ethtype = 0;
			fs->mask.ethtype = 0;
		}
	}

	/* Match only packets coming from the ingress port where this
	 * filter will be created.
	 */
	fs->val.iport = netdev2pinfo(dev)->port_id;
	fs->mask.iport = ~0;
}

static int cxgb4_validate_flow_match(struct net_device *dev,
				     struct tc_cls_flower_offload *cls)
{
	u16 ethtype_mask = 0;
	u16 ethtype_key = 0;

	if (cls->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IP))) {
		netdev_warn(dev, "Unsupported key used: 0x%x\n",
			    cls->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->mask);
		ethtype_key = ntohs(key->n_proto);
		ethtype_mask = ntohs(mask->n_proto);
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
		u16 eth_ip_type = ethtype_key & ethtype_mask;
		struct flow_dissector_key_ip *mask;

		if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
			netdev_err(dev, "IP Key supported only with IPv4/v6");
			return -EINVAL;
		}

		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_IP,
						 cls->mask);
		if (mask->ttl) {
			netdev_warn(dev, "ttl match unsupported for offload");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
			  u8 field)
{
	u32 set_val = val & ~mask;
	u32 offset = 0;
	u8 size = 1;
	int i;

	for (i = 0; i < ARRAY_SIZE(pedits); i++) {
		if (pedits[i].field == field) {
			offset = pedits[i].offset;
			size = pedits[i].size;
			break;
		}
	}
	memcpy((u8 *)fs + offset, &set_val, size);
}

static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
				u32 mask, u32 offset, u8 htype)
{
	switch (htype) {
	case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
			fs->newdmac = 1;
			offload_pedit(fs, val, mask, ETH_DMAC_31_0);
			break;
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
			if (~mask & PEDIT_ETH_DMAC_MASK)
				offload_pedit(fs, val, mask, ETH_DMAC_47_32);
			else
				offload_pedit(fs, val >> 16, mask >> 16,
					      ETH_SMAC_15_0);
			break;
		case PEDIT_ETH_SMAC_47_16:
			fs->newsmac = 1;
			offload_pedit(fs, val, mask, ETH_SMAC_47_16);
		}
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
			offload_pedit(fs, val, mask, IP4_SRC);
			break;
		case PEDIT_IP4_DST:
			offload_pedit(fs, val, mask, IP4_DST);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
			offload_pedit(fs, val, mask, IP6_SRC_31_0);
			break;
		case PEDIT_IP6_SRC_63_32:
			offload_pedit(fs, val, mask, IP6_SRC_63_32);
			break;
		case PEDIT_IP6_SRC_95_64:
			offload_pedit(fs, val, mask, IP6_SRC_95_64);
			break;
		case PEDIT_IP6_SRC_127_96:
			offload_pedit(fs, val, mask, IP6_SRC_127_96);
			break;
		case PEDIT_IP6_DST_31_0:
			offload_pedit(fs, val, mask, IP6_DST_31_0);
			break;
		case PEDIT_IP6_DST_63_32:
			offload_pedit(fs, val, mask, IP6_DST_63_32);
			break;
		case PEDIT_IP6_DST_95_64:
			offload_pedit(fs, val, mask, IP6_DST_95_64);
			break;
		case PEDIT_IP6_DST_127_96:
			offload_pedit(fs, val, mask, IP6_DST_127_96);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				offload_pedit(fs, cpu_to_be32(val) >> 16,
					      cpu_to_be32(mask) >> 16,
					      TCP_SPORT);
			else
				offload_pedit(fs, cpu_to_be32(val),
					      cpu_to_be32(mask), TCP_DPORT);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				offload_pedit(fs, cpu_to_be32(val) >> 16,
					      cpu_to_be32(mask) >> 16,
					      UDP_SPORT);
			else
				offload_pedit(fs, cpu_to_be32(val),
					      cpu_to_be32(mask), UDP_DPORT);
		}
		fs->nat_mode = NAT_MODE_ALL;
	}
}

static void cxgb4_process_flow_actions(struct net_device *in,
				       struct tc_cls_flower_offload *cls,
				       struct ch_filter_specification *fs)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	tcf_exts_to_list(cls->exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_ok(a)) {
			fs->action = FILTER_PASS;
		} else if (is_tcf_gact_shot(a)) {
			fs->action = FILTER_DROP;
		} else if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out = __dev_get_by_index(dev_net(in),
								    ifindex);
			struct port_info *pi = netdev_priv(out);

			fs->action = FILTER_SWITCH;
			fs->eport = pi->port_id;
		} else if (is_tcf_vlan(a)) {
			u32 vlan_action = tcf_vlan_action(a);
			u8 prio = tcf_vlan_push_prio(a);
			u16 vid = tcf_vlan_push_vid(a);
			u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;

			switch (vlan_action) {
			case TCA_VLAN_ACT_POP:
				fs->newvlan |= VLAN_REMOVE;
				break;
			case TCA_VLAN_ACT_PUSH:
				fs->newvlan |= VLAN_INSERT;
				fs->vlan = vlan_tci;
				break;
			case TCA_VLAN_ACT_MODIFY:
				fs->newvlan |= VLAN_REWRITE;
				fs->vlan = vlan_tci;
				break;
			default:
				break;
			}
		} else if (is_tcf_pedit(a)) {
			u32 mask, val, offset;
			int nkeys, i;
			u8 htype;

			nkeys = tcf_pedit_nkeys(a);
			for (i = 0; i < nkeys; i++) {
				htype = tcf_pedit_htype(a, i);
				mask = tcf_pedit_mask(a, i);
				val = tcf_pedit_val(a, i);
				offset = tcf_pedit_offset(a, i);

				process_pedit_field(fs, val, mask, offset,
						    htype);
			}
		}
	}
}

static bool valid_l4_mask(u32 mask)
{
	u16 hi, lo;

	/* Either the upper 16-bits (SPORT) OR the lower
	 * 16-bits (DPORT) can be set, but NOT BOTH.
	 */
	hi = (mask >> 16) & 0xFFFF;
	lo = mask & 0xFFFF;

	return hi && lo ? false : true;
}

static bool valid_pedit_action(struct net_device *dev,
			       const struct tc_action *a)
{
	u32 mask, offset;
	u8 cmd, htype;
	int nkeys, i;

	nkeys = tcf_pedit_nkeys(a);
	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
		mask = tcf_pedit_mask(a, i);
		offset = tcf_pedit_offset(a, i);

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) {
			netdev_err(dev, "%s: Unsupported pedit cmd\n",
				   __func__);
			return false;
		}

		switch (htype) {
		case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
			switch (offset) {
			case PEDIT_ETH_DMAC_31_0:
			case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
			case PEDIT_ETH_SMAC_47_16:
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
			switch (offset) {
			case PEDIT_IP4_SRC:
			case PEDIT_IP4_DST:
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
			switch (offset) {
			case PEDIT_IP6_SRC_31_0:
			case PEDIT_IP6_SRC_63_32:
			case PEDIT_IP6_SRC_95_64:
			case PEDIT_IP6_SRC_127_96:
			case PEDIT_IP6_DST_31_0:
			case PEDIT_IP6_DST_63_32:
			case PEDIT_IP6_DST_95_64:
			case PEDIT_IP6_DST_127_96:
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
			switch (offset) {
			case PEDIT_TCP_SPORT_DPORT:
				if (!valid_l4_mask(~mask)) {
					netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
						   __func__);
					return false;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
			switch (offset) {
			case PEDIT_UDP_SPORT_DPORT:
				if (!valid_l4_mask(~mask)) {
					netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
						   __func__);
					return false;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit type\n",
				   __func__);
			return false;
		}
	}
	return true;
}

static int cxgb4_validate_flow_actions(struct net_device *dev,
				       struct tc_cls_flower_offload *cls)
{
	const struct tc_action *a;
	bool act_redir = false;
	bool act_pedit = false;
	bool act_vlan = false;
	LIST_HEAD(actions);

	tcf_exts_to_list(cls->exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_ok(a)) {
			/* Do nothing */
		} else if (is_tcf_gact_shot(a)) {
			/* Do nothing */
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct adapter *adap = netdev2adap(dev);
			struct net_device *n_dev;
			unsigned int i, ifindex;
			bool found = false;

			ifindex = tcf_mirred_ifindex(a);
			for_each_port(adap, i) {
				n_dev = adap->port[i];
				if (ifindex == n_dev->ifindex) {
					found = true;
					break;
				}
			}

			/* If interface doesn't belong to our hw, then
			 * the provided output port is not valid
			 */
			if (!found) {
				netdev_err(dev, "%s: Out port invalid\n",
					   __func__);
				return -EINVAL;
			}
			act_redir = true;
		} else if (is_tcf_vlan(a)) {
			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
			u32 vlan_action = tcf_vlan_action(a);

			switch (vlan_action) {
			case TCA_VLAN_ACT_POP:
				break;
			case TCA_VLAN_ACT_PUSH:
			case TCA_VLAN_ACT_MODIFY:
				if (proto != ETH_P_8021Q) {
					netdev_err(dev, "%s: Unsupported vlan proto\n",
						   __func__);
					return -EOPNOTSUPP;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported vlan action\n",
					   __func__);
				return -EOPNOTSUPP;
			}
			act_vlan = true;
		} else if (is_tcf_pedit(a)) {
			bool pedit_valid = valid_pedit_action(dev, a);

			if (!pedit_valid)
				return -EOPNOTSUPP;
			act_pedit = true;
		} else {
			netdev_err(dev, "%s: Unsupported action\n", __func__);
			return -EOPNOTSUPP;
		}
	}

	if ((act_pedit || act_vlan) && !act_redir) {
		netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
			   __func__);
		return -EINVAL;
	}

	return 0;
}

int cxgb4_tc_flower_replace(struct net_device *dev,
			    struct tc_cls_flower_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	struct ch_filter_specification *fs;
	struct filter_ctx ctx;
	int fidx;
	int ret;

	if (cxgb4_validate_flow_actions(dev, cls))
		return -EOPNOTSUPP;

	if (cxgb4_validate_flow_match(dev, cls))
		return -EOPNOTSUPP;

	ch_flower = allocate_flower_entry();
	if (!ch_flower) {
		netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
		return -ENOMEM;
	}

	fs = &ch_flower->fs;
	fs->hitcnts = 1;
	cxgb4_process_flow_match(dev, cls, fs);
	cxgb4_process_flow_actions(dev, cls, fs);

	fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET);
	if (fidx < 0) {
		netdev_err(dev, "%s: No fidx for offload.\n", __func__);
		ret = -ENOMEM;
		goto free_entry;
	}

	init_completion(&ctx.completion);
	ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		goto free_entry;
	}

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret) {
		ret = -ETIMEDOUT;
		goto free_entry;
	}

	ret = ctx.result;
	/* Check if hw returned error for filter creation */
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		goto free_entry;
	}

	INIT_HLIST_NODE(&ch_flower->link);
	ch_flower->tc_flower_cookie = cls->cookie;
	ch_flower->filter_id = ctx.tid;
	hash_add_rcu(adap->flower_anymatch_tbl, &ch_flower->link, cls->cookie);

	return ret;

free_entry:
	kfree(ch_flower);
	return ret;
}

int cxgb4_tc_flower_destroy(struct net_device *dev,
			    struct tc_cls_flower_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower)
		return -ENOENT;

	ret = cxgb4_del_filter(dev, ch_flower->filter_id);
	if (ret)
		goto err;

	hash_del_rcu(&ch_flower->link);
	kfree_rcu(ch_flower, rcu);

err:
	return ret;
}

static void ch_flower_stats_cb(struct timer_list *t)
{
	struct adapter *adap = from_timer(adap, t, flower_stats_timer);
	struct ch_tc_flower_entry *flower_entry;
	struct ch_tc_flower_stats *ofld_stats;
	unsigned int i;
	u64 packets;
	u64 bytes;
	int ret;

	rcu_read_lock();
	hash_for_each_rcu(adap->flower_anymatch_tbl, i, flower_entry, link) {
		ret = cxgb4_get_filter_counters(adap->port[0],
						flower_entry->filter_id,
						&packets, &bytes);
		if (!ret) {
			spin_lock(&flower_entry->lock);
			ofld_stats = &flower_entry->stats;

			if (ofld_stats->prev_packet_count != packets) {
				ofld_stats->prev_packet_count = packets;
				ofld_stats->last_used = jiffies;
			}
			spin_unlock(&flower_entry->lock);
		}
	}
	rcu_read_unlock();
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

int cxgb4_tc_flower_stats(struct net_device *dev,
			  struct tc_cls_flower_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_stats *ofld_stats;
	struct ch_tc_flower_entry *ch_flower;
	u64 packets;
	u64 bytes;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower) {
		ret = -ENOENT;
		goto err;
	}

	ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
					&packets, &bytes);
	if (ret < 0)
		goto err;

	spin_lock_bh(&ch_flower->lock);
	ofld_stats = &ch_flower->stats;
	if (ofld_stats->packet_count != packets) {
		if (ofld_stats->prev_packet_count != packets)
			ofld_stats->last_used = jiffies;
		tcf_exts_stats_update(cls->exts, bytes - ofld_stats->byte_count,
				      packets - ofld_stats->packet_count,
				      ofld_stats->last_used);

		ofld_stats->packet_count = packets;
		ofld_stats->byte_count = bytes;
		ofld_stats->prev_packet_count = packets;
	}
	spin_unlock_bh(&ch_flower->lock);
	return 0;

err:
	return ret;
}

void cxgb4_init_tc_flower(struct adapter *adap)
{
	hash_init(adap->flower_anymatch_tbl);
	timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
	if (adap->flower_stats_timer.function)
		del_timer_sync(&adap->flower_stats_timer);
}