// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/if_ether.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_wed.h"

struct mtk_flow_data {
	struct ethhdr eth;

	union {
		struct {
			__be32 src_addr;
			__be32 dst_addr;
		} v4;

		struct {
			struct in6_addr src_addr;
			struct in6_addr dst_addr;
		} v6;
	};

	__be16 src_port;
	__be16 dst_port;

	u16 vlan_in;

	struct {
		struct {
			u16 id;
			__be16 proto;
		} vlans[2];
		u8 num;
	} vlan;
	struct {
		u16 sid;
		u8 num;
	} pppoe;
};

static const struct rhashtable_params mtk_flow_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, node),
	.key_offset = offsetof(struct mtk_flow_entry, cookie),
	.key_len = sizeof(unsigned long),
	.automatic_shrinking = true,
};

static int
mtk_flow_set_ipv4_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
		       struct mtk_flow_data *data, bool egress)
{
	return mtk_foe_entry_set_ipv4_tuple(eth, foe, egress,
					    data->v4.src_addr, data->src_port,
					    data->v4.dst_addr, data->dst_port);
}

static int
mtk_flow_set_ipv6_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
		       struct mtk_flow_data *data)
{
	return mtk_foe_entry_set_ipv6_tuple(eth, foe,
					    data->v6.src_addr.s6_addr32, data->src_port,
					    data->v6.dst_addr.s6_addr32, data->dst_port);
}

static void
mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
{
	void *dest = eth + act->mangle.offset;
	const void *src = &act->mangle.val;

	if (act->mangle.offset > 8)
		return;

	if (act->mangle.mask == 0xffff) {
		src += 2;
		dest += 2;
	}

	memcpy(dest, src, act->mangle.mask ? 2 : 4);
}

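/* Walk the forwarding path of @dev and, if it terminates on a MediaTek
 * WED/WDMA device, fill @info with the parameters needed to offload the
 * flow to the wireless path.
 */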
static int
mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
{
	struct net_device_path_stack stack;
	struct net_device_path *path;
	int err;

	if (!dev)
		return -ENODEV;

	if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
		return -1;

	err = dev_fill_forward_path(dev, addr, &stack);
	if (err)
		return err;

	path = &stack.path[stack.num_paths - 1];
	if (path->type != DEV_PATH_MTK_WDMA)
		return -1;

	info->wdma_idx = path->mtk_wdma.wdma_idx;
	info->queue = path->mtk_wdma.queue;
	info->bss = path->mtk_wdma.bss;
	info->wcid = path->mtk_wdma.wcid;
	info->amsdu = path->mtk_wdma.amsdu;

	return 0;
}

static int
mtk_flow_mangle_ports(const struct flow_action_entry *act,
		      struct mtk_flow_data *data)
{
	u32 val = ntohl(act->mangle.val);

	switch (act->mangle.offset) {
	case 0:
		if (act->mangle.mask == ~htonl(0xffff))
			data->dst_port = cpu_to_be16(val);
		else
			data->src_port = cpu_to_be16(val >> 16);
		break;
	case 2:
		data->dst_port = cpu_to_be16(val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int
mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
		     struct mtk_flow_data *data)
{
	__be32 *dest;

	switch (act->mangle.offset) {
	case offsetof(struct iphdr, saddr):
		dest = &data->v4.src_addr;
		break;
	case offsetof(struct iphdr, daddr):
		dest = &data->v4.dst_addr;
		break;
	default:
		return -EINVAL;
	}

	memcpy(dest, &act->mangle.val, sizeof(u32));

	return 0;
}

static int
mtk_flow_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(*dev);
	if (IS_ERR(dp))
		return -ENODEV;

	if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
		return -ENODEV;

	*dev = dsa_port_to_conduit(dp);

	return dp->index;
#else
	return -ENODEV;
#endif
}

static int
mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
			   struct net_device *dev, const u8 *dest_mac,
			   int *wed_index)
{
	struct mtk_wdma_info info = {};
	int pse_port, dsa_port, queue;

	if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
		mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
				       info.bss, info.wcid, info.amsdu);
		if (mtk_is_netsys_v2_or_greater(eth)) {
			switch (info.wdma_idx) {
			case 0:
				pse_port = PSE_WDMA0_PORT;
				break;
			case 1:
				pse_port = PSE_WDMA1_PORT;
				break;
			case 2:
				pse_port = PSE_WDMA2_PORT;
				break;
			default:
				return -EINVAL;
			}
		} else {
			pse_port = 3;
		}
		*wed_index = info.wdma_idx;
		goto out;
	}

	dsa_port = mtk_flow_get_dsa_port(&dev);

	if (dev == eth->netdev[0])
		pse_port = PSE_GDM1_PORT;
	else if (dev == eth->netdev[1])
		pse_port = PSE_GDM2_PORT;
	else if (dev == eth->netdev[2])
		pse_port = PSE_GDM3_PORT;
	else
		return -EOPNOTSUPP;

	if (dsa_port >= 0) {
		mtk_foe_entry_set_dsa(eth, foe, dsa_port);
		queue = 3 + dsa_port;
	} else {
		queue = pse_port - 1;
	}
	mtk_foe_entry_set_queue(eth, foe, queue);

out:
	mtk_foe_entry_set_pse_port(eth, foe, pse_port);

	return 0;
}

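/* Handle a FLOW_CLS_REPLACE request: parse the flower match and actions,
 * build a PPE foe entry, resolve the output device and commit the entry
 * to the hardware flow table.
 */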
static int
mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
			 int ppe_index)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct net_device *idev = NULL, *odev = NULL;
	struct flow_action_entry *act;
	struct mtk_flow_data data = {};
	struct mtk_foe_entry foe;
	struct mtk_flow_entry *entry;
	int offload_type = 0;
	int wed_index = -1;
	u16 addr_type = 0;
	u8 l4proto = 0;
	int err = 0;
	int i;

	if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
		return -EEXIST;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
		struct flow_match_meta match;

		flow_rule_match_meta(rule, &match);
		if (mtk_is_netsys_v2_or_greater(eth)) {
			idev = __dev_get_by_index(&init_net, match.key->ingress_ifindex);
			if (idev && idev->netdev_ops == eth->netdev[0]->netdev_ops) {
				struct mtk_mac *mac = netdev_priv(idev);

				if (WARN_ON(mac->ppe_idx >= eth->soc->ppe_num))
					return -EINVAL;

				ppe_index = mac->ppe_idx;
			}
		}
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;

		if (flow_rule_has_control_flags(match.mask->flags,
						f->common.extack))
			return -EOPNOTSUPP;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		l4proto = match.key->ip_proto;
	} else {
		return -EOPNOTSUPP;
	}

	switch (addr_type) {
	case 0:
		offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
			struct flow_match_eth_addrs match;

			flow_rule_match_eth_addrs(rule, &match);
			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
		} else {
			return -EOPNOTSUPP;
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
			struct flow_match_vlan match;

			flow_rule_match_vlan(rule, &match);

			if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan_in = match.key->vlan_id;
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
		break;
	default:
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE:
			if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
				return -EOPNOTSUPP;
			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
				mtk_flow_offload_mangle_eth(act, &data.eth);
			break;
		case FLOW_ACTION_REDIRECT:
			odev = act->dev;
			break;
		case FLOW_ACTION_CSUM:
			break;
		case FLOW_ACTION_VLAN_PUSH:
			if (data.vlan.num + data.pppoe.num == 2 ||
			    act->vlan.proto != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan.vlans[data.vlan.num].id = act->vlan.vid;
			data.vlan.vlans[data.vlan.num].proto = act->vlan.proto;
			data.vlan.num++;
			break;
		case FLOW_ACTION_VLAN_POP:
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			if (data.pppoe.num == 1 ||
			    data.vlan.num == 2)
				return -EOPNOTSUPP;

			data.pppoe.sid = act->pppoe.sid;
			data.pppoe.num++;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (!is_valid_ether_addr(data.eth.h_source) ||
	    !is_valid_ether_addr(data.eth.h_dest))
		return -EINVAL;

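	/* Translate the collected match data into the hardware foe entry */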
	err = mtk_foe_entry_prepare(eth, &foe, offload_type, l4proto, 0,
				    data.eth.h_source, data.eth.h_dest);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports ports;

		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		flow_rule_match_ports(rule, &ports);
		data.src_port = ports.key->src;
		data.dst_port = ports.key->dst;
	} else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
		return -EOPNOTSUPP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs addrs;

		flow_rule_match_ipv4_addrs(rule, &addrs);

		data.v4.src_addr = addrs.key->src;
		data.v4.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv4_addr(eth, &foe, &data, false);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs addrs;

		flow_rule_match_ipv6_addrs(rule, &addrs);

		data.v6.src_addr = addrs.key->src;
		data.v6.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv6_addr(eth, &foe, &data);
	}

	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			err = mtk_flow_mangle_ports(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			err = mtk_flow_mangle_ipv4(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
			/* handled earlier */
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (err)
			return err;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		err = mtk_flow_set_ipv4_addr(eth, &foe, &data, true);
		if (err)
			return err;
	}

	if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
		foe.bridge.vlan = data.vlan_in;

	for (i = 0; i < data.vlan.num; i++)
		mtk_foe_entry_set_vlan(eth, &foe, data.vlan.vlans[i].id);

	if (data.pppoe.num == 1)
		mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);

	err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
					 &wed_index);
	if (err)
		return err;

	if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
		return err;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->cookie = f->cookie;
	memcpy(&entry->data, &foe, sizeof(entry->data));
	entry->wed_index = wed_index;
	entry->ppe_index = ppe_index;

	err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
	if (err < 0)
		goto free;

	err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
				     mtk_flow_ht_params);
	if (err < 0)
		goto clear;

	return 0;

clear:
	mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
free:
	kfree(entry);
	if (wed_index >= 0)
		mtk_wed_flow_remove(wed_index);
	return err;
}

static int
mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
	rhashtable_remove_fast(&eth->flow_table, &entry->node,
			       mtk_flow_ht_params);
	if (entry->wed_index >= 0)
		mtk_wed_flow_remove(entry->wed_index);
	kfree(entry);

	return 0;
}

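/* Report byte/packet counters and the last-used time of an offloaded flow
 * back to the flower classifier.
 */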
static int
mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;
	struct mtk_foe_accounting diff;
	u32 idle;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
	f->stats.lastused = jiffies - idle * HZ;

	if (entry->hash != 0xFFFF &&
	    mtk_foe_entry_get_mib(eth->ppe[entry->ppe_index], entry->hash,
				  &diff)) {
		f->stats.pkts += diff.packets;
		f->stats.bytes += diff.bytes;
	}

	return 0;
}

static DEFINE_MUTEX(mtk_flow_offload_mutex);

int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
			 int ppe_index)
{
	int err;

	mutex_lock(&mtk_flow_offload_mutex);
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		err = mtk_flow_offload_replace(eth, cls, ppe_index);
		break;
	case FLOW_CLS_DESTROY:
		err = mtk_flow_offload_destroy(eth, cls);
		break;
	case FLOW_CLS_STATS:
		err = mtk_flow_offload_stats(eth, cls);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&mtk_flow_offload_mutex);

	return err;
}

static int
mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct flow_cls_offload *cls = type_data;
	struct net_device *dev = cb_priv;
	struct mtk_mac *mac;
	struct mtk_eth *eth;

	mac = netdev_priv(dev);
	eth = mac->hw;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	return mtk_flow_offload_cmd(eth, cls, 0);
}

static int
mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	static LIST_HEAD(block_cb_list);
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (!eth->soc->offload_version)
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	cb = mtk_eth_setup_tc_block_cb;
	f->driver_block_list = &block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (block_cb) {
			flow_block_cb_incref(block_cb);
			return 0;
		}
		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_incref(block_cb);
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		if (!flow_block_cb_decref(block_cb)) {
			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
		     void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
	case TC_SETUP_FT:
		return mtk_eth_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

int mtk_eth_offload_init(struct mtk_eth *eth, u8 id)
{
	if (!eth->ppe[id] || !eth->ppe[id]->foe_table)
		return 0;
	return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}