// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/if_ether.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_wed.h"

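/* Flow rule state parsed from a flower classifier rule. Match keys and
 * mangle actions are collected here before being translated into a
 * hardware FOE (flow offload engine) entry.
 */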
struct mtk_flow_data {
	struct ethhdr eth;

	union {
		struct {
			__be32 src_addr;
			__be32 dst_addr;
		} v4;

		struct {
			struct in6_addr src_addr;
			struct in6_addr dst_addr;
		} v6;
	};

	__be16 src_port;
	__be16 dst_port;

	u16 vlan_in;

	struct {
		struct {
			u16 id;
			__be16 proto;
		} vlans[2];
		u8 num;
	} vlan;
	struct {
		u16 sid;
		u8 num;
	} pppoe;
};

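/* Offloaded flows are tracked in an rhashtable keyed by the flower rule
 * cookie, so later destroy/stats requests can find the matching entry.
 */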
static const struct rhashtable_params mtk_flow_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, node),
	.key_offset = offsetof(struct mtk_flow_entry, cookie),
	.key_len = sizeof(unsigned long),
	.automatic_shrinking = true,
};

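/* Helpers to copy the parsed 5-tuple into the FOE entry. The IPv4 variant
 * is called twice: once for the ingress tuple and once (after mangle
 * actions have been applied) for the egress tuple.
 */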
static int
mtk_flow_set_ipv4_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
		       struct mtk_flow_data *data, bool egress)
{
	return mtk_foe_entry_set_ipv4_tuple(eth, foe, egress,
					    data->v4.src_addr, data->src_port,
					    data->v4.dst_addr, data->dst_port);
}

static int
mtk_flow_set_ipv6_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
		       struct mtk_flow_data *data)
{
	return mtk_foe_entry_set_ipv6_tuple(eth, foe,
					    data->v6.src_addr.s6_addr32, data->src_port,
					    data->v6.dst_addr.s6_addr32, data->dst_port);
}

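/* Apply an ETH-header pedit action to the cached ethernet header. Only
 * offsets 0..8 (the destination and source MAC addresses) are accepted;
 * with a mask of 0xffff only the two changed bytes are written, otherwise
 * the full 32-bit value is copied.
 */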
static void
mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
{
	void *dest = eth + act->mangle.offset;
	const void *src = &act->mangle.val;

	if (act->mangle.offset > 8)
		return;

	if (act->mangle.mask == 0xffff) {
		src += 2;
		dest += 2;
	}

	memcpy(dest, src, act->mangle.mask ? 2 : 4);
}

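/* Resolve the WED (Wireless Ethernet Dispatch) path for a destination
 * device, if any. dev_fill_forward_path() walks the transmit path; when
 * the final hop is a MediaTek WDMA device, the flow can be handed to the
 * WLAN hardware directly and the WDMA index/queue/BSS/WCID are recorded
 * for the FOE entry.
 */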
static int
mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr,
		       struct mtk_wdma_info *info)
{
	struct net_device_path_stack stack;
	struct net_device_path *path;
	int err;

	if (!dev)
		return -ENODEV;

	if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
		return -1;

	rcu_read_lock();
	err = dev_fill_forward_path(dev, addr, &stack);
	rcu_read_unlock();
	if (err)
		return err;

	path = &stack.path[stack.num_paths - 1];
	if (path->type != DEV_PATH_MTK_WDMA)
		return -1;

	info->wdma_idx = path->mtk_wdma.wdma_idx;
	info->queue = path->mtk_wdma.queue;
	info->bss = path->mtk_wdma.bss;
	info->wcid = path->mtk_wdma.wcid;
	info->amsdu = path->mtk_wdma.amsdu;

	return 0;
}

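/* Decode a TCP/UDP pedit action into the source/destination port. The
 * 32-bit mangle word at offset 0 covers both ports, so the mask decides
 * which half is being rewritten.
 */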
static int
mtk_flow_mangle_ports(const struct flow_action_entry *act,
		      struct mtk_flow_data *data)
{
	u32 val = ntohl(act->mangle.val);

	switch (act->mangle.offset) {
	case 0:
		if (act->mangle.mask == ~htonl(0xffff))
			data->dst_port = cpu_to_be16(val);
		else
			data->src_port = cpu_to_be16(val >> 16);
		break;
	case 2:
		data->dst_port = cpu_to_be16(val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

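/* Decode an IPv4 pedit action: the mangle offset identifies whether the
 * source or the destination address is being rewritten.
 */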
static int
mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
		     struct mtk_flow_data *data)
{
	__be32 *dest;

	switch (act->mangle.offset) {
	case offsetof(struct iphdr, saddr):
		dest = &data->v4.src_addr;
		break;
	case offsetof(struct iphdr, daddr):
		dest = &data->v4.dst_addr;
		break;
	default:
		return -EINVAL;
	}

	memcpy(dest, &act->mangle.val, sizeof(u32));

	return 0;
}

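/* If the target device is a DSA user port behind the MediaTek tag
 * protocol, replace *dev with the conduit (CPU-facing) device and return
 * the switch port index so the FOE entry can carry the DSA tag.
 */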
static int
mtk_flow_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(*dev);
	if (IS_ERR(dp))
		return -ENODEV;

	if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
		return -ENODEV;

	*dev = dsa_port_to_conduit(dp);

	return dp->index;
#else
	return -ENODEV;
#endif
}

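/* Program the egress side of the FOE entry. WED destinations are mapped
 * to the matching PSE WDMA port (a fixed port 3 on NETSYS v1); wired
 * destinations must be one of the GDM ports, with the hardware queue
 * derived from the DSA port when one is present.
 */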
static int
mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
			   struct net_device *dev, const u8 *dest_mac,
			   int *wed_index)
{
	struct mtk_wdma_info info = {};
	int pse_port, dsa_port, queue;

	if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
		mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
				       info.bss, info.wcid, info.amsdu);
		if (mtk_is_netsys_v2_or_greater(eth)) {
			switch (info.wdma_idx) {
			case 0:
				pse_port = PSE_WDMA0_PORT;
				break;
			case 1:
				pse_port = PSE_WDMA1_PORT;
				break;
			case 2:
				pse_port = PSE_WDMA2_PORT;
				break;
			default:
				return -EINVAL;
			}
		} else {
			pse_port = 3;
		}
		*wed_index = info.wdma_idx;
		goto out;
	}

	dsa_port = mtk_flow_get_dsa_port(&dev);

	if (dev == eth->netdev[0])
		pse_port = PSE_GDM1_PORT;
	else if (dev == eth->netdev[1])
		pse_port = PSE_GDM2_PORT;
	else if (dev == eth->netdev[2])
		pse_port = PSE_GDM3_PORT;
	else
		return -EOPNOTSUPP;

	if (dsa_port >= 0) {
		mtk_foe_entry_set_dsa(eth, foe, dsa_port);
		queue = 3 + dsa_port;
	} else {
		queue = pse_port - 1;
	}
	mtk_foe_entry_set_queue(eth, foe, queue);

out:
	mtk_foe_entry_set_pse_port(eth, foe, pse_port);

	return 0;
}

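/* Translate a FLOW_CLS_REPLACE request into a FOE entry and commit it to
 * the PPE. Illustrative only: a flower rule of the shape
 *
 *   tc filter add dev eth0 ingress protocol ip flower ip_proto tcp \
 *           action mirred egress redirect dev wlan0
 *
 * ends up here; in practice the rules typically come from flowtable
 * offload, with full match keys and NAT mangle actions attached.
 */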
static int
mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
			 int ppe_index)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct net_device *idev = NULL, *odev = NULL;
	struct flow_action_entry *act;
	struct mtk_flow_data data = {};
	struct mtk_foe_entry foe;
	struct mtk_flow_entry *entry;
	int offload_type = 0;
	int wed_index = -1;
	u16 addr_type = 0;
	u8 l4proto = 0;
	int err = 0;
	int i;

	if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
		return -EEXIST;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
		struct flow_match_meta match;

		flow_rule_match_meta(rule, &match);
		if (mtk_is_netsys_v2_or_greater(eth)) {
			idev = __dev_get_by_index(&init_net, match.key->ingress_ifindex);
			if (idev && idev->netdev_ops == eth->netdev[0]->netdev_ops) {
				struct mtk_mac *mac = netdev_priv(idev);

				if (WARN_ON(mac->ppe_idx >= eth->soc->ppe_num))
					return -EINVAL;

				ppe_index = mac->ppe_idx;
			}
		}
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;

		if (flow_rule_has_control_flags(match.mask->flags,
						f->common.extack))
			return -EOPNOTSUPP;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		l4proto = match.key->ip_proto;
	} else {
		return -EOPNOTSUPP;
	}

	switch (addr_type) {
	case 0:
		offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
			struct flow_match_eth_addrs match;

			flow_rule_match_eth_addrs(rule, &match);
			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
		} else {
			return -EOPNOTSUPP;
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
			struct flow_match_vlan match;

			flow_rule_match_vlan(rule, &match);

			if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan_in = match.key->vlan_id;
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
		break;
	default:
		return -EOPNOTSUPP;
	}

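	/* First pass over the actions: cache ETH-header rewrites, record the
	 * redirect target and collect VLAN/PPPoE encapsulation. L3/L4 mangle
	 * actions are applied in a second pass below, after the match keys
	 * have been copied into the FOE entry.
	 */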
	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE:
			if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
				return -EOPNOTSUPP;
			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
				mtk_flow_offload_mangle_eth(act, &data.eth);
			break;
		case FLOW_ACTION_REDIRECT:
			odev = act->dev;
			break;
		case FLOW_ACTION_CSUM:
			break;
		case FLOW_ACTION_VLAN_PUSH:
			if (data.vlan.num + data.pppoe.num == 2 ||
			    act->vlan.proto != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan.vlans[data.vlan.num].id = act->vlan.vid;
			data.vlan.vlans[data.vlan.num].proto = act->vlan.proto;
			data.vlan.num++;
			break;
		case FLOW_ACTION_VLAN_POP:
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			if (data.pppoe.num == 1 ||
			    data.vlan.num == 2)
				return -EOPNOTSUPP;

			data.pppoe.sid = act->pppoe.sid;
			data.pppoe.num++;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (!is_valid_ether_addr(data.eth.h_source) ||
	    !is_valid_ether_addr(data.eth.h_dest))
		return -EINVAL;

	err = mtk_foe_entry_prepare(eth, &foe, offload_type, l4proto, 0,
				    data.eth.h_source, data.eth.h_dest);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports ports;

		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		flow_rule_match_ports(rule, &ports);
		data.src_port = ports.key->src;
		data.dst_port = ports.key->dst;
	} else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
		return -EOPNOTSUPP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs addrs;

		flow_rule_match_ipv4_addrs(rule, &addrs);

		data.v4.src_addr = addrs.key->src;
		data.v4.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv4_addr(eth, &foe, &data, false);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs addrs;

		flow_rule_match_ipv6_addrs(rule, &addrs);

		data.v6.src_addr = addrs.key->src;
		data.v6.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv6_addr(eth, &foe, &data);
	}

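	/* Second pass: fold NAT-style TCP/UDP port and IPv4 address rewrites
	 * into the cached tuple, which becomes the egress 5-tuple below.
	 */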
	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			err = mtk_flow_mangle_ports(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			err = mtk_flow_mangle_ipv4(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
			/* handled earlier */
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (err)
			return err;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		err = mtk_flow_set_ipv4_addr(eth, &foe, &data, true);
		if (err)
			return err;
	}

	if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
		foe.bridge.vlan = data.vlan_in;

	for (i = 0; i < data.vlan.num; i++)
		mtk_foe_entry_set_vlan(eth, &foe, data.vlan.vlans[i].id);

	if (data.pppoe.num == 1)
		mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);

	err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
					 &wed_index);
	if (err)
		return err;

	if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
		return err;

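	/* Track the offloaded flow in software so destroy/stats requests can
	 * find it again via its cookie.
	 */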
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->cookie = f->cookie;
	memcpy(&entry->data, &foe, sizeof(entry->data));
	entry->wed_index = wed_index;
	entry->ppe_index = ppe_index;

	err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
	if (err < 0)
		goto free;

	err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
				     mtk_flow_ht_params);
	if (err < 0)
		goto clear;

	return 0;

clear:
	mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
free:
	kfree(entry);
	if (wed_index >= 0)
		mtk_wed_flow_remove(wed_index);
	return err;
}

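/* Tear down an offloaded flow: clear the hardware FOE entry, drop the
 * software tracking entry and release the WED flow reference, if any.
 */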
static int
mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
	rhashtable_remove_fast(&eth->flow_table, &entry->node,
			       mtk_flow_ht_params);
	if (entry->wed_index >= 0)
		mtk_wed_flow_remove(entry->wed_index);
	kfree(entry);

	return 0;
}

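/* Report flow statistics: lastused is derived from the PPE idle timer,
 * and packet/byte counters come from the hardware MIB counters while the
 * entry is installed in the FOE table.
 */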
static int
mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;
	struct mtk_foe_accounting diff;
	u32 idle;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
	f->stats.lastused = jiffies - idle * HZ;

	if (entry->hash != 0xFFFF &&
	    mtk_foe_entry_get_mib(eth->ppe[entry->ppe_index], entry->hash,
				  &diff)) {
		f->stats.pkts += diff.packets;
		f->stats.bytes += diff.bytes;
	}

	return 0;
}

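/* All flow_cls_offload commands are serialized through a single mutex;
 * replace/destroy/stats can arrive concurrently from multiple blocks but
 * mutate shared PPE and rhashtable state.
 */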
static DEFINE_MUTEX(mtk_flow_offload_mutex);

int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
			 int ppe_index)
{
	int err;

	mutex_lock(&mtk_flow_offload_mutex);
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		err = mtk_flow_offload_replace(eth, cls, ppe_index);
		break;
	case FLOW_CLS_DESTROY:
		err = mtk_flow_offload_destroy(eth, cls);
		break;
	case FLOW_CLS_STATS:
		err = mtk_flow_offload_stats(eth, cls);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&mtk_flow_offload_mutex);

	return err;
}

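/* flow_block callback: only clsflower requests on offload-capable
 * devices are accepted; everything else falls back to software.
 */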
static int
mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			  void *cb_priv)
{
	struct flow_cls_offload *cls = type_data;
	struct net_device *dev = cb_priv;
	struct mtk_mac *mac;
	struct mtk_eth *eth;

	mac = netdev_priv(dev);
	eth = mac->hw;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	return mtk_flow_offload_cmd(eth, cls, 0);
}

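/* Bind/unbind the flow block. Block callbacks are reference counted so a
 * block shared between multiple binders is registered only once per
 * device and freed only when the last user unbinds.
 */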
static int
mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	static LIST_HEAD(block_cb_list);
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (!eth->soc->offload_version)
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	cb = mtk_eth_setup_tc_block_cb;
	f->driver_block_list = &block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (block_cb) {
			flow_block_cb_incref(block_cb);
			return 0;
		}
		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_incref(block_cb);
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		if (!flow_block_cb_decref(block_cb)) {
			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

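/* ndo_setup_tc entry point: both classic tc blocks (TC_SETUP_BLOCK) and
 * flowtable offload (TC_SETUP_FT) are handled through the same block
 * setup path.
 */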
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
		     void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
	case TC_SETUP_FT:
		return mtk_eth_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

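/* Initialize the cookie hashtable once a PPE instance with a valid FOE
 * table exists; without one there is nothing to offload to.
 */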
int mtk_eth_offload_init(struct mtk_eth *eth, u8 id)
{
	if (!eth->ppe[id] || !eth->ppe[id]->foe_table)
		return 0;
	return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}