xref: /linux/drivers/net/ethernet/mediatek/mtk_ppe_offload.c (revision 976ff48c2ac6e6b25b01428c9d7997bcd0fb2949)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/if_ether.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_wed.h"

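/* Parsed state of a flower rule: match keys for the L2/L3/L4 tuple plus
 * any VLAN/PPPoE encapsulation collected from push actions.
 */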
struct mtk_flow_data {
	struct ethhdr eth;

	union {
		struct {
			__be32 src_addr;
			__be32 dst_addr;
		} v4;

		struct {
			struct in6_addr src_addr;
			struct in6_addr dst_addr;
		} v6;
	};

	__be16 src_port;
	__be16 dst_port;

	u16 vlan_in;

	struct {
		struct {
			u16 id;
			__be16 proto;
		} vlans[2];
		u8 num;
	} vlan;
	struct {
		u16 sid;
		u8 num;
	} pppoe;
};

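/* Offloaded flows are tracked in eth->flow_table, keyed by the flower
 * cookie so that REPLACE/DESTROY/STATS can find the matching entry.
 */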
static const struct rhashtable_params mtk_flow_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, node),
	.key_offset = offsetof(struct mtk_flow_entry, cookie),
	.key_len = sizeof(unsigned long),
	.automatic_shrinking = true,
};

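/* Helpers to copy the parsed 5-tuple into the FOE (flow offload entry).
 * For IPv4 the tuple is written separately for the ingress and the
 * (possibly NATed) egress direction.
 */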
static int
mtk_flow_set_ipv4_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
		       struct mtk_flow_data *data, bool egress)
{
	return mtk_foe_entry_set_ipv4_tuple(eth, foe, egress,
					    data->v4.src_addr, data->src_port,
					    data->v4.dst_addr, data->dst_port);
}

static int
mtk_flow_set_ipv6_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
		       struct mtk_flow_data *data)
{
	return mtk_foe_entry_set_ipv6_tuple(eth, foe,
					    data->v6.src_addr.s6_addr32, data->src_port,
					    data->v6.dst_addr.s6_addr32, data->dst_port);
}

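/* Apply an Ethernet header pedit action to the local copy of the MAC
 * addresses. pedit presents rewrites as 32-bit words at offsets 0/4/8
 * within the header; a mask of 0xffff denotes a 16-bit rewrite, in which
 * case only two bytes at offset + 2 are copied.
 */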
static void
mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
{
	void *dest = eth + act->mangle.offset;
	const void *src = &act->mangle.val;

	if (act->mangle.offset > 8)
		return;

	if (act->mangle.mask == 0xffff) {
		src += 2;
		dest += 2;
	}

	memcpy(dest, src, act->mangle.mask ? 2 : 4);
}

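/* Check whether the destination resolves to a MediaTek WDMA (WED) path
 * and, if so, collect the WDMA index and WLAN station parameters needed
 * to forward the flow to the wireless hardware directly.
 */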
static int
mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
{
	struct net_device_path_stack stack;
	struct net_device_path *path;
	int err;

	if (!dev)
		return -ENODEV;

	if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
		return -1;

	rcu_read_lock();
	err = dev_fill_forward_path(dev, addr, &stack);
	rcu_read_unlock();
	if (err)
		return err;

	path = &stack.path[stack.num_paths - 1];
	if (path->type != DEV_PATH_MTK_WDMA)
		return -1;

	info->wdma_idx = path->mtk_wdma.wdma_idx;
	info->queue = path->mtk_wdma.queue;
	info->bss = path->mtk_wdma.bss;
	info->wcid = path->mtk_wdma.wcid;
	info->amsdu = path->mtk_wdma.amsdu;

	return 0;
}

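/* Apply a TCP/UDP pedit action to the parsed source/destination port.
 * The ports occupy the first four bytes of the L4 header, so only
 * offsets 0 and 2 are valid.
 */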
static int
mtk_flow_mangle_ports(const struct flow_action_entry *act,
		      struct mtk_flow_data *data)
{
	u32 val = ntohl(act->mangle.val);

	switch (act->mangle.offset) {
	case 0:
		if (act->mangle.mask == ~htonl(0xffff))
			data->dst_port = cpu_to_be16(val);
		else
			data->src_port = cpu_to_be16(val >> 16);
		break;
	case 2:
		data->dst_port = cpu_to_be16(val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

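/* Apply an IPv4 pedit action; only source/destination address rewrites
 * are supported.
 */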
static int
mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
		     struct mtk_flow_data *data)
{
	__be32 *dest;

	switch (act->mangle.offset) {
	case offsetof(struct iphdr, saddr):
		dest = &data->v4.src_addr;
		break;
	case offsetof(struct iphdr, daddr):
		dest = &data->v4.dst_addr;
		break;
	default:
		return -EINVAL;
	}

	memcpy(dest, &act->mangle.val, sizeof(u32));

	return 0;
}

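/* If @dev is a DSA user port behind a MediaTek-tagged switch, rewrite it
 * to the conduit device and return the switch port index for the FOE
 * DSA tagging setup.
 */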
static int
mtk_flow_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(*dev);
	if (IS_ERR(dp))
		return -ENODEV;

	if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
		return -ENODEV;

	*dev = dsa_port_to_conduit(dp);

	return dp->index;
#else
	return -ENODEV;
#endif
}

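/* Resolve the egress device into a PSE destination port for the FOE
 * entry. WDMA (WLAN) destinations take precedence; otherwise the device
 * must map to one of the GDM ports, optionally behind a DSA switch.
 */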
static int
mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
			   struct net_device *dev, const u8 *dest_mac,
			   int *wed_index)
{
	struct mtk_wdma_info info = {};
	int pse_port, dsa_port, queue;

	if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
		mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
				       info.bss, info.wcid, info.amsdu);
		if (mtk_is_netsys_v2_or_greater(eth)) {
			switch (info.wdma_idx) {
			case 0:
				pse_port = PSE_WDMA0_PORT;
				break;
			case 1:
				pse_port = PSE_WDMA1_PORT;
				break;
			case 2:
				pse_port = PSE_WDMA2_PORT;
				break;
			default:
				return -EINVAL;
			}
		} else {
			pse_port = 3;
		}
		*wed_index = info.wdma_idx;
		goto out;
	}

	dsa_port = mtk_flow_get_dsa_port(&dev);

	if (dev == eth->netdev[0])
		pse_port = PSE_GDM1_PORT;
	else if (dev == eth->netdev[1])
		pse_port = PSE_GDM2_PORT;
	else if (dev == eth->netdev[2])
		pse_port = PSE_GDM3_PORT;
	else
		return -EOPNOTSUPP;

	if (dsa_port >= 0) {
		mtk_foe_entry_set_dsa(eth, foe, dsa_port);
		queue = 3 + dsa_port;
	} else {
		queue = pse_port - 1;
	}
	mtk_foe_entry_set_queue(eth, foe, queue);

out:
	mtk_foe_entry_set_pse_port(eth, foe, pse_port);

	return 0;
}

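/* Check whether the ingress device belongs to this driver by comparing
 * its netdev_ops with those of our own ports.
 */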
static bool
mtk_flow_is_valid_idev(const struct mtk_eth *eth, const struct net_device *idev)
{
	size_t i;

	if (!idev)
		return false;

	for (i = 0; i < ARRAY_SIZE(eth->netdev); i++) {
		if (!eth->netdev[i])
			continue;

		if (idev->netdev_ops == eth->netdev[i]->netdev_ops)
			return true;
	}

	return false;
}

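/* FLOW_CLS_REPLACE: translate a flower rule into a FOE entry, commit it
 * to the selected PPE and track it in the flow table.
 */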
static int
mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
			 int ppe_index)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct net_device *idev = NULL, *odev = NULL;
	struct flow_action_entry *act;
	struct mtk_flow_data data = {};
	struct mtk_foe_entry foe;
	struct mtk_flow_entry *entry;
	int offload_type = 0;
	int wed_index = -1;
	u16 addr_type = 0;
	u8 l4proto = 0;
	int err = 0;
	int i;

	if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
		return -EEXIST;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
		struct flow_match_meta match;

		flow_rule_match_meta(rule, &match);
		if (mtk_is_netsys_v2_or_greater(eth)) {
			idev = __dev_get_by_index(&init_net, match.key->ingress_ifindex);
			if (mtk_flow_is_valid_idev(eth, idev)) {
				struct mtk_mac *mac = netdev_priv(idev);

				if (WARN_ON(mac->ppe_idx >= eth->soc->ppe_num))
					return -EINVAL;

				ppe_index = mac->ppe_idx;
			}
		}
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;

		if (flow_rule_has_control_flags(match.mask->flags,
						f->common.extack))
			return -EOPNOTSUPP;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		l4proto = match.key->ip_proto;
	} else {
		return -EOPNOTSUPP;
	}

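	/* An addr_type of zero means no L3 keys were given: offload as an
	 * L2 bridge flow.
	 */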
	switch (addr_type) {
	case 0:
		offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
			struct flow_match_eth_addrs match;

			flow_rule_match_eth_addrs(rule, &match);
			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
		} else {
			return -EOPNOTSUPP;
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
			struct flow_match_vlan match;

			flow_rule_match_vlan(rule, &match);

			if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan_in = match.key->vlan_id;
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
		break;
	default:
		return -EOPNOTSUPP;
	}

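	/* First pass over the actions: collect the redirect target, VLAN
	 * and PPPoE encapsulation, and Ethernet header rewrites.
	 */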
	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE:
			if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
				return -EOPNOTSUPP;
			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
				mtk_flow_offload_mangle_eth(act, &data.eth);
			break;
		case FLOW_ACTION_REDIRECT:
			odev = act->dev;
			break;
		case FLOW_ACTION_CSUM:
			break;
		case FLOW_ACTION_VLAN_PUSH:
			if (data.vlan.num + data.pppoe.num == 2 ||
			    act->vlan.proto != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan.vlans[data.vlan.num].id = act->vlan.vid;
			data.vlan.vlans[data.vlan.num].proto = act->vlan.proto;
			data.vlan.num++;
			break;
		case FLOW_ACTION_VLAN_POP:
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			if (data.pppoe.num == 1 ||
			    data.vlan.num == 2)
				return -EOPNOTSUPP;

			data.pppoe.sid = act->pppoe.sid;
			data.pppoe.num++;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (!is_valid_ether_addr(data.eth.h_source) ||
	    !is_valid_ether_addr(data.eth.h_dest))
		return -EINVAL;

	err = mtk_foe_entry_prepare(eth, &foe, offload_type, l4proto, 0,
				    data.eth.h_source, data.eth.h_dest);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports ports;

		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		flow_rule_match_ports(rule, &ports);
		data.src_port = ports.key->src;
		data.dst_port = ports.key->dst;
	} else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
		return -EOPNOTSUPP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs addrs;

		flow_rule_match_ipv4_addrs(rule, &addrs);

		data.v4.src_addr = addrs.key->src;
		data.v4.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv4_addr(eth, &foe, &data, false);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs addrs;

		flow_rule_match_ipv6_addrs(rule, &addrs);

		data.v6.src_addr = addrs.key->src;
		data.v6.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv6_addr(eth, &foe, &data);
	}

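	/* Second pass: L3/L4 rewrites, applied to the parsed tuple before
	 * it is written back as the egress tuple.
	 */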
	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			err = mtk_flow_mangle_ports(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			err = mtk_flow_mangle_ipv4(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
			/* handled earlier */
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (err)
			return err;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		err = mtk_flow_set_ipv4_addr(eth, &foe, &data, true);
		if (err)
			return err;
	}

	if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
		foe.bridge.vlan = data.vlan_in;

	for (i = 0; i < data.vlan.num; i++)
		mtk_foe_entry_set_vlan(eth, &foe, data.vlan.vlans[i].id);

	if (data.pppoe.num == 1)
		mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);

	err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
					 &wed_index);
	if (err)
		return err;

	if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
		return err;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->cookie = f->cookie;
	memcpy(&entry->data, &foe, sizeof(entry->data));
	entry->wed_index = wed_index;
	entry->ppe_index = ppe_index;

	err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
	if (err < 0)
		goto free;

	err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
				     mtk_flow_ht_params);
	if (err < 0)
		goto clear;

	return 0;

clear:
	mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
free:
	kfree(entry);
	if (wed_index >= 0)
		mtk_wed_flow_remove(wed_index);
	return err;
}

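/* FLOW_CLS_DESTROY: remove the FOE entry and drop all associated state. */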
static int
mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
	rhashtable_remove_fast(&eth->flow_table, &entry->node,
			       mtk_flow_ht_params);
	if (entry->wed_index >= 0)
		mtk_wed_flow_remove(entry->wed_index);
	kfree(entry);

	return 0;
}

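/* FLOW_CLS_STATS: derive the last-used time from the PPE idle timer and,
 * when hardware counters are available, add packet/byte deltas from the
 * MIB counters.
 */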
static int
mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;
	struct mtk_foe_accounting diff;
	u32 idle;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
	f->stats.lastused = jiffies - idle * HZ;

	if (entry->hash != 0xFFFF &&
	    mtk_foe_entry_get_mib(eth->ppe[entry->ppe_index], entry->hash,
				  &diff)) {
		f->stats.pkts += diff.packets;
		f->stats.bytes += diff.bytes;
	}

	return 0;
}

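/* Serializes REPLACE/DESTROY/STATS handling across all PPE instances. */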
static DEFINE_MUTEX(mtk_flow_offload_mutex);

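/* Dispatch a flower command. ppe_index selects the target PPE, unless a
 * valid ingress device in the rule overrides it.
 */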
int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
			 int ppe_index)
{
	int err;

	mutex_lock(&mtk_flow_offload_mutex);
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		err = mtk_flow_offload_replace(eth, cls, ppe_index);
		break;
	case FLOW_CLS_DESTROY:
		err = mtk_flow_offload_destroy(eth, cls);
		break;
	case FLOW_CLS_STATS:
		err = mtk_flow_offload_stats(eth, cls);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&mtk_flow_offload_mutex);

	return err;
}

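/* flow_block callback: accept only CLSFLOWER on offload-capable devices,
 * targeting PPE 0 by default.
 */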
static int
mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct flow_cls_offload *cls = type_data;
	struct net_device *dev = cb_priv;
	struct mtk_mac *mac;
	struct mtk_eth *eth;

	mac = netdev_priv(dev);
	eth = mac->hw;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	return mtk_flow_offload_cmd(eth, cls, 0);
}

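/* Bind or unbind the ingress flow block, sharing refcounted block
 * callbacks across ports through a driver-global list.
 */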
static int
mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	static LIST_HEAD(block_cb_list);
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (!eth->soc->offload_version)
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	cb = mtk_eth_setup_tc_block_cb;
	f->driver_block_list = &block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (block_cb) {
			flow_block_cb_incref(block_cb);
			return 0;
		}
		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_incref(block_cb);
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		if (!flow_block_cb_decref(block_cb)) {
			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

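/* ndo_setup_tc entry point: the tc block and netfilter flowtable
 * (TC_SETUP_FT) paths share the same block setup.
 */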
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
		     void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
	case TC_SETUP_FT:
		return mtk_eth_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

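/* Initialize the cookie -> flow entry hash table; a no-op if the PPE
 * instance is missing or has no FOE table.
 */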
int mtk_eth_offload_init(struct mtk_eth *eth, u8 id)
{
	if (!eth->ppe[id] || !eth->ppe[id]->foe_table)
		return 0;
	return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}