xref: /linux/drivers/net/ethernet/mediatek/mtk_ppe.c (revision c9d23f9657cabfd2836a096bf6eddf8df2cf1434)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
3 
4 #include <linux/kernel.h>
5 #include <linux/io.h>
6 #include <linux/iopoll.h>
7 #include <linux/etherdevice.h>
8 #include <linux/platform_device.h>
9 #include <linux/if_ether.h>
10 #include <linux/if_vlan.h>
11 #include <net/dsa.h>
12 #include "mtk_eth_soc.h"
13 #include "mtk_ppe.h"
14 #include "mtk_ppe_regs.h"
15 
16 static DEFINE_SPINLOCK(ppe_lock);
17 
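/*
 * Bridged (L2) flows are tracked in a software rhashtable, keyed on the
 * bridge tuple (MAC addresses and VLAN) stored in entry->data.bridge up to
 * the key_end marker.
 */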
18 static const struct rhashtable_params mtk_flow_l2_ht_params = {
19 	.head_offset = offsetof(struct mtk_flow_entry, l2_node),
20 	.key_offset = offsetof(struct mtk_flow_entry, data.bridge),
21 	.key_len = offsetof(struct mtk_foe_bridge, key_end),
22 	.automatic_shrinking = true,
23 };
24 
25 static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
26 {
27 	writel(val, ppe->base + reg);
28 }
29 
30 static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
31 {
32 	return readl(ppe->base + reg);
33 }
34 
35 static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
36 {
37 	u32 val;
38 
39 	val = ppe_r32(ppe, reg);
40 	val &= ~mask;
41 	val |= set;
42 	ppe_w32(ppe, reg, val);
43 
44 	return val;
45 }
46 
47 static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
48 {
49 	return ppe_m32(ppe, reg, 0, val);
50 }
51 
52 static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
53 {
54 	return ppe_m32(ppe, reg, val, 0);
55 }
56 
57 static u32 mtk_eth_timestamp(struct mtk_eth *eth)
58 {
59 	return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
60 }
61 
62 static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
63 {
64 	int ret;
65 	u32 val;
66 
67 	ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
68 				 !(val & MTK_PPE_GLO_CFG_BUSY),
69 				 20, MTK_PPE_WAIT_TIMEOUT_US);
70 
71 	if (ret)
72 		dev_err(ppe->dev, "PPE table busy\n");
73 
74 	return ret;
75 }
76 
77 static int mtk_ppe_mib_wait_busy(struct mtk_ppe *ppe)
78 {
79 	int ret;
80 	u32 val;
81 
82 	ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val,
83 				 !(val & MTK_PPE_MIB_SER_CR_ST),
84 				 20, MTK_PPE_WAIT_TIMEOUT_US);
85 
86 	if (ret)
87 		dev_err(ppe->dev, "MIB table busy\n");
88 
89 	return ret;
90 }
91 
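/*
 * Read the hardware MIB counters for one FOE entry: start an indirect read
 * through MTK_PPE_MIB_SER_CR, wait for it to complete, then assemble the
 * 64-bit byte and packet counts from the three result registers.
 */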
92 static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets)
93 {
94 	u32 byte_cnt_low, byte_cnt_high, pkt_cnt_low, pkt_cnt_high;
95 	u32 val, cnt_r0, cnt_r1, cnt_r2;
96 	int ret;
97 
98 	val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST;
99 	ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val);
100 
101 	ret = mtk_ppe_mib_wait_busy(ppe);
102 	if (ret)
103 		return ret;
104 
105 	cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
106 	cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
107 	cnt_r2 = readl(ppe->base + MTK_PPE_MIB_SER_R2);
108 
109 	byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0);
110 	byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
111 	pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
112 	pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
113 	*bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
114 	*packets = ((u64)pkt_cnt_high << 16) | pkt_cnt_low;
115 
116 	return 0;
117 }
118 
119 static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
120 {
121 	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
122 	ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
123 }
124 
125 static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
126 {
127 	mtk_ppe_cache_clear(ppe);
128 
129 	ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
130 		enable * MTK_PPE_CACHE_CTL_EN);
131 }
132 
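/*
 * Software copy of the hash the PPE uses to place an entry in the FOE
 * table, so the driver can locate the hardware bucket for a flow. The
 * result is scaled by the per-SoC hash_offset and masked to the table size.
 */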
133 static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
134 {
135 	u32 hv1, hv2, hv3;
136 	u32 hash;
137 
138 	switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
139 	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
140 	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
141 		hv1 = e->ipv4.orig.ports;
142 		hv2 = e->ipv4.orig.dest_ip;
143 		hv3 = e->ipv4.orig.src_ip;
144 		break;
145 	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
146 	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
147 		hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
148 		hv1 ^= e->ipv6.ports;
149 
150 		hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
151 		hv2 ^= e->ipv6.dest_ip[0];
152 
153 		hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
154 		hv3 ^= e->ipv6.src_ip[0];
155 		break;
156 	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
157 	case MTK_PPE_PKT_TYPE_IPV6_6RD:
158 	default:
159 		WARN_ON_ONCE(1);
160 		return MTK_PPE_HASH_MASK;
161 	}
162 
163 	hash = (hv1 & hv2) | ((~hv1) & hv3);
164 	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
165 	hash ^= hv1 ^ hv2 ^ hv3;
166 	hash ^= hash >> 16;
167 	hash <<= (ffs(eth->soc->hash_offset) - 1);
168 	hash &= MTK_PPE_ENTRIES - 1;
169 
170 	return hash;
171 }
172 
173 static inline struct mtk_foe_mac_info *
174 mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
175 {
176 	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
177 
178 	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
179 		return &entry->bridge.l2;
180 
181 	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
182 		return &entry->ipv6.l2;
183 
184 	return &entry->ipv4.l2;
185 }
186 
187 static inline u32 *
188 mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
189 {
190 	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
191 
192 	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
193 		return &entry->bridge.ib2;
194 
195 	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
196 		return &entry->ipv6.ib2;
197 
198 	return &entry->ipv4.ib2;
199 }
200 
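/*
 * Initialize a FOE entry in BIND state for the given packet type, L4
 * protocol and destination PSE port: fill ib1/ib2, copy the MAC addresses
 * and set the inner ethertype. Field layouts differ between NETSYS v1 and
 * v2 SoCs.
 */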
201 int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
202 			  int type, int l4proto, u8 pse_port, u8 *src_mac,
203 			  u8 *dest_mac)
204 {
205 	struct mtk_foe_mac_info *l2;
206 	u32 ports_pad, val;
207 
208 	memset(entry, 0, sizeof(*entry));
209 
210 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
211 		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
212 		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
213 		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
214 		      MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
215 		entry->ib1 = val;
216 
217 		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
218 		      FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
219 	} else {
220 		int port_mg = eth->soc->offload_version > 1 ? 0 : 0x3f;
221 
222 		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
223 		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
224 		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
225 		      MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
226 		entry->ib1 = val;
227 
228 		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
229 		      FIELD_PREP(MTK_FOE_IB2_PORT_MG, port_mg) |
230 		      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
231 	}
232 
233 	if (is_multicast_ether_addr(dest_mac))
234 		val |= mtk_get_ib2_multicast_mask(eth);
235 
236 	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
237 	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
238 		entry->ipv4.orig.ports = ports_pad;
239 	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
240 		entry->ipv6.ports = ports_pad;
241 
242 	if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
243 		ether_addr_copy(entry->bridge.src_mac, src_mac);
244 		ether_addr_copy(entry->bridge.dest_mac, dest_mac);
245 		entry->bridge.ib2 = val;
246 		l2 = &entry->bridge.l2;
247 	} else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
248 		entry->ipv6.ib2 = val;
249 		l2 = &entry->ipv6.l2;
250 	} else {
251 		entry->ipv4.ib2 = val;
252 		l2 = &entry->ipv4.l2;
253 	}
254 
255 	l2->dest_mac_hi = get_unaligned_be32(dest_mac);
256 	l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
257 	l2->src_mac_hi = get_unaligned_be32(src_mac);
258 	l2->src_mac_lo = get_unaligned_be16(src_mac + 4);
259 
260 	if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
261 		l2->etype = ETH_P_IPV6;
262 	else
263 		l2->etype = ETH_P_IP;
264 
265 	return 0;
266 }
267 
268 int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
269 			       struct mtk_foe_entry *entry, u8 port)
270 {
271 	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
272 	u32 val = *ib2;
273 
274 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
275 		val &= ~MTK_FOE_IB2_DEST_PORT_V2;
276 		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
277 	} else {
278 		val &= ~MTK_FOE_IB2_DEST_PORT;
279 		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
280 	}
281 	*ib2 = val;
282 
283 	return 0;
284 }
285 
286 int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
287 				 struct mtk_foe_entry *entry, bool egress,
288 				 __be32 src_addr, __be16 src_port,
289 				 __be32 dest_addr, __be16 dest_port)
290 {
291 	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
292 	struct mtk_ipv4_tuple *t;
293 
294 	switch (type) {
295 	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
296 		if (egress) {
297 			t = &entry->ipv4.new;
298 			break;
299 		}
300 		fallthrough;
301 	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
302 	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
303 		t = &entry->ipv4.orig;
304 		break;
305 	case MTK_PPE_PKT_TYPE_IPV6_6RD:
306 		entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
307 		entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
308 		return 0;
309 	default:
310 		WARN_ON_ONCE(1);
311 		return -EINVAL;
312 	}
313 
314 	t->src_ip = be32_to_cpu(src_addr);
315 	t->dest_ip = be32_to_cpu(dest_addr);
316 
317 	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
318 		return 0;
319 
320 	t->src_port = be16_to_cpu(src_port);
321 	t->dest_port = be16_to_cpu(dest_port);
322 
323 	return 0;
324 }
325 
326 int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
327 				 struct mtk_foe_entry *entry,
328 				 __be32 *src_addr, __be16 src_port,
329 				 __be32 *dest_addr, __be16 dest_port)
330 {
331 	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
332 	u32 *src, *dest;
333 	int i;
334 
335 	switch (type) {
336 	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
337 		src = entry->dslite.tunnel_src_ip;
338 		dest = entry->dslite.tunnel_dest_ip;
339 		break;
340 	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
341 	case MTK_PPE_PKT_TYPE_IPV6_6RD:
342 		entry->ipv6.src_port = be16_to_cpu(src_port);
343 		entry->ipv6.dest_port = be16_to_cpu(dest_port);
344 		fallthrough;
345 	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
346 		src = entry->ipv6.src_ip;
347 		dest = entry->ipv6.dest_ip;
348 		break;
349 	default:
350 		WARN_ON_ONCE(1);
351 		return -EINVAL;
352 	}
353 
354 	for (i = 0; i < 4; i++)
355 		src[i] = be32_to_cpu(src_addr[i]);
356 	for (i = 0; i < 4; i++)
357 		dest[i] = be32_to_cpu(dest_addr[i]);
358 
359 	return 0;
360 }
361 
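/*
 * Mark the flow as egressing through a DSA switch port. With the MTK tag,
 * the etype field carries the port bit; BIT(8) appears to flag frames that
 * also carry a VLAN tag.
 */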
362 int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
363 			  int port)
364 {
365 	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
366 
367 	l2->etype = BIT(port);
368 
369 	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
370 		entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
371 	else
372 		l2->etype |= BIT(8);
373 
374 	entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);
375 
376 	return 0;
377 }
378 
379 int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
380 			   int vid)
381 {
382 	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
383 
384 	switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
385 	case 0:
386 		entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
387 			      mtk_prep_ib1_vlan_layer(eth, 1);
388 		l2->vlan1 = vid;
389 		return 0;
390 	case 1:
391 		if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
392 			l2->vlan1 = vid;
393 			l2->etype |= BIT(8);
394 		} else {
395 			l2->vlan2 = vid;
396 			entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
397 		}
398 		return 0;
399 	default:
400 		return -ENOSPC;
401 	}
402 }
403 
404 int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
405 			    int sid)
406 {
407 	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
408 
409 	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
410 	    (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
411 		l2->etype = ETH_P_PPP_SES;
412 
413 	entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
414 	l2->pppoe_id = sid;
415 
416 	return 0;
417 }
418 
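/*
 * Steer the flow to a WDMA device for Wi-Fi offload. On NETSYS v2 the
 * receive ring index goes into ib2 and the BSS/WCID into the dedicated
 * winfo field; on v1 all three are packed into the vlan2 field instead.
 */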
419 int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
420 			   int wdma_idx, int txq, int bss, int wcid)
421 {
422 	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
423 	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
424 
425 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
426 		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
427 		*ib2 |=  FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
428 			 MTK_FOE_IB2_WDMA_WINFO_V2;
429 		l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
430 			    FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
431 	} else {
432 		*ib2 &= ~MTK_FOE_IB2_PORT_MG;
433 		*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
434 		if (wdma_idx)
435 			*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
436 		l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
437 			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
438 			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
439 	}
440 
441 	return 0;
442 }
443 
444 int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
445 			    unsigned int queue)
446 {
447 	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
448 
449 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
450 		*ib2 &= ~MTK_FOE_IB2_QID_V2;
451 		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
452 		*ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
453 	} else {
454 		*ib2 &= ~MTK_FOE_IB2_QID;
455 		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, queue);
456 		*ib2 |= MTK_FOE_IB2_PSE_QOS;
457 	}
458 
459 	return 0;
460 }
461 
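/*
 * Compare a software flow against a hardware FOE entry: the UDP flag in
 * ib1 and the type-dependent tuple fields must match.
 */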
462 static bool
463 mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
464 		     struct mtk_foe_entry *data)
465 {
466 	int type, len;
467 
468 	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
469 		return false;
470 
471 	type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
472 	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
473 		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
474 	else
475 		len = offsetof(struct mtk_foe_entry, ipv4.ib2);
476 
477 	return !memcmp(&entry->data.data, &data->data, len - 4);
478 }
479 
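/*
 * Tear down a flow entry. For L2 flows, remove the rhashtable node and
 * clear every attached subflow; otherwise invalidate the hardware FOE
 * entry, reset its accounting and unlink the software state. Must be
 * called with ppe_lock held.
 */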
480 static void
481 __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
482 {
483 	struct hlist_head *head;
484 	struct hlist_node *tmp;
485 
486 	if (entry->type == MTK_FLOW_TYPE_L2) {
487 		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
488 				       mtk_flow_l2_ht_params);
489 
490 		head = &entry->l2_flows;
491 		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
492 			__mtk_foe_entry_clear(ppe, entry);
493 		return;
494 	}
495 
496 	hlist_del_init(&entry->list);
497 	if (entry->hash != 0xffff) {
498 		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);
499 
500 		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
501 		hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
502 		dma_wmb();
503 		if (ppe->accounting) {
504 			struct mtk_foe_accounting *acct;
505 
506 			acct = ppe->acct_table + entry->hash * sizeof(*acct);
507 			acct->packets = 0;
508 			acct->bytes = 0;
509 		}
510 	}
511 	entry->hash = 0xffff;
512 
513 	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
514 		return;
515 
516 	hlist_del_init(&entry->l2_data.list);
517 	kfree(entry);
518 }
519 
520 static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
521 {
522 	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
523 	u16 now = mtk_eth_timestamp(ppe->eth);
524 	u16 timestamp = ib1 & ib1_ts_mask;
525 
526 	if (timestamp > now)
527 		return ib1_ts_mask + 1 - timestamp + now;
528 	else
529 		return now - timestamp;
530 }
531 
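/*
 * Refresh an L2 flow from its hardware subflow entries: drop subflows that
 * are no longer in BIND state and propagate the most recent (least idle)
 * timestamp into the base entry's ib1.
 */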
532 static void
533 mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
534 {
535 	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
536 	struct mtk_flow_entry *cur;
537 	struct mtk_foe_entry *hwe;
538 	struct hlist_node *tmp;
539 	int idle;
540 
541 	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
542 	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
543 		int cur_idle;
544 		u32 ib1;
545 
546 		hwe = mtk_foe_get_entry(ppe, cur->hash);
547 		ib1 = READ_ONCE(hwe->ib1);
548 
549 		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
550 			cur->hash = 0xffff;
551 			__mtk_foe_entry_clear(ppe, cur);
552 			continue;
553 		}
554 
555 		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
556 		if (cur_idle >= idle)
557 			continue;
558 
559 		idle = cur_idle;
560 		entry->data.ib1 &= ~ib1_ts_mask;
561 		entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
562 	}
563 }
564 
565 static void
566 mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
567 {
568 	struct mtk_foe_entry foe = {};
569 	struct mtk_foe_entry *hwe;
570 
571 	spin_lock_bh(&ppe_lock);
572 
573 	if (entry->type == MTK_FLOW_TYPE_L2) {
574 		mtk_flow_entry_update_l2(ppe, entry);
575 		goto out;
576 	}
577 
578 	if (entry->hash == 0xffff)
579 		goto out;
580 
581 	hwe = mtk_foe_get_entry(ppe, entry->hash);
582 	memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
583 	if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
584 		entry->hash = 0xffff;
585 		goto out;
586 	}
587 
588 	entry->data.ib1 = foe.ib1;
589 
590 out:
591 	spin_unlock_bh(&ppe_lock);
592 }
593 
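/*
 * Write a prepared entry into the hardware FOE table: stamp the bind
 * timestamp, copy the data words first and only publish ib1 afterwards, so
 * the PPE never sees a bound entry with stale data. Called with ppe_lock
 * held.
 */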
594 static void
595 __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
596 		       u16 hash)
597 {
598 	struct mtk_eth *eth = ppe->eth;
599 	u16 timestamp = mtk_eth_timestamp(eth);
600 	struct mtk_foe_entry *hwe;
601 
602 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
603 		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
604 		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
605 					 timestamp);
606 	} else {
607 		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
608 		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
609 					 timestamp);
610 	}
611 
612 	hwe = mtk_foe_get_entry(ppe, hash);
613 	memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1));
614 	wmb();
615 	hwe->ib1 = entry->ib1;
616 
617 	if (ppe->accounting)
618 		*mtk_foe_entry_ib2(eth, hwe) |= MTK_FOE_IB2_MIB_CNT;
619 
620 	dma_wmb();
621 
622 	mtk_ppe_cache_clear(ppe);
623 }
624 
625 void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
626 {
627 	spin_lock_bh(&ppe_lock);
628 	__mtk_foe_entry_clear(ppe, entry);
629 	spin_unlock_bh(&ppe_lock);
630 }
631 
632 static int
633 mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
634 {
635 	entry->type = MTK_FLOW_TYPE_L2;
636 
637 	return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
638 				      mtk_flow_l2_ht_params);
639 }
640 
641 int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
642 {
643 	const struct mtk_soc_data *soc = ppe->eth->soc;
644 	int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
645 	u32 hash;
646 
647 	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
648 		return mtk_foe_entry_commit_l2(ppe, entry);
649 
650 	hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
651 	entry->hash = 0xffff;
652 	spin_lock_bh(&ppe_lock);
653 	hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
654 	spin_unlock_bh(&ppe_lock);
655 
656 	return 0;
657 }
658 
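/*
 * Create a per-hash subflow for a bridged L2 flow: clone the unbound
 * hardware entry at @hash, overlay the L2 flow's ib1 bits, L2 header info
 * and ib2, then commit it. The subflow is linked to the base flow so it is
 * aged and cleared together with it.
 */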
659 static void
660 mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
661 			     u16 hash)
662 {
663 	const struct mtk_soc_data *soc = ppe->eth->soc;
664 	struct mtk_flow_entry *flow_info;
665 	struct mtk_foe_entry foe = {}, *hwe;
666 	struct mtk_foe_mac_info *l2;
667 	u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
668 	int type;
669 
670 	flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
671 	if (!flow_info)
672 		return;
673 
674 	flow_info->l2_data.base_flow = entry;
675 	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
676 	flow_info->hash = hash;
677 	hlist_add_head(&flow_info->list,
678 		       &ppe->foe_flow[hash / soc->hash_offset]);
679 	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
680 
681 	hwe = mtk_foe_get_entry(ppe, hash);
682 	memcpy(&foe, hwe, soc->foe_entry_size);
683 	foe.ib1 &= ib1_mask;
684 	foe.ib1 |= entry->data.ib1 & ~ib1_mask;
685 
686 	l2 = mtk_foe_entry_l2(ppe->eth, &foe);
687 	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
688 
689 	type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
690 	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
691 		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
692 	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
693 		l2->etype = ETH_P_IPV6;
694 
695 	*mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;
696 
697 	__mtk_foe_entry_commit(ppe, &foe, hash);
698 }
699 
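/*
 * Called from the RX path with the FOE hash of a packet that is not yet
 * bound: try to bind a matching software flow to the hardware entry. If no
 * direct match is found, look up the L2 flow table by MAC addresses and
 * VLAN (handling DSA-tagged frames) and create a subflow for the hit.
 */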
700 void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
701 {
702 	const struct mtk_soc_data *soc = ppe->eth->soc;
703 	struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
704 	struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
705 	struct mtk_flow_entry *entry;
706 	struct mtk_foe_bridge key = {};
707 	struct hlist_node *n;
708 	struct ethhdr *eh;
709 	bool found = false;
710 	u8 *tag;
711 
712 	spin_lock_bh(&ppe_lock);
713 
714 	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
715 		goto out;
716 
717 	hlist_for_each_entry_safe(entry, n, head, list) {
718 		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
719 			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
720 				     MTK_FOE_STATE_BIND))
721 				continue;
722 
723 			entry->hash = 0xffff;
724 			__mtk_foe_entry_clear(ppe, entry);
725 			continue;
726 		}
727 
728 		if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
729 			if (entry->hash != 0xffff)
730 				entry->hash = 0xffff;
731 			continue;
732 		}
733 
734 		entry->hash = hash;
735 		__mtk_foe_entry_commit(ppe, &entry->data, hash);
736 		found = true;
737 	}
738 
739 	if (found)
740 		goto out;
741 
742 	eh = eth_hdr(skb);
743 	ether_addr_copy(key.dest_mac, eh->h_dest);
744 	ether_addr_copy(key.src_mac, eh->h_source);
745 	tag = skb->data - 2;
746 	key.vlan = 0;
747 	switch (skb->protocol) {
748 #if IS_ENABLED(CONFIG_NET_DSA)
749 	case htons(ETH_P_XDSA):
750 		if (!netdev_uses_dsa(skb->dev) ||
751 		    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
752 			goto out;
753 
754 		tag += 4;
755 		if (get_unaligned_be16(tag) != ETH_P_8021Q)
756 			break;
757 
758 		fallthrough;
759 #endif
760 	case htons(ETH_P_8021Q):
761 		key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
762 		break;
763 	default:
764 		break;
765 	}
766 
767 	entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
768 	if (!entry)
769 		goto out;
770 
771 	mtk_foe_entry_commit_subflow(ppe, entry, hash);
772 
773 out:
774 	spin_unlock_bh(&ppe_lock);
775 }
776 
777 int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
778 {
779 	mtk_flow_entry_update(ppe, entry);
780 
781 	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
782 }
783 
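/*
 * Quiesce the PPE before a hardware reset: park the keepalive machinery,
 * disable scan mode and wait for the engine to become idle.
 */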
784 int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
785 {
786 	if (!ppe)
787 		return -EINVAL;
788 
789 	/* disable KA */
790 	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
791 	ppe_clear(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
792 	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0);
793 	usleep_range(10000, 11000);
794 
795 	/* set KA timer to maximum */
796 	ppe_set(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
797 	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0xffffffff);
798 
799 	/* set KA tick select */
800 	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_TICK_SEL);
801 	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
802 	usleep_range(10000, 11000);
803 
804 	/* disable scan mode */
805 	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_SCAN_MODE);
806 	usleep_range(10000, 11000);
807 
808 	return mtk_ppe_wait_busy(ppe);
809 }
810 
811 struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
812 						 struct mtk_foe_accounting *diff)
813 {
814 	struct mtk_foe_accounting *acct;
815 	int size = sizeof(struct mtk_foe_accounting);
816 	u64 bytes, packets;
817 
818 	if (!ppe->accounting)
819 		return NULL;
820 
821 	if (mtk_mib_entry_read(ppe, index, &bytes, &packets))
822 		return NULL;
823 
824 	acct = ppe->acct_table + index * size;
825 
826 	acct->bytes += bytes;
827 	acct->packets += packets;
828 
829 	if (diff) {
830 		diff->bytes = bytes;
831 		diff->packets = packets;
832 	}
833 
834 	return acct;
835 }
836 
837 struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index)
838 {
839 	bool accounting = eth->soc->has_accounting;
840 	const struct mtk_soc_data *soc = eth->soc;
841 	struct mtk_foe_accounting *acct;
842 	struct device *dev = eth->dev;
843 	struct mtk_mib_entry *mib;
844 	struct mtk_ppe *ppe;
845 	u32 foe_flow_size;
846 	void *foe;
847 
848 	ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
849 	if (!ppe)
850 		return NULL;
851 
852 	rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);
853 
854 	/* need to allocate a separate device, since the PPE DMA access is
855 	 * not coherent.
856 	 */
857 	ppe->base = base;
858 	ppe->eth = eth;
859 	ppe->dev = dev;
860 	ppe->version = eth->soc->offload_version;
861 	ppe->accounting = accounting;
862 
863 	foe = dmam_alloc_coherent(ppe->dev,
864 				  MTK_PPE_ENTRIES * soc->foe_entry_size,
865 				  &ppe->foe_phys, GFP_KERNEL);
866 	if (!foe)
867 		goto err_free_l2_flows;
868 
869 	ppe->foe_table = foe;
870 
871 	foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
872 			sizeof(*ppe->foe_flow);
873 	ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
874 	if (!ppe->foe_flow)
875 		goto err_free_l2_flows;
876 
877 	if (accounting) {
878 		mib = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*mib),
879 					  &ppe->mib_phys, GFP_KERNEL);
880 		if (!mib)
881 			goto err_free_l2_flows;
882 
883 		ppe->mib_table = mib;
884 
885 		acct = devm_kzalloc(dev, MTK_PPE_ENTRIES * sizeof(*acct),
886 				    GFP_KERNEL);
887 
888 		if (!acct)
889 			goto err_free_l2_flows;
890 
891 		ppe->acct_table = acct;
892 	}
893 
894 	mtk_ppe_debugfs_init(ppe, index);
895 
896 	return ppe;
897 
898 err_free_l2_flows:
899 	rhashtable_destroy(&ppe->l2_flows);
900 	return NULL;
901 }
902 
903 void mtk_ppe_deinit(struct mtk_eth *eth)
904 {
905 	int i;
906 
907 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) {
908 		if (!eth->ppe[i])
909 			return;
910 		rhashtable_destroy(&eth->ppe[i]->l2_flows);
911 	}
912 }
913 
914 static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
915 {
916 	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
917 	int i, k;
918 
919 	memset(ppe->foe_table, 0,
920 	       MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);
921 
922 	if (!IS_ENABLED(CONFIG_SOC_MT7621))
923 		return;
924 
925 	/* skip all entries that cross the 1024 byte boundary */
926 	for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
927 		for (k = 0; k < ARRAY_SIZE(skip); k++) {
928 			struct mtk_foe_entry *hwe;
929 
930 			hwe = mtk_foe_get_entry(ppe, i + skip[k]);
931 			hwe->ib1 |= MTK_FOE_IB1_STATIC;
932 		}
933 	}
934 }
935 
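/*
 * Bring up the PPE: point it at the FOE table, program aging, keepalive
 * and binding thresholds, select the flow types to offload, optionally
 * enable the MIB counters and finally set the enable bit.
 */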
936 void mtk_ppe_start(struct mtk_ppe *ppe)
937 {
938 	u32 val;
939 
940 	if (!ppe)
941 		return;
942 
943 	mtk_ppe_init_foe_table(ppe);
944 	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
945 
946 	val = MTK_PPE_TB_CFG_ENTRY_80B |
947 	      MTK_PPE_TB_CFG_AGE_NON_L4 |
948 	      MTK_PPE_TB_CFG_AGE_UNBIND |
949 	      MTK_PPE_TB_CFG_AGE_TCP |
950 	      MTK_PPE_TB_CFG_AGE_UDP |
951 	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
952 	      FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
953 			 MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
954 	      FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
955 			 MTK_PPE_KEEPALIVE_DISABLE) |
956 	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
957 	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
958 			 MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
959 	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
960 			 MTK_PPE_ENTRIES_SHIFT);
961 	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
962 		val |= MTK_PPE_TB_CFG_INFO_SEL;
963 	ppe_w32(ppe, MTK_PPE_TB_CFG, val);
964 
965 	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
966 		MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);
967 
968 	mtk_ppe_cache_enable(ppe, true);
969 
970 	val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
971 	      MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
972 	      MTK_PPE_FLOW_CFG_IP6_6RD |
973 	      MTK_PPE_FLOW_CFG_IP4_NAT |
974 	      MTK_PPE_FLOW_CFG_IP4_NAPT |
975 	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
976 	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
977 	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
978 		val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
979 		       MTK_PPE_MD_TOAP_BYP_CRSN1 |
980 		       MTK_PPE_MD_TOAP_BYP_CRSN2 |
981 		       MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
982 	else
983 		val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
984 		       MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
985 	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
986 
987 	val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
988 	      FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
989 	ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);
990 
991 	val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
992 	      FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
993 	ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);
994 
995 	val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
996 	      FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
997 	ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);
998 
999 	val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
1000 	ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);
1001 
1002 	val = MTK_PPE_BIND_LIMIT1_FULL |
1003 	      FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
1004 	ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);
1005 
1006 	val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
1007 	      FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
1008 	ppe_w32(ppe, MTK_PPE_BIND_RATE, val);
1009 
1010 	/* enable PPE */
1011 	val = MTK_PPE_GLO_CFG_EN |
1012 	      MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
1013 	      MTK_PPE_GLO_CFG_IP4_CS_DROP |
1014 	      MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
1015 	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);
1016 
1017 	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
1018 
1019 	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
1020 		ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
1021 		ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
1022 	}
1023 
1024 	if (ppe->accounting && ppe->mib_phys) {
1025 		ppe_w32(ppe, MTK_PPE_MIB_TB_BASE, ppe->mib_phys);
1026 		ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_EN,
1027 			MTK_PPE_MIB_CFG_EN);
1028 		ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_RD_CLR,
1029 			MTK_PPE_MIB_CFG_RD_CLR);
1030 		ppe_m32(ppe, MTK_PPE_MIB_CACHE_CTL, MTK_PPE_MIB_CACHE_CTL_EN,
1031 			MTK_PPE_MIB_CACHE_CTL_EN);
1032 	}
1033 }
1034 
1035 int mtk_ppe_stop(struct mtk_ppe *ppe)
1036 {
1037 	u32 val;
1038 	int i;
1039 
1040 	if (!ppe)
1041 		return 0;
1042 
1043 	for (i = 0; i < MTK_PPE_ENTRIES; i++) {
1044 		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);
1045 
1046 		hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
1047 				      MTK_FOE_STATE_INVALID);
1048 	}
1049 
1050 	mtk_ppe_cache_enable(ppe, false);
1051 
1052 	/* disable offload engine */
1053 	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
1054 	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
1055 
1056 	/* disable aging */
1057 	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
1058 	      MTK_PPE_TB_CFG_AGE_UNBIND |
1059 	      MTK_PPE_TB_CFG_AGE_TCP |
1060 	      MTK_PPE_TB_CFG_AGE_UDP |
1061 	      MTK_PPE_TB_CFG_AGE_TCP_FIN;
1062 	ppe_clear(ppe, MTK_PPE_TB_CFG, val);
1063 
1064 	return mtk_ppe_wait_busy(ppe);
1065 }
1066