// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

#include <net/dst_metadata.h>
#include <net/dsa.h>
#include <net/ipv6.h>

#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"

static DEFINE_SPINLOCK(ppe_lock);

static const struct rhashtable_params mtk_flow_l2_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, l2_node),
	.key_offset = offsetof(struct mtk_flow_entry, data.bridge),
	.key_len = offsetof(struct mtk_foe_bridge, key_end),
	.automatic_shrinking = true,
};

static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	writel(val, ppe->base + reg);
}

static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
{
	return readl(ppe->base + reg);
}

static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
{
	u32 val;

	val = ppe_r32(ppe, reg);
	val &= ~mask;
	val |= set;
	ppe_w32(ppe, reg, val);

	return val;
}

static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, 0, val);
}

static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, val, 0);
}

static u32 mtk_eth_timestamp(struct mtk_eth *eth)
{
	return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
}

static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
				 !(val & MTK_PPE_GLO_CFG_BUSY),
				 20, MTK_PPE_WAIT_TIMEOUT_US);

	if (ret)
		dev_err(ppe->dev, "PPE table busy");

	return ret;
}

static int mtk_ppe_mib_wait_busy(struct mtk_ppe *ppe)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val,
				 !(val & MTK_PPE_MIB_SER_CR_ST),
				 20, MTK_PPE_WAIT_TIMEOUT_US);

	if (ret)
		dev_err(ppe->dev, "MIB table busy");

	return ret;
}

static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets)
{
	u32 val, cnt_r0, cnt_r1, cnt_r2;
	int ret;

	val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST;
	ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val);

	ret = mtk_ppe_mib_wait_busy(ppe);
	if (ret)
		return ret;

	cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
	cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
	cnt_r2 = readl(ppe->base + MTK_PPE_MIB_SER_R2);

	if (mtk_is_netsys_v3_or_greater(ppe->eth)) {
		/* 64 bit for each counter */
		u32 cnt_r3 = readl(ppe->base + MTK_PPE_MIB_SER_R3);
		*bytes = ((u64)cnt_r1 << 32) | cnt_r0;
		*packets = ((u64)cnt_r3 << 32) | cnt_r2;
	} else {
		/* 48 bit byte counter, 40 bit packet counter */
		u32 byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0);
		u32 byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
		u32 pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
		u32 pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
		*bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
		*packets = ((u64)pkt_cnt_high << 16) | pkt_cnt_low;
	}

	return 0;
}

static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
	ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
}

static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
{
	mtk_ppe_cache_clear(ppe);

	ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
		enable * MTK_PPE_CACHE_CTL_EN);
}

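/* Compute the FoE table bucket for a flow: fold the tuple into a 32-bit
 * hash, scale it by the per-SoC hash_offset and mask it to the table size.
 */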
static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
{
	u32 hv1, hv2, hv3;
	u32 hash;

	switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = e->ipv4.orig.ports;
		hv2 = e->ipv4.orig.dest_ip;
		hv3 = e->ipv4.orig.src_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
		hv1 ^= e->ipv6.ports;

		hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
		hv2 ^= e->ipv6.dest_ip[0];

		hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
		hv3 ^= e->ipv6.src_ip[0];
		break;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return MTK_PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash <<= (ffs(eth->soc->hash_offset) - 1);
	hash &= MTK_PPE_ENTRIES - 1;

	return hash;
}

static inline struct mtk_foe_mac_info *
mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.l2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.l2;

	return &entry->ipv4.l2;
}

static inline u32 *
mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.ib2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.ib2;

	return &entry->ipv4.ib2;
}

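/* Initialize a FoE entry in BIND state: fill ib1/ib2 according to the SoC
 * generation, then set up the L2 header fields and protocol/port padding.
 */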
int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int type, int l4proto, u8 pse_port, u8 *src_mac,
			  u8 *dest_mac)
{
	struct mtk_foe_mac_info *l2;
	u32 ports_pad, val;

	memset(entry, 0, sizeof(*entry));

	if (mtk_is_netsys_v2_or_greater(eth)) {
		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
		entry->ib1 = val;

		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
	} else {
		int port_mg = eth->soc->offload_version > 1 ? 0 : 0x3f;

		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
		entry->ib1 = val;

		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_MG, port_mg) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
	}

	if (is_multicast_ether_addr(dest_mac))
		val |= mtk_get_ib2_multicast_mask(eth);

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		entry->ipv4.orig.ports = ports_pad;
	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		entry->ipv6.ports = ports_pad;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
		ether_addr_copy(entry->bridge.src_mac, src_mac);
		ether_addr_copy(entry->bridge.dest_mac, dest_mac);
		entry->bridge.ib2 = val;
		l2 = &entry->bridge.l2;
	} else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
		entry->ipv6.ib2 = val;
		l2 = &entry->ipv6.l2;
	} else {
		entry->ipv4.ib2 = val;
		l2 = &entry->ipv4.l2;
	}

	l2->dest_mac_hi = get_unaligned_be32(dest_mac);
	l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
	l2->src_mac_hi = get_unaligned_be32(src_mac);
	l2->src_mac_lo = get_unaligned_be16(src_mac + 4);

	if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		l2->etype = ETH_P_IPV6;
	else
		l2->etype = ETH_P_IP;

	return 0;
}

int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
			       struct mtk_foe_entry *entry, u8 port)
{
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
	u32 val = *ib2;

	if (mtk_is_netsys_v2_or_greater(eth)) {
		val &= ~MTK_FOE_IB2_DEST_PORT_V2;
		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
	} else {
		val &= ~MTK_FOE_IB2_DEST_PORT;
		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
	}
	*ib2 = val;

	return 0;
}

int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry, bool egress,
				 __be32 src_addr, __be16 src_port,
				 __be32 dest_addr, __be16 dest_port)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	struct mtk_ipv4_tuple *t;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &entry->ipv4.new;
			break;
		}
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
		t = &entry->ipv4.orig;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
		entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(src_addr);
	t->dest_ip = be32_to_cpu(dest_addr);

	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		return 0;

	t->src_port = be16_to_cpu(src_port);
	t->dest_port = be16_to_cpu(dest_port);

	return 0;
}

int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry,
				 __be32 *src_addr, __be16 src_port,
				 __be32 *dest_addr, __be16 dest_port)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	u32 *src, *dest;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
		src = entry->dslite.tunnel_src_ip;
		dest = entry->dslite.tunnel_dest_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6.src_port = be16_to_cpu(src_port);
		entry->ipv6.dest_port = be16_to_cpu(dest_port);
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = entry->ipv6.src_ip;
		dest = entry->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	ipv6_addr_be32_to_cpu(src, src_addr);
	ipv6_addr_be32_to_cpu(dest, dest_addr);

	return 0;
}

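/* Set up DSA tagging towards the switch: the destination port bit is encoded
 * in the etype field and one VLAN layer is reserved, which the PPE appears to
 * reuse for inserting the MediaTek special tag on egress.
 */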
int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int port)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	l2->etype = BIT(port);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
		entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
	else
		l2->etype |= BIT(8);

	entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);

	return 0;
}

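/* Add a VLAN tag to the entry: the first tag is stored in vlan1, a second
 * tag in vlan2; anything beyond two layers cannot be offloaded.
 */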
int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int vid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
	case 0:
		entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
			      mtk_prep_ib1_vlan_layer(eth, 1);
		l2->vlan1 = vid;
		return 0;
	case 1:
		if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
			l2->vlan1 = vid;
			l2->etype |= BIT(8);
		} else {
			l2->vlan2 = vid;
			entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
		}
		return 0;
	default:
		return -ENOSPC;
	}
}

int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			    int sid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
	    (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
		l2->etype = ETH_P_PPP_SES;

	entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
	l2->pppoe_id = sid;

	return 0;
}

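/* Fill in the WDMA forwarding info (ring/queue, BSS index and WCID) in the
 * field layout used by the given NETSYS generation, so offloaded traffic
 * can be handed to the WLAN hardware.
 */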
int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int wdma_idx, int txq, int bss, int wcid,
			   bool amsdu_en)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

	switch (eth->soc->version) {
	case 3:
		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
			MTK_FOE_IB2_WDMA_WINFO_V2;
		l2->w3info = FIELD_PREP(MTK_FOE_WINFO_WCID_V3, wcid) |
			     FIELD_PREP(MTK_FOE_WINFO_BSS_V3, bss);
		l2->amsdu = FIELD_PREP(MTK_FOE_WINFO_AMSDU_EN, amsdu_en);
		break;
	case 2:
		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
			MTK_FOE_IB2_WDMA_WINFO_V2;
		l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
		break;
	default:
		*ib2 &= ~MTK_FOE_IB2_PORT_MG;
		*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
		if (wdma_idx)
			*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
		l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
		break;
	}

	return 0;
}

int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			    unsigned int queue)
{
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

	if (mtk_is_netsys_v2_or_greater(eth)) {
		*ib2 &= ~MTK_FOE_IB2_QID_V2;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
		*ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
	} else {
		*ib2 &= ~MTK_FOE_IB2_QID;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, queue);
		*ib2 |= MTK_FOE_IB2_PSE_QOS;
	}

	return 0;
}

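/* Compare a software flow entry against a hardware FoE entry: only the UDP
 * bit of ib1 and the tuple data (excluding ib2) have to match.
 */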
static bool
mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
		     struct mtk_foe_entry *data)
{
	int type, len;

	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
		return false;

	type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
	else
		len = offsetof(struct mtk_foe_entry, ipv4.ib2);

	return !memcmp(&entry->data.data, &data->data, len - 4);
}

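/* Unbind a flow entry: for an L2 flow, remove it from the rhashtable and
 * clear all of its subflows; otherwise invalidate the hardware entry, flush
 * the PPE cache and, if accounting is enabled, reset the counters for the
 * slot. Called with ppe_lock held.
 */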
static void
__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct hlist_head *head;
	struct hlist_node *tmp;

	if (entry->type == MTK_FLOW_TYPE_L2) {
		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
				       mtk_flow_l2_ht_params);

		head = &entry->l2_flows;
		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
			__mtk_foe_entry_clear(ppe, entry);
		return;
	}

	hlist_del_init(&entry->list);
	if (entry->hash != 0xffff) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);

		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
		hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
		dma_wmb();
		mtk_ppe_cache_clear(ppe);

		if (ppe->accounting) {
			struct mtk_foe_accounting *acct;

			acct = ppe->acct_table + entry->hash * sizeof(*acct);
			acct->packets = 0;
			acct->bytes = 0;
		}
	}
	entry->hash = 0xffff;

	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
		return;

	hlist_del_init(&entry->l2_data.list);
	kfree(entry);
}

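/* Return the idle time of an entry in hardware timestamp ticks, accounting
 * for wrap-around of the timestamp field.
 */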
static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
{
	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	u16 now = mtk_eth_timestamp(ppe->eth);
	u16 timestamp = ib1 & ib1_ts_mask;

	if (timestamp > now)
		return ib1_ts_mask + 1 - timestamp + now;
	else
		return now - timestamp;
}

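/* Refresh the timestamp of an L2 flow from its most recently active subflow
 * and drop subflows whose hardware entries are no longer bound.
 */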
static void
mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	struct mtk_flow_entry *cur;
	struct mtk_foe_entry *hwe;
	struct hlist_node *tmp;
	int idle;

	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
		int cur_idle;
		u32 ib1;

		hwe = mtk_foe_get_entry(ppe, cur->hash);
		ib1 = READ_ONCE(hwe->ib1);

		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
			cur->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, cur);
			continue;
		}

		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
		if (cur_idle >= idle)
			continue;

		idle = cur_idle;
		entry->data.ib1 &= ~ib1_ts_mask;
		entry->data.ib1 |= ib1 & ib1_ts_mask;
	}
}

static void
mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct mtk_foe_entry foe = {};
	struct mtk_foe_entry *hwe;

	spin_lock_bh(&ppe_lock);

	if (entry->type == MTK_FLOW_TYPE_L2) {
		mtk_flow_entry_update_l2(ppe, entry);
		goto out;
	}

	if (entry->hash == 0xffff)
		goto out;

	hwe = mtk_foe_get_entry(ppe, entry->hash);
	memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
	if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
		entry->hash = 0xffff;
		goto out;
	}

	entry->data.ib1 = foe.ib1;

out:
	spin_unlock_bh(&ppe_lock);
}

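/* Write a prepared entry into the hardware FoE table at the given hash slot.
 * The data portion is written first, followed by a barrier and then ib1, so
 * the hardware never sees a bound entry with stale data.
 */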
static void
__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
		       u16 hash)
{
	struct mtk_eth *eth = ppe->eth;
	u16 timestamp = mtk_eth_timestamp(eth);
	struct mtk_foe_entry *hwe;
	u32 val;

	if (mtk_is_netsys_v2_or_greater(eth)) {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
					 timestamp);
	} else {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
					 timestamp);
	}

	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1));
	wmb();
	hwe->ib1 = entry->ib1;

	if (ppe->accounting) {
		if (mtk_is_netsys_v2_or_greater(eth))
			val = MTK_FOE_IB2_MIB_CNT_V2;
		else
			val = MTK_FOE_IB2_MIB_CNT;
		*mtk_foe_entry_ib2(eth, hwe) |= val;
	}

	dma_wmb();

	mtk_ppe_cache_clear(ppe);
}

void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	spin_lock_bh(&ppe_lock);
	__mtk_foe_entry_clear(ppe, entry);
	spin_unlock_bh(&ppe_lock);
}

static int
mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct mtk_flow_entry *prev;

	entry->type = MTK_FLOW_TYPE_L2;

	prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &entry->l2_node,
						 mtk_flow_l2_ht_params);
	if (likely(!prev))
		return 0;

	if (IS_ERR(prev))
		return PTR_ERR(prev);

	return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
				       &entry->l2_node, mtk_flow_l2_ht_params);
}

int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
	u32 hash;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return mtk_foe_entry_commit_l2(ppe, entry);

	hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
	entry->hash = 0xffff;
	spin_lock_bh(&ppe_lock);
	hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
	spin_unlock_bh(&ppe_lock);

	return 0;
}

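/* Create a hardware subflow for an L2 (bridge) flow once a packet of that
 * flow has been seen: clone the tuple from the unbound hardware entry,
 * apply the L2 rewrite info from the base flow and bind the entry.
 */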
static void
mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
			     u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct mtk_flow_entry *flow_info;
	struct mtk_foe_entry foe = {}, *hwe;
	struct mtk_foe_mac_info *l2;
	u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
	int type;

	flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
	if (!flow_info)
		return;

	flow_info->l2_data.base_flow = entry;
	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
	flow_info->hash = hash;
	hlist_add_head(&flow_info->list,
		       &ppe->foe_flow[hash / soc->hash_offset]);
	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);

	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&foe, hwe, soc->foe_entry_size);
	foe.ib1 &= ib1_mask;
	foe.ib1 |= entry->data.ib1 & ~ib1_mask;

	l2 = mtk_foe_entry_l2(ppe->eth, &foe);
	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));

	type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
		l2->etype = ETH_P_IPV6;

	*mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;

	__mtk_foe_entry_commit(ppe, &foe, hash);
}

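/* Called from the RX path for packets whose hardware entry is still unbound:
 * look for a matching software flow in the hash bucket and bind it, or fall
 * back to an L2 (bridge) flow looked up by MAC addresses and VLAN and create
 * a subflow for it.
 */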
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
	struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
	struct mtk_flow_entry *entry;
	struct mtk_foe_bridge key = {};
	struct hlist_node *n;
	struct ethhdr *eh;
	bool found = false;
	u8 *tag;

	spin_lock_bh(&ppe_lock);

	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
		goto out;

	hlist_for_each_entry_safe(entry, n, head, list) {
		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
				     MTK_FOE_STATE_BIND))
				continue;

			entry->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, entry);
			continue;
		}

		if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
			if (entry->hash != 0xffff)
				entry->hash = 0xffff;
			continue;
		}

		entry->hash = hash;
		__mtk_foe_entry_commit(ppe, &entry->data, hash);
		found = true;
	}

	if (found)
		goto out;

	eh = eth_hdr(skb);
	ether_addr_copy(key.dest_mac, eh->h_dest);
	ether_addr_copy(key.src_mac, eh->h_source);
	tag = skb->data - 2;
	key.vlan = 0;
	switch (skb->protocol) {
#if IS_ENABLED(CONFIG_NET_DSA)
	case htons(ETH_P_XDSA):
		if (!netdev_uses_dsa(skb->dev) ||
		    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
			goto out;

		if (!skb_metadata_dst(skb))
			tag += 4;

		if (get_unaligned_be16(tag) != ETH_P_8021Q)
			break;

		fallthrough;
#endif
	case htons(ETH_P_8021Q):
		key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
		break;
	default:
		break;
	}

	entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
	if (!entry)
		goto out;

	mtk_foe_entry_commit_subflow(ppe, entry, hash);

out:
	spin_unlock_bh(&ppe_lock);
}

int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	mtk_flow_entry_update(ppe, entry);

	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
}

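/* Prepare the PPE for a hardware reset: stretch the keep-alive timer,
 * disable scan mode and wait for the table to become idle.
 */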
int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
{
	if (!ppe)
		return -EINVAL;

	/* disable KA */
	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
	ppe_clear(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0);
	usleep_range(10000, 11000);

	/* set KA timer to maximum */
	ppe_set(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0xffffffff);

	/* set KA tick select */
	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_TICK_SEL);
	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
	usleep_range(10000, 11000);

	/* disable scan mode */
	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_SCAN_MODE);
	usleep_range(10000, 11000);

	return mtk_ppe_wait_busy(ppe);
}

struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
						 struct mtk_foe_accounting *diff)
{
	struct mtk_foe_accounting *acct;
	int size = sizeof(struct mtk_foe_accounting);
	u64 bytes, packets;

	if (!ppe->accounting)
		return NULL;

	if (mtk_mib_entry_read(ppe, index, &bytes, &packets))
		return NULL;

	acct = ppe->acct_table + index * size;

	acct->bytes += bytes;
	acct->packets += packets;

	if (diff) {
		diff->bytes = bytes;
		diff->packets = packets;
	}

	return acct;
}

struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index)
{
	bool accounting = eth->soc->has_accounting;
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_foe_accounting *acct;
	struct device *dev = eth->dev;
	struct mtk_mib_entry *mib;
	struct mtk_ppe *ppe;
	u32 foe_flow_size;
	void *foe;

	ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
	if (!ppe)
		return NULL;

	rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);

	/* need to allocate a separate device, since the PPE DMA access is
	 * not coherent.
	 */
	ppe->base = base;
	ppe->eth = eth;
	ppe->dev = dev;
	ppe->version = eth->soc->offload_version;
	ppe->accounting = accounting;

	foe = dmam_alloc_coherent(ppe->dev,
				  MTK_PPE_ENTRIES * soc->foe_entry_size,
				  &ppe->foe_phys, GFP_KERNEL);
	if (!foe)
		goto err_free_l2_flows;

	ppe->foe_table = foe;

	foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
			sizeof(*ppe->foe_flow);
	ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
	if (!ppe->foe_flow)
		goto err_free_l2_flows;

	if (accounting) {
		mib = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*mib),
					  &ppe->mib_phys, GFP_KERNEL);
		if (!mib)
			return NULL;

		ppe->mib_table = mib;

		acct = devm_kzalloc(dev, MTK_PPE_ENTRIES * sizeof(*acct),
				    GFP_KERNEL);

		if (!acct)
			return NULL;

		ppe->acct_table = acct;
	}

	mtk_ppe_debugfs_init(ppe, index);

	return ppe;

err_free_l2_flows:
	rhashtable_destroy(&ppe->l2_flows);
	return NULL;
}

void mtk_ppe_deinit(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) {
		if (!eth->ppe[i])
			return;
		rhashtable_destroy(&eth->ppe[i]->l2_flows);
	}
}

static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
{
	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
	int i, k;

	memset(ppe->foe_table, 0,
	       MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);

	if (!IS_ENABLED(CONFIG_SOC_MT7621))
		return;

	/* skip all entries that cross the 1024 byte boundary */
	for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
		for (k = 0; k < ARRAY_SIZE(skip); k++) {
			struct mtk_foe_entry *hwe;

			hwe = mtk_foe_get_entry(ppe, i + skip[k]);
			hwe->ib1 |= MTK_FOE_IB1_STATIC;
		}
	}
}

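/* Program the FoE table base, aging/binding parameters and flow type
 * configuration, then enable the offload engine (and MIB counters when
 * accounting is available).
 */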
void mtk_ppe_start(struct mtk_ppe *ppe)
{
	u32 val;

	if (!ppe)
		return;

	mtk_ppe_init_foe_table(ppe);
	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);

	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
	      FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
			 MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
	      FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
			 MTK_PPE_KEEPALIVE_DISABLE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
			 MTK_PPE_SCAN_MODE_CHECK_AGE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
			 MTK_PPE_ENTRIES_SHIFT);
	if (mtk_is_netsys_v2_or_greater(ppe->eth))
		val |= MTK_PPE_TB_CFG_INFO_SEL;
	if (!mtk_is_netsys_v3_or_greater(ppe->eth))
		val |= MTK_PPE_TB_CFG_ENTRY_80B;
	ppe_w32(ppe, MTK_PPE_TB_CFG, val);

	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
		MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);

	mtk_ppe_cache_enable(ppe, true);

	val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_6RD |
	      MTK_PPE_FLOW_CFG_IP4_NAT |
	      MTK_PPE_FLOW_CFG_IP4_NAPT |
	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
	if (mtk_is_netsys_v2_or_greater(ppe->eth))
		val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
		       MTK_PPE_MD_TOAP_BYP_CRSN1 |
		       MTK_PPE_MD_TOAP_BYP_CRSN2 |
		       MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
	else
		val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
		       MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

	val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
	      FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
	ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
	      FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
	      FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
	ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);

	val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);

	val = MTK_PPE_BIND_LIMIT1_FULL |
	      FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);

	val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
	      FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
	ppe_w32(ppe, MTK_PPE_BIND_RATE, val);

	/* enable PPE */
	val = MTK_PPE_GLO_CFG_EN |
	      MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
	      MTK_PPE_GLO_CFG_IP4_CS_DROP |
	      MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);

	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);

	if (mtk_is_netsys_v2_or_greater(ppe->eth)) {
		ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
		ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
	}

	if (ppe->accounting && ppe->mib_phys) {
		ppe_w32(ppe, MTK_PPE_MIB_TB_BASE, ppe->mib_phys);
		ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_EN,
			MTK_PPE_MIB_CFG_EN);
		ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_RD_CLR,
			MTK_PPE_MIB_CFG_RD_CLR);
		ppe_m32(ppe, MTK_PPE_MIB_CACHE_CTL, MTK_PPE_MIB_CACHE_CTL_EN,
			MTK_PPE_MIB_CFG_RD_CLR);
	}
}

int mtk_ppe_stop(struct mtk_ppe *ppe)
{
	u32 val;
	int i;

	if (!ppe)
		return 0;

	for (i = 0; i < MTK_PPE_ENTRIES; i++) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);

		hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
				      MTK_FOE_STATE_INVALID);
	}

	mtk_ppe_cache_enable(ppe, false);

	/* disable aging */
	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
	      MTK_PPE_TB_CFG_SCAN_MODE;
	ppe_clear(ppe, MTK_PPE_TB_CFG, val);

	if (mtk_ppe_wait_busy(ppe))
		return -ETIMEDOUT;

	/* disable offload engine */
	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);

	return 0;
}