// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2025 AIROHA Inc
 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/rhashtable.h>
#include <net/ipv6.h>
#include <net/pkt_cls.h>

#include "airoha_regs.h"
#include "airoha_eth.h"

static DEFINE_MUTEX(flow_offload_mutex);
static DEFINE_SPINLOCK(ppe_lock);

static const struct rhashtable_params airoha_flow_table_params = {
	.head_offset = offsetof(struct airoha_flow_table_entry, node),
	.key_offset = offsetof(struct airoha_flow_table_entry, cookie),
	.key_len = sizeof(unsigned long),
	.automatic_shrinking = true,
};

static const struct rhashtable_params airoha_l2_flow_table_params = {
	.head_offset = offsetof(struct airoha_flow_table_entry, l2_node),
	.key_offset = offsetof(struct airoha_flow_table_entry, data.bridge),
	.key_len = 2 * ETH_ALEN,
	.automatic_shrinking = true,
};

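/* The hardware may run a second packet-processing engine (PPE2). Its
 * availability is probed at runtime and determines how the FoE SRAM
 * entries are partitioned in the helpers below.
 */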
static bool airoha_ppe2_is_enabled(struct airoha_eth *eth)
{
	return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK;
}

static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
{
	u16 timestamp = airoha_fe_rr(ppe->eth, REG_FE_FOE_TS);

	return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp);
}

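/* One-time hw setup for both PPE instances: FoE table base address,
 * binding ageing deltas, hash configuration, per-port egress MTU and
 * the SRAM/DRAM table partitioning (which depends on whether PPE2 is
 * enabled).
 */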
static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
{
	u32 sram_tb_size, sram_num_entries, dram_num_entries;
	struct airoha_eth *eth = ppe->eth;
	int i;

	sram_tb_size = PPE_SRAM_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
	dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);

	for (i = 0; i < PPE_NUM; i++) {
		int p;

		airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
			     ppe->foe_dma + sram_tb_size);

		airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
			      PPE_BIND_AGE0_DELTA_NON_L4 |
			      PPE_BIND_AGE0_DELTA_UDP,
			      FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) |
			      FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12));
		airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
			      PPE_BIND_AGE1_DELTA_TCP_FIN |
			      PPE_BIND_AGE1_DELTA_TCP,
			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7));

		airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
			      PPE_SRAM_TABLE_EN_MASK |
			      PPE_SRAM_HASH1_EN_MASK |
			      PPE_DRAM_TABLE_EN_MASK |
			      PPE_SRAM_HASH0_MODE_MASK |
			      PPE_SRAM_HASH1_MODE_MASK |
			      PPE_DRAM_HASH0_MODE_MASK |
			      PPE_DRAM_HASH1_MODE_MASK,
			      FIELD_PREP(PPE_SRAM_TABLE_EN_MASK, 1) |
			      FIELD_PREP(PPE_SRAM_HASH1_EN_MASK, 1) |
			      FIELD_PREP(PPE_SRAM_HASH1_MODE_MASK, 1) |
			      FIELD_PREP(PPE_DRAM_HASH1_MODE_MASK, 3));

		airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
			      PPE_TB_CFG_SEARCH_MISS_MASK |
			      PPE_TB_CFG_KEEPALIVE_MASK |
			      PPE_TB_ENTRY_SIZE_MASK,
			      FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
			      FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0));

		airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);

		for (p = 0; p < ARRAY_SIZE(eth->ports); p++)
			airoha_fe_rmw(eth, REG_PPE_MTU(i, p),
				      FP0_EGRESS_MTU_MASK |
				      FP1_EGRESS_MTU_MASK,
				      FIELD_PREP(FP0_EGRESS_MTU_MASK,
						 AIROHA_MAX_MTU) |
				      FIELD_PREP(FP1_EGRESS_MTU_MASK,
						 AIROHA_MAX_MTU));
	}

	if (airoha_ppe2_is_enabled(eth)) {
		sram_num_entries =
			PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_DATA_ENTRIES);
		airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
			      PPE_SRAM_TB_NUM_ENTRY_MASK |
			      PPE_DRAM_TB_NUM_ENTRY_MASK,
			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
					 sram_num_entries) |
			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
					 dram_num_entries));
		airoha_fe_rmw(eth, REG_PPE_TB_CFG(1),
			      PPE_SRAM_TB_NUM_ENTRY_MASK |
			      PPE_DRAM_TB_NUM_ENTRY_MASK,
			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
					 sram_num_entries) |
			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
					 dram_num_entries));
	} else {
		sram_num_entries =
			PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_DATA_ENTRIES);
		airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
			      PPE_SRAM_TB_NUM_ENTRY_MASK |
			      PPE_DRAM_TB_NUM_ENTRY_MASK,
			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
					 sram_num_entries) |
			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
					 dram_num_entries));
	}
}

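/* Apply an ethhdr mangle to the cached flow data. A 0xffff mask
 * rewrites the low 2 bytes of the addressed 4-byte word, any other
 * non-zero mask the first 2 bytes, and a zero mask the whole word.
 */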
static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act,
				       void *eth)
{
	void *dest = eth + act->mangle.offset;
	const void *src = &act->mangle.val;

	if (act->mangle.offset > 8)
		return;

	if (act->mangle.mask == 0xffff) {
		src += 2;
		dest += 2;
	}

	memcpy(dest, src, act->mangle.mask ? 2 : 4);
}

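/* Convert a 4-byte L4 header mangle into src/dst port updates: at
 * offset 0 the mask selects whether the destination port (low 16 bit)
 * or the source port (high 16 bit) is rewritten, while offset 2
 * always addresses the destination port.
 */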
static int airoha_ppe_flow_mangle_ports(const struct flow_action_entry *act,
					struct airoha_flow_data *data)
{
	u32 val = be32_to_cpu((__force __be32)act->mangle.val);

	switch (act->mangle.offset) {
	case 0:
		if ((__force __be32)act->mangle.mask == ~cpu_to_be32(0xffff))
			data->dst_port = cpu_to_be16(val);
		else
			data->src_port = cpu_to_be16(val >> 16);
		break;
	case 2:
		data->dst_port = cpu_to_be16(val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
				       struct airoha_flow_data *data)
{
	__be32 *dest;

	switch (act->mangle.offset) {
	case offsetof(struct iphdr, saddr):
		dest = &data->v4.src_addr;
		break;
	case offsetof(struct iphdr, daddr):
		dest = &data->v4.dst_addr;
		break;
	default:
		return -EINVAL;
	}

	memcpy(dest, &act->mangle.val, sizeof(u32));

	return 0;
}

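/* Walk the device forwarding path to detect a WLAN destination: a
 * final hop of type DEV_PATH_MTK_WDMA (filled e.g. by the mt76
 * driver) carries the WDMA index, BSS and WCID needed to build a
 * wifi-offload FoE entry.
 */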
static int airoha_ppe_get_wdma_info(struct net_device *dev, const u8 *addr,
				    struct airoha_wdma_info *info)
{
	struct net_device_path_stack stack;
	struct net_device_path *path;
	int err;

	if (!dev)
		return -ENODEV;

	err = dev_fill_forward_path(dev, addr, &stack);
	if (err)
		return err;

	path = &stack.path[stack.num_paths - 1];
	if (path->type != DEV_PATH_MTK_WDMA)
		return -1;

	info->idx = path->mtk_wdma.wdma_idx;
	info->bss = path->mtk_wdma.bss;
	info->wcid = path->mtk_wdma.wcid;

	return 0;
}

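/* If *dev is a DSA user port, rewrite it to the conduit device and
 * return the DSA port index; return -ENODEV otherwise or when DSA
 * support is compiled out.
 */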
static int airoha_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dp = dsa_port_from_netdev(*dev);

	if (IS_ERR(dp))
		return -ENODEV;

	*dev = dsa_port_to_conduit(dp);
	return dp->index;
#else
	return -ENODEV;
#endif
}

static void airoha_ppe_foe_set_bridge_addrs(struct airoha_foe_bridge *br,
					    struct ethhdr *eh)
{
	br->dest_mac_hi = get_unaligned_be32(eh->h_dest);
	br->dest_mac_lo = get_unaligned_be16(eh->h_dest + 4);
	br->src_mac_hi = get_unaligned_be16(eh->h_source);
	br->src_mac_lo = get_unaligned_be32(eh->h_source + 2);
}

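/* Fill a FoE entry for the given flow: IB1 carries the bind state,
 * packet type and vlan/pppoe layout, IB2 the forwarding target (WDMA
 * for wifi offload, otherwise a GDM PSE port), while the l2 mac info
 * holds MAC addresses, vlan tags and the ethertype.
 */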
static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
					struct airoha_foe_entry *hwe,
					struct net_device *dev, int type,
					struct airoha_flow_data *data,
					int l4proto)
{
	u32 qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f), ports_pad, val;
	int wlan_etype = -EINVAL, dsa_port = airoha_get_dsa_port(&dev);
	struct airoha_foe_mac_info_common *l2;
	u8 smac_id = 0xf;

	memset(hwe, 0, sizeof(*hwe));

	val = FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, AIROHA_FOE_STATE_BIND) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_PACKET_TYPE, type) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_PPPOE, data->pppoe.num) |
	      AIROHA_FOE_IB1_BIND_TTL;
	hwe->ib1 = val;

	val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f);
	if (dev) {
		struct airoha_wdma_info info = {};

		if (!airoha_ppe_get_wdma_info(dev, data->eth.h_dest, &info)) {
			val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, info.idx) |
			       FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT,
					  FE_PSE_PORT_CDM4);
			qdata |= FIELD_PREP(AIROHA_FOE_ACTDP, info.bss);
			wlan_etype = FIELD_PREP(AIROHA_FOE_MAC_WDMA_BAND,
						info.idx) |
				     FIELD_PREP(AIROHA_FOE_MAC_WDMA_WCID,
						info.wcid);
		} else {
			struct airoha_gdm_port *port = netdev_priv(dev);
			u8 pse_port;

			if (!airoha_is_valid_gdm_port(eth, port))
				return -EINVAL;

			if (dsa_port >= 0 || eth->ports[1])
				pse_port = port->id == 4 ? FE_PSE_PORT_GDM4
							 : port->id;
			else
				pse_port = 2; /* uplink relies on GDM2
					       * loopback
					       */

			val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port) |
			       AIROHA_FOE_IB2_PSE_QOS;
			/* For downlink traffic, consume SRAM memory for the
			 * hw forwarding descriptor queue.
			 */
			if (airhoa_is_lan_gdm_port(port))
				val |= AIROHA_FOE_IB2_FAST_PATH;
			if (dsa_port >= 0)
				val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ,
						  dsa_port);

			smac_id = port->id;
		}
	}

	if (is_multicast_ether_addr(data->eth.h_dest))
		val |= AIROHA_FOE_IB2_MULTICAST;

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == PPE_PKT_TYPE_IPV4_ROUTE)
		hwe->ipv4.orig_tuple.ports = ports_pad;
	if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
		hwe->ipv6.ports = ports_pad;

	if (type == PPE_PKT_TYPE_BRIDGE) {
		airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth);
		hwe->bridge.data = qdata;
		hwe->bridge.ib2 = val;
		l2 = &hwe->bridge.l2.common;
	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		hwe->ipv6.data = qdata;
		hwe->ipv6.ib2 = val;
		l2 = &hwe->ipv6.l2;
		l2->etype = ETH_P_IPV6;
	} else {
		hwe->ipv4.data = qdata;
		hwe->ipv4.ib2 = val;
		l2 = &hwe->ipv4.l2.common;
		l2->etype = ETH_P_IP;
	}

	l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
	l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
	if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
		struct airoha_foe_mac_info *mac_info;

		l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
		hwe->ipv4.l2.src_mac_lo =
			get_unaligned_be16(data->eth.h_source + 4);

		mac_info = (struct airoha_foe_mac_info *)l2;
		mac_info->pppoe_id = data->pppoe.sid;
	} else {
		l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id) |
				 FIELD_PREP(AIROHA_FOE_MAC_PPPOE_ID,
					    data->pppoe.sid);
	}

	if (data->vlan.num) {
		l2->vlan1 = data->vlan.hdr[0].id;
		if (data->vlan.num == 2)
			l2->vlan2 = data->vlan.hdr[1].id;
	}

	if (wlan_etype >= 0) {
		l2->etype = wlan_etype;
	} else if (dsa_port >= 0) {
		l2->etype = BIT(dsa_port);
		l2->etype |= !data->vlan.num ? BIT(15) : 0;
	} else if (data->pppoe.num) {
		l2->etype = ETH_P_PPP_SES;
	}

	return 0;
}

static int airoha_ppe_foe_entry_set_ipv4_tuple(struct airoha_foe_entry *hwe,
					       struct airoha_flow_data *data,
					       bool egress)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	struct airoha_foe_ipv4_tuple *t;

	switch (type) {
	case PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &hwe->ipv4.new_tuple;
			break;
		}
		fallthrough;
	case PPE_PKT_TYPE_IPV4_DSLITE:
	case PPE_PKT_TYPE_IPV4_ROUTE:
		t = &hwe->ipv4.orig_tuple;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(data->v4.src_addr);
	t->dest_ip = be32_to_cpu(data->v4.dst_addr);

	if (type != PPE_PKT_TYPE_IPV4_ROUTE) {
		t->src_port = be16_to_cpu(data->src_port);
		t->dest_port = be16_to_cpu(data->dst_port);
	}

	return 0;
}

static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
					       struct airoha_flow_data *data)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 *src, *dest;

	switch (type) {
	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case PPE_PKT_TYPE_IPV6_6RD:
		hwe->ipv6.src_port = be16_to_cpu(data->src_port);
		hwe->ipv6.dest_port = be16_to_cpu(data->dst_port);
		fallthrough;
	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = hwe->ipv6.src_ip;
		dest = hwe->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	ipv6_addr_be32_to_cpu(src, data->v6.src_addr.s6_addr32);
	ipv6_addr_be32_to_cpu(dest, data->v6.dst_addr.s6_addr32);

	return 0;
}

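/* Fold the flow tuple into the bucket index the hardware hash is
 * expected to produce, so sw flow entries can later be matched
 * against hw-reported hashes.
 */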
static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 hash, hv1, hv2, hv3;

	switch (type) {
	case PPE_PKT_TYPE_IPV4_ROUTE:
	case PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = hwe->ipv4.orig_tuple.ports;
		hv2 = hwe->ipv4.orig_tuple.dest_ip;
		hv3 = hwe->ipv4.orig_tuple.src_ip;
		break;
	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = hwe->ipv6.src_ip[3] ^ hwe->ipv6.dest_ip[3];
		hv1 ^= hwe->ipv6.ports;

		hv2 = hwe->ipv6.src_ip[2] ^ hwe->ipv6.dest_ip[2];
		hv2 ^= hwe->ipv6.dest_ip[0];

		hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
		hv3 ^= hwe->ipv6.src_ip[0];
		break;
	case PPE_PKT_TYPE_BRIDGE: {
		struct airoha_foe_mac_info *l2 = &hwe->bridge.l2;

		hv1 = l2->common.src_mac_hi & 0xffff;
		hv1 = hv1 << 16 | l2->src_mac_lo;

		hv2 = l2->common.dest_mac_lo;
		hv2 = hv2 << 16;
		hv2 = hv2 | ((l2->common.src_mac_hi & 0xffff0000) >> 16);

		hv3 = l2->common.dest_mac_hi;
		break;
	}
	case PPE_PKT_TYPE_IPV4_DSLITE:
	case PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash &= PPE_NUM_ENTRIES - 1;

	return hash;
}

static u32 airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, u32 hash)
{
	if (!airoha_ppe2_is_enabled(ppe->eth))
		return hash;

	return hash >= PPE_STATS_NUM_ENTRIES ? hash - PPE1_STATS_NUM_ENTRIES
					     : hash;
}

static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe,
						 struct airoha_npu *npu,
						 int index)
{
	memset_io(&npu->stats[index], 0, sizeof(*npu->stats));
	memset(&ppe->foe_stats[index], 0, sizeof(*ppe->foe_stats));
}

static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe,
					    struct airoha_npu *npu)
{
	int i;

	for (i = 0; i < PPE_STATS_NUM_ENTRIES; i++)
		airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i);
}

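/* Enable hw accounting for this entry: the original qid/channel and
 * ib2 forwarding fields are stashed in the ACTDP and meter fields,
 * and the entry is redirected to PSE port 6, presumably so traffic
 * traverses the NPU where the byte/packet counters are updated.
 */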
static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
					     struct airoha_npu *npu,
					     struct airoha_foe_entry *hwe,
					     u32 hash)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 index, pse_port, val, *data, *ib2, *meter;
	u8 nbq;

	index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
	if (index >= PPE_STATS_NUM_ENTRIES)
		return;

	if (type == PPE_PKT_TYPE_BRIDGE) {
		data = &hwe->bridge.data;
		ib2 = &hwe->bridge.ib2;
		meter = &hwe->bridge.l2.meter;
	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		data = &hwe->ipv6.data;
		ib2 = &hwe->ipv6.ib2;
		meter = &hwe->ipv6.meter;
	} else {
		data = &hwe->ipv4.data;
		ib2 = &hwe->ipv4.ib2;
		meter = &hwe->ipv4.l2.meter;
	}

	pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
	if (pse_port == FE_PSE_PORT_CDM4)
		return;

	airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index);

	val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data);
	*data = (*data & ~AIROHA_FOE_ACTDP) |
		FIELD_PREP(AIROHA_FOE_ACTDP, val);

	val = *ib2 & (AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
		      AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH);
	*meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val);

	nbq = pse_port == 1 ? 6 : 5;
	*ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
		  AIROHA_FOE_IB2_PSE_QOS);
	*ib2 |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, 6) |
		FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
}

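/* Return a pointer to the FoE entry for @hash in the DMA shadow
 * table. SRAM-backed entries are first synced into the shadow via
 * the indirect PPE_RAM access window.
 */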
static struct airoha_foe_entry *
airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash)
{
	lockdep_assert_held(&ppe_lock);

	if (hash < PPE_SRAM_NUM_ENTRIES) {
		u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
		struct airoha_eth *eth = ppe->eth;
		bool ppe2;
		u32 val;
		int i;

		ppe2 = airoha_ppe2_is_enabled(ppe->eth) &&
		       hash >= PPE1_SRAM_NUM_ENTRIES;
		airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
			     FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
			     PPE_SRAM_CTRL_REQ_MASK);
		if (read_poll_timeout_atomic(airoha_fe_rr, val,
					     val & PPE_SRAM_CTRL_ACK_MASK,
					     10, 100, false, eth,
					     REG_PPE_RAM_CTRL(ppe2)))
			return NULL;

		for (i = 0; i < sizeof(struct airoha_foe_entry) / 4; i++)
			hwe[i] = airoha_fe_rr(eth,
					      REG_PPE_RAM_ENTRY(ppe2, i));
	}

	return ppe->foe + hash * sizeof(struct airoha_foe_entry);
}

struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
						  u32 hash)
{
	struct airoha_foe_entry *hwe;

	spin_lock_bh(&ppe_lock);
	hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
	spin_unlock_bh(&ppe_lock);

	return hwe;
}

static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
					 struct airoha_foe_entry *hwe)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
	int len;

	if ((hwe->ib1 ^ e->data.ib1) & AIROHA_FOE_IB1_BIND_UDP)
		return false;

	if (type > PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct airoha_foe_entry, ipv6.data);
	else
		len = offsetof(struct airoha_foe_entry, ipv4.ib2);

	return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
}

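/* Commit a sw FoE entry to hw: the entry body is written before ib1
 * (with a wmb() in between) so the hw never observes a half-written
 * bound entry, then the NPU firmware is asked to commit SRAM-backed
 * slots.
 */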
static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
				       struct airoha_foe_entry *e,
				       u32 hash, bool rx_wlan)
{
	struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
	u32 ts = airoha_ppe_get_timestamp(ppe);
	struct airoha_eth *eth = ppe->eth;
	struct airoha_npu *npu;
	int err = 0;

	memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
	wmb();

	e->ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
	e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
	hwe->ib1 = e->ib1;

	rcu_read_lock();

	npu = rcu_dereference(eth->npu);
	if (!npu) {
		err = -ENODEV;
		goto unlock;
	}

	if (!rx_wlan)
		airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);

	if (hash < PPE_SRAM_NUM_ENTRIES) {
		dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
		bool ppe2 = airoha_ppe2_is_enabled(eth) &&
			    hash >= PPE1_SRAM_NUM_ENTRIES;

		err = npu->ops.ppe_foe_commit_entry(npu, addr, sizeof(*hwe),
						    hash, ppe2);
	}
unlock:
	rcu_read_unlock();

	return err;
}

static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe,
				       struct airoha_flow_table_entry *e)
{
	lockdep_assert_held(&ppe_lock);

	hlist_del_init(&e->list);
	if (e->hash != 0xffff) {
		e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
		e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
					  AIROHA_FOE_STATE_INVALID);
		airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash, false);
		e->hash = 0xffff;
	}
	if (e->type == FLOW_TYPE_L2_SUBFLOW) {
		hlist_del_init(&e->l2_subflow_node);
		kfree(e);
	}
}

static void airoha_ppe_foe_remove_l2_flow(struct airoha_ppe *ppe,
					  struct airoha_flow_table_entry *e)
{
	struct hlist_head *head = &e->l2_flows;
	struct hlist_node *n;

	lockdep_assert_held(&ppe_lock);

	rhashtable_remove_fast(&ppe->l2_flows, &e->l2_node,
			       airoha_l2_flow_table_params);
	hlist_for_each_entry_safe(e, n, head, l2_subflow_node)
		airoha_ppe_foe_remove_flow(ppe, e);
}

static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
					     struct airoha_flow_table_entry *e)
{
	spin_lock_bh(&ppe_lock);

	if (e->type == FLOW_TYPE_L2)
		airoha_ppe_foe_remove_l2_flow(ppe, e);
	else
		airoha_ppe_foe_remove_flow(ppe, e);

	spin_unlock_bh(&ppe_lock);
}

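/* Create a per-destination subflow for an L2 (bridge) flow once a
 * packet for it hits an unbound hw entry: the hw entry at @hash is
 * cloned and patched with the bridge l2/ib2 data; for IPv6 entries
 * the original source MAC is preserved via smac_id.
 */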
static int
airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
				    struct airoha_flow_table_entry *e,
				    u32 hash, bool rx_wlan)
{
	u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
	struct airoha_foe_entry *hwe_p, hwe;
	struct airoha_flow_table_entry *f;
	int type;

	hwe_p = airoha_ppe_foe_get_entry_locked(ppe, hash);
	if (!hwe_p)
		return -EINVAL;

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -ENOMEM;

	hlist_add_head(&f->l2_subflow_node, &e->l2_flows);
	f->type = FLOW_TYPE_L2_SUBFLOW;
	f->hash = hash;

	memcpy(&hwe, hwe_p, sizeof(*hwe_p));
	hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);

	type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
	if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		memcpy(&hwe.ipv6.l2, &e->data.bridge.l2, sizeof(hwe.ipv6.l2));
		hwe.ipv6.ib2 = e->data.bridge.ib2;
		/* setting smac_id to 0xf instructs the hw to keep the
		 * original source mac address
		 */
		hwe.ipv6.l2.src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID,
						    0xf);
	} else {
		memcpy(&hwe.bridge.l2, &e->data.bridge.l2,
		       sizeof(hwe.bridge.l2));
		hwe.bridge.ib2 = e->data.bridge.ib2;
		if (type == PPE_PKT_TYPE_IPV4_HNAPT)
			memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
			       sizeof(hwe.ipv4.new_tuple));
	}

	hwe.bridge.data = e->data.bridge.data;
	airoha_ppe_foe_commit_entry(ppe, &hwe, hash, rx_wlan);

	return 0;
}

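/* FoE miss handler, called on rx for packets the hw could not bind:
 * try to bind a matching sw L4 flow to the reported hash, or create
 * an L2 subflow if the ethernet header matches a bridge flow.
 */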
static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
					struct sk_buff *skb,
					u32 hash, bool rx_wlan)
{
	struct airoha_flow_table_entry *e;
	struct airoha_foe_bridge br = {};
	struct airoha_foe_entry *hwe;
	bool commit_done = false;
	struct hlist_node *n;
	u32 index, state;

	spin_lock_bh(&ppe_lock);

	hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
	if (!hwe)
		goto unlock;

	state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
	if (state == AIROHA_FOE_STATE_BIND)
		goto unlock;

	index = airoha_ppe_foe_get_entry_hash(hwe);
	hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
		if (e->type == FLOW_TYPE_L2_SUBFLOW) {
			state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
			if (state != AIROHA_FOE_STATE_BIND) {
				e->hash = 0xffff;
				airoha_ppe_foe_remove_flow(ppe, e);
			}
			continue;
		}

		if (!airoha_ppe_foe_compare_entry(e, hwe))
			continue;

		airoha_ppe_foe_commit_entry(ppe, &e->data, hash, rx_wlan);
		commit_done = true;
		e->hash = hash;
	}

	if (commit_done)
		goto unlock;

	airoha_ppe_foe_set_bridge_addrs(&br, eth_hdr(skb));
	e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
				   airoha_l2_flow_table_params);
	if (e)
		airoha_ppe_foe_commit_subflow_entry(ppe, e, hash, rx_wlan);
unlock:
	spin_unlock_bh(&ppe_lock);
}

static int
airoha_ppe_foe_l2_flow_commit_entry(struct airoha_ppe *ppe,
				    struct airoha_flow_table_entry *e)
{
	struct airoha_flow_table_entry *prev;

	e->type = FLOW_TYPE_L2;
	prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &e->l2_node,
						 airoha_l2_flow_table_params);
	if (!prev)
		return 0;

	if (IS_ERR(prev))
		return PTR_ERR(prev);

	return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
				       &e->l2_node,
				       airoha_l2_flow_table_params);
}

static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
					    struct airoha_flow_table_entry *e)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
	u32 hash;

	if (type == PPE_PKT_TYPE_BRIDGE)
		return airoha_ppe_foe_l2_flow_commit_entry(ppe, e);

	hash = airoha_ppe_foe_get_entry_hash(&e->data);
	e->type = FLOW_TYPE_L4;
	e->hash = 0xffff;

	spin_lock_bh(&ppe_lock);
	hlist_add_head(&e->list, &ppe->foe_flow[hash]);
	spin_unlock_bh(&ppe_lock);

	return 0;
}

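/* Compute how long an entry has been idle, in hw timestamp units
 * (treated as seconds by the stats path), handling wrap-around of
 * the narrower unbind timestamp field.
 */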
static int airoha_ppe_get_entry_idle_time(struct airoha_ppe *ppe, u32 ib1)
{
	u32 state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
	u32 ts, ts_mask, now = airoha_ppe_get_timestamp(ppe);
	int idle;

	if (state == AIROHA_FOE_STATE_BIND) {
		ts = FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, ib1);
		ts_mask = AIROHA_FOE_IB1_BIND_TIMESTAMP;
	} else {
		ts = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, ib1);
		now = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, now);
		ts_mask = AIROHA_FOE_IB1_UNBIND_TIMESTAMP;
	}
	idle = now - ts;

	return idle < 0 ? idle + ts_mask + 1 : idle;
}

static void
airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
				    struct airoha_flow_table_entry *e)
{
	int min_idle = airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
	struct airoha_flow_table_entry *iter;
	struct hlist_node *n;

	lockdep_assert_held(&ppe_lock);

	hlist_for_each_entry_safe(iter, n, &e->l2_flows, l2_subflow_node) {
		struct airoha_foe_entry *hwe;
		u32 ib1, state;
		int idle;

		hwe = airoha_ppe_foe_get_entry_locked(ppe, iter->hash);
		if (!hwe)
			continue;

		ib1 = READ_ONCE(hwe->ib1);
		state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
		if (state != AIROHA_FOE_STATE_BIND) {
			iter->hash = 0xffff;
			airoha_ppe_foe_remove_flow(ppe, iter);
			continue;
		}

		idle = airoha_ppe_get_entry_idle_time(ppe, ib1);
		if (idle >= min_idle)
			continue;

		min_idle = idle;
		e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
		e->data.ib1 |= ib1 & AIROHA_FOE_IB1_BIND_TIMESTAMP;
	}
}

static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe,
					     struct airoha_flow_table_entry *e)
{
	struct airoha_foe_entry *hwe_p, hwe = {};

	spin_lock_bh(&ppe_lock);

	if (e->type == FLOW_TYPE_L2) {
		airoha_ppe_foe_flow_l2_entry_update(ppe, e);
		goto unlock;
	}

	if (e->hash == 0xffff)
		goto unlock;

	hwe_p = airoha_ppe_foe_get_entry_locked(ppe, e->hash);
	if (!hwe_p)
		goto unlock;

	memcpy(&hwe, hwe_p, sizeof(*hwe_p));
	if (!airoha_ppe_foe_compare_entry(e, &hwe)) {
		e->hash = 0xffff;
		goto unlock;
	}

	e->data.ib1 = hwe.ib1;
unlock:
	spin_unlock_bh(&ppe_lock);
}

static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe,
				      struct airoha_flow_table_entry *e)
{
	airoha_ppe_foe_flow_entry_update(ppe, e);

	return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
}

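/* FLOW_CLS_REPLACE handler: translate the flower rule (control/basic
 * keys, eth/ipv4/ipv6 addresses, ports, mangle/vlan/pppoe/redirect
 * actions) into a FoE entry and link it into the sw flow tables.
 */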
static int airoha_ppe_flow_offload_replace(struct airoha_eth *eth,
					   struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct airoha_flow_table_entry *e;
	struct airoha_flow_data data = {};
	struct net_device *odev = NULL;
	struct flow_action_entry *act;
	struct airoha_foe_entry hwe;
	int err, i, offload_type;
	u16 addr_type = 0;
	u8 l4proto = 0;

	if (rhashtable_lookup(&eth->flow_table, &f->cookie,
			      airoha_flow_table_params))
		return -EEXIST;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return -EOPNOTSUPP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
		if (flow_rule_has_control_flags(match.mask->flags,
						f->common.extack))
			return -EOPNOTSUPP;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		l4proto = match.key->ip_proto;
	} else {
		return -EOPNOTSUPP;
	}

	switch (addr_type) {
	case 0:
		offload_type = PPE_PKT_TYPE_BRIDGE;
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
			struct flow_match_eth_addrs match;

			flow_rule_match_eth_addrs(rule, &match);
			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
		} else {
			return -EOPNOTSUPP;
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		offload_type = PPE_PKT_TYPE_IPV4_HNAPT;
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		offload_type = PPE_PKT_TYPE_IPV6_ROUTE_5T;
		break;
	default:
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE:
			if (offload_type == PPE_PKT_TYPE_BRIDGE)
				return -EOPNOTSUPP;

			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
				airoha_ppe_flow_mangle_eth(act, &data.eth);
			break;
		case FLOW_ACTION_REDIRECT:
			odev = act->dev;
			break;
		case FLOW_ACTION_CSUM:
			break;
		case FLOW_ACTION_VLAN_PUSH:
			if (data.vlan.num == 2 ||
			    act->vlan.proto != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan.hdr[data.vlan.num].id = act->vlan.vid;
			data.vlan.hdr[data.vlan.num].proto = act->vlan.proto;
			data.vlan.num++;
			break;
		case FLOW_ACTION_VLAN_POP:
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			if (data.pppoe.num == 1 || data.vlan.num == 2)
				return -EOPNOTSUPP;

			data.pppoe.sid = act->pppoe.sid;
			data.pppoe.num++;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (!is_valid_ether_addr(data.eth.h_source) ||
	    !is_valid_ether_addr(data.eth.h_dest))
		return -EINVAL;

	err = airoha_ppe_foe_entry_prepare(eth, &hwe, odev, offload_type,
					   &data, l4proto);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports ports;

		if (offload_type == PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		flow_rule_match_ports(rule, &ports);
		data.src_port = ports.key->src;
		data.dst_port = ports.key->dst;
	} else if (offload_type != PPE_PKT_TYPE_BRIDGE) {
		return -EOPNOTSUPP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs addrs;

		flow_rule_match_ipv4_addrs(rule, &addrs);
		data.v4.src_addr = addrs.key->src;
		data.v4.dst_addr = addrs.key->dst;
		airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, false);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs addrs;

		flow_rule_match_ipv6_addrs(rule, &addrs);

		data.v6.src_addr = addrs.key->src;
		data.v6.dst_addr = addrs.key->dst;
		airoha_ppe_foe_entry_set_ipv6_tuple(&hwe, &data);
	}

	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		if (offload_type == PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			err = airoha_ppe_flow_mangle_ports(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			err = airoha_ppe_flow_mangle_ipv4(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
			/* handled earlier */
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (err)
			return err;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		err = airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, true);
		if (err)
			return err;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->cookie = f->cookie;
	memcpy(&e->data, &hwe, sizeof(e->data));

	err = airoha_ppe_foe_flow_commit_entry(eth->ppe, e);
	if (err)
		goto free_entry;

	err = rhashtable_insert_fast(&eth->flow_table, &e->node,
				     airoha_flow_table_params);
	if (err < 0)
		goto remove_foe_entry;

	return 0;

remove_foe_entry:
	airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
free_entry:
	kfree(e);

	return err;
}

static int airoha_ppe_flow_offload_destroy(struct airoha_eth *eth,
					   struct flow_cls_offload *f)
{
	struct airoha_flow_table_entry *e;

	e = rhashtable_lookup(&eth->flow_table, &f->cookie,
			      airoha_flow_table_params);
	if (!e)
		return -ENOENT;

	airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
	rhashtable_remove_fast(&eth->flow_table, &e->node,
			       airoha_flow_table_params);
	kfree(e);

	return 0;
}

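/* Flow counters are 32 bit wide in hw: the 64 bit packets/bytes
 * values are stitched together from the high word in the coherent
 * foe_stats area and the low word read from the NPU stats region.
 */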
void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
				    struct airoha_foe_stats64 *stats)
{
	u32 index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
	struct airoha_eth *eth = ppe->eth;
	struct airoha_npu *npu;

	if (index >= PPE_STATS_NUM_ENTRIES)
		return;

	rcu_read_lock();

	npu = rcu_dereference(eth->npu);
	if (npu) {
		u64 packets = ppe->foe_stats[index].packets;
		u64 bytes = ppe->foe_stats[index].bytes;
		struct airoha_foe_stats npu_stats;

		memcpy_fromio(&npu_stats, &npu->stats[index],
			      sizeof(*npu->stats));
		stats->packets = packets << 32 | npu_stats.packets;
		stats->bytes = bytes << 32 | npu_stats.bytes;
	}

	rcu_read_unlock();
}

static int airoha_ppe_flow_offload_stats(struct airoha_eth *eth,
					 struct flow_cls_offload *f)
{
	struct airoha_flow_table_entry *e;
	u32 idle;

	e = rhashtable_lookup(&eth->flow_table, &f->cookie,
			      airoha_flow_table_params);
	if (!e)
		return -ENOENT;

	idle = airoha_ppe_entry_idle_time(eth->ppe, e);
	f->stats.lastused = jiffies - idle * HZ;

	if (e->hash != 0xffff) {
		struct airoha_foe_stats64 stats = {};

		airoha_ppe_foe_entry_get_stats(eth->ppe, e->hash, &stats);
		f->stats.pkts += (stats.packets - e->stats.packets);
		f->stats.bytes += (stats.bytes - e->stats.bytes);
		e->stats = stats;
	}

	return 0;
}

static int airoha_ppe_flow_offload_cmd(struct airoha_eth *eth,
				       struct flow_cls_offload *f)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return airoha_ppe_flow_offload_replace(eth, f);
	case FLOW_CLS_DESTROY:
		return airoha_ppe_flow_offload_destroy(eth, f);
	case FLOW_CLS_STATS:
		return airoha_ppe_flow_offload_stats(eth, f);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe,
					 struct airoha_npu *npu)
{
	int i, sram_num_entries = PPE_SRAM_NUM_ENTRIES;
	struct airoha_foe_entry *hwe = ppe->foe;

	if (airoha_ppe2_is_enabled(ppe->eth))
		sram_num_entries = sram_num_entries / 2;

	for (i = 0; i < sram_num_entries; i++)
		memset(&hwe[i], 0, sizeof(*hwe));

	return npu->ops.ppe_flush_sram_entries(npu, ppe->foe_dma,
					       PPE_SRAM_NUM_ENTRIES);
}

static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
{
	struct airoha_npu *npu = airoha_npu_get(eth->dev);

	if (IS_ERR(npu)) {
		request_module("airoha-npu");
		npu = airoha_npu_get(eth->dev);
	}

	return npu;
}

static int airoha_ppe_offload_setup(struct airoha_eth *eth)
{
	struct airoha_npu *npu = airoha_ppe_npu_get(eth);
	struct airoha_ppe *ppe = eth->ppe;
	int err;

	if (IS_ERR(npu))
		return PTR_ERR(npu);

	err = npu->ops.ppe_init(npu);
	if (err)
		goto error_npu_put;

	if (PPE_STATS_NUM_ENTRIES) {
		err = npu->ops.ppe_init_stats(npu, ppe->foe_stats_dma,
					      PPE_STATS_NUM_ENTRIES);
		if (err)
			goto error_npu_put;
	}

	airoha_ppe_hw_init(ppe);
	err = airoha_ppe_flush_sram_entries(ppe, npu);
	if (err)
		goto error_npu_put;

	airoha_ppe_foe_flow_stats_reset(ppe, npu);

	rcu_assign_pointer(eth->npu, npu);
	synchronize_rcu();

	return 0;

error_npu_put:
	airoha_npu_put(npu);

	return err;
}

int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data)
{
	struct airoha_ppe *ppe = dev->priv;
	struct airoha_eth *eth = ppe->eth;
	int err = 0;

	mutex_lock(&flow_offload_mutex);

	if (!eth->npu)
		err = airoha_ppe_offload_setup(eth);
	if (!err)
		err = airoha_ppe_flow_offload_cmd(eth, type_data);

	mutex_unlock(&flow_offload_mutex);

	return err;
}

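/* Rx hook: rate-limit FoE bind attempts to one per hash bucket every
 * HZ / 10 jiffies (i.e. 10 per second) before trying to bind the
 * received packet's flow.
 */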
void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
			  u16 hash, bool rx_wlan)
{
	struct airoha_ppe *ppe = dev->priv;
	u16 now, diff;

	if (hash > PPE_HASH_MASK)
		return;

	now = (u16)jiffies;
	diff = now - ppe->foe_check_time[hash];
	if (diff < HZ / 10)
		return;

	ppe->foe_check_time[hash] = now;
	airoha_ppe_foe_insert_entry(ppe, skb, hash, rx_wlan);
}

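/* Program the port MAC address into the PPE source-MAC table
 * (UPDMEM): entry @port->id holds the address split across two
 * 32 bit words and is referenced by smac_id in the FoE mac info.
 */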
void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
{
	struct airoha_eth *eth = port->qdma->eth;
	struct net_device *dev = port->dev;
	const u8 *addr = dev->dev_addr;
	u32 val;

	val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
	airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
	airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
		     FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
		     PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);

	val = (addr[0] << 8) | addr[1];
	airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
	airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
		     FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
		     FIELD_PREP(PPE_UPDMEM_OFFSET_MASK, 1) |
		     PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
}

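/* Resolve the "airoha,eth" phandle to the ethernet driver instance
 * and pin both its module and a device link, so the PPE can be used
 * by an external offload consumer (e.g. a wlan driver).
 */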
struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev)
{
	struct platform_device *pdev;
	struct device_node *np;
	struct airoha_eth *eth;

	np = of_parse_phandle(dev->of_node, "airoha,eth", 0);
	if (!np)
		return ERR_PTR(-ENODEV);

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		dev_err(dev, "cannot find device node %s\n", np->name);
		of_node_put(np);
		return ERR_PTR(-ENODEV);
	}
	of_node_put(np);

	if (!try_module_get(THIS_MODULE)) {
		dev_err(dev, "failed to get the device driver module\n");
		goto error_pdev_put;
	}

	eth = platform_get_drvdata(pdev);
	if (!eth)
		goto error_module_put;

	if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) {
		dev_err(&pdev->dev,
			"failed to create device link to consumer %s\n",
			dev_name(dev));
		goto error_module_put;
	}

	return &eth->ppe->dev;

error_module_put:
	module_put(THIS_MODULE);
error_pdev_put:
	platform_device_put(pdev);

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(airoha_ppe_get_dev);

void airoha_ppe_put_dev(struct airoha_ppe_dev *dev)
{
	struct airoha_ppe *ppe = dev->priv;
	struct airoha_eth *eth = ppe->eth;

	module_put(THIS_MODULE);
	put_device(eth->dev);
}
EXPORT_SYMBOL_GPL(airoha_ppe_put_dev);

int airoha_ppe_init(struct airoha_eth *eth)
{
	struct airoha_ppe *ppe;
	int foe_size, err;

	ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
	if (!ppe)
		return -ENOMEM;

	ppe->dev.ops.setup_tc_block_cb = airoha_ppe_setup_tc_block_cb;
	ppe->dev.ops.check_skb = airoha_ppe_check_skb;
	ppe->dev.priv = ppe;

	foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
	ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
				       GFP_KERNEL);
	if (!ppe->foe)
		return -ENOMEM;

	ppe->eth = eth;
	eth->ppe = ppe;

	ppe->foe_flow = devm_kzalloc(eth->dev,
				     PPE_NUM_ENTRIES * sizeof(*ppe->foe_flow),
				     GFP_KERNEL);
	if (!ppe->foe_flow)
		return -ENOMEM;

	foe_size = PPE_STATS_NUM_ENTRIES * sizeof(*ppe->foe_stats);
	if (foe_size) {
		ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size,
						     &ppe->foe_stats_dma,
						     GFP_KERNEL);
		if (!ppe->foe_stats)
			return -ENOMEM;
	}

	err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
	if (err)
		return err;

	err = rhashtable_init(&ppe->l2_flows, &airoha_l2_flow_table_params);
	if (err)
		goto error_flow_table_destroy;

	err = airoha_ppe_debugfs_init(ppe);
	if (err)
		goto error_l2_flow_table_destroy;

	return 0;

error_l2_flow_table_destroy:
	rhashtable_destroy(&ppe->l2_flows);
error_flow_table_destroy:
	rhashtable_destroy(&eth->flow_table);

	return err;
}

void airoha_ppe_deinit(struct airoha_eth *eth)
{
	struct airoha_npu *npu;

	rcu_read_lock();
	npu = rcu_dereference(eth->npu);
	if (npu) {
		npu->ops.ppe_deinit(npu);
		airoha_npu_put(npu);
	}
	rcu_read_unlock();

	rhashtable_destroy(&eth->ppe->l2_flows);
	rhashtable_destroy(&eth->flow_table);
	debugfs_remove(eth->ppe->debugfs_dir);
}