// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2025 AIROHA Inc
 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/rhashtable.h>
#include <net/ipv6.h>
#include <net/pkt_cls.h>

#include "airoha_regs.h"
#include "airoha_eth.h"

static DEFINE_MUTEX(flow_offload_mutex);
static DEFINE_SPINLOCK(ppe_lock);

static const struct rhashtable_params airoha_flow_table_params = {
	.head_offset = offsetof(struct airoha_flow_table_entry, node),
	.key_offset = offsetof(struct airoha_flow_table_entry, cookie),
	.key_len = sizeof(unsigned long),
	.automatic_shrinking = true,
};

static const struct rhashtable_params airoha_l2_flow_table_params = {
	.head_offset = offsetof(struct airoha_flow_table_entry, l2_node),
	.key_offset = offsetof(struct airoha_flow_table_entry, data.bridge),
	.key_len = 2 * ETH_ALEN,
	.automatic_shrinking = true,
};

static int airoha_ppe_get_num_stats_entries(struct airoha_ppe *ppe)
{
	if (!IS_ENABLED(CONFIG_NET_AIROHA_FLOW_STATS))
		return -EOPNOTSUPP;

	if (airoha_is_7583(ppe->eth))
		return -EOPNOTSUPP;

	return PPE_STATS_NUM_ENTRIES;
}

static int airoha_ppe_get_total_num_stats_entries(struct airoha_ppe *ppe)
{
	int num_stats = airoha_ppe_get_num_stats_entries(ppe);

	if (num_stats > 0) {
		struct airoha_eth *eth = ppe->eth;

		num_stats *= eth->soc->num_ppe;
	}

	return num_stats;
}

static u32 airoha_ppe_get_total_sram_num_entries(struct airoha_ppe *ppe)
{
	struct airoha_eth *eth = ppe->eth;

	return PPE_SRAM_NUM_ENTRIES * eth->soc->num_ppe;
}

u32 airoha_ppe_get_total_num_entries(struct airoha_ppe *ppe)
{
	u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);

	return sram_num_entries + PPE_DRAM_NUM_ENTRIES;
}

bool airoha_ppe_is_enabled(struct airoha_eth *eth, int index)
{
	if (index >= eth->soc->num_ppe)
		return false;

	return airoha_fe_rr(eth, REG_PPE_GLO_CFG(index)) & PPE_GLO_CFG_EN_MASK;
}

static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
{
	u16 timestamp = airoha_fe_rr(ppe->eth, REG_FE_FOE_TS);

	return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp);
}

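/* Set up the FOE table layout: each PPE owns PPE_SRAM_NUM_ENTRIES entries
 * at the beginning of the coherent buffer, with the DRAM-backed portion
 * appended right after them, so REG_PPE_TB_BASE points past the SRAM area.
 * Bind ageing deltas, hash configuration and per-port egress MTUs are
 * programmed here as well.
 */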
static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
{
	u32 sram_ppe_num_data_entries = PPE_SRAM_NUM_ENTRIES, sram_num_entries;
	u32 sram_tb_size, dram_num_entries;
	struct airoha_eth *eth = ppe->eth;
	int i, sram_num_stats_entries;

	sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
	sram_tb_size = sram_num_entries * sizeof(struct airoha_foe_entry);
	dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);

	sram_num_stats_entries = airoha_ppe_get_num_stats_entries(ppe);
	if (sram_num_stats_entries > 0)
		sram_ppe_num_data_entries -= sram_num_stats_entries;
	sram_ppe_num_data_entries =
		PPE_RAM_NUM_ENTRIES_SHIFT(sram_ppe_num_data_entries);

	for (i = 0; i < eth->soc->num_ppe; i++) {
		int p;

		airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
			     ppe->foe_dma + sram_tb_size);

		airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
			      PPE_BIND_AGE0_DELTA_NON_L4 |
			      PPE_BIND_AGE0_DELTA_UDP,
			      FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) |
			      FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12));
		airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
			      PPE_BIND_AGE1_DELTA_TCP_FIN |
			      PPE_BIND_AGE1_DELTA_TCP,
			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7));

		airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
			      PPE_SRAM_TABLE_EN_MASK |
			      PPE_SRAM_HASH1_EN_MASK |
			      PPE_DRAM_TABLE_EN_MASK |
			      PPE_SRAM_HASH0_MODE_MASK |
			      PPE_SRAM_HASH1_MODE_MASK |
			      PPE_DRAM_HASH0_MODE_MASK |
			      PPE_DRAM_HASH1_MODE_MASK,
			      FIELD_PREP(PPE_SRAM_TABLE_EN_MASK, 1) |
			      FIELD_PREP(PPE_SRAM_HASH1_EN_MASK, 1) |
			      FIELD_PREP(PPE_SRAM_HASH1_MODE_MASK, 1) |
			      FIELD_PREP(PPE_DRAM_HASH1_MODE_MASK, 3));

		airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
			      PPE_TB_CFG_SEARCH_MISS_MASK |
			      PPE_SRAM_TB_NUM_ENTRY_MASK |
			      PPE_DRAM_TB_NUM_ENTRY_MASK |
			      PPE_TB_CFG_KEEPALIVE_MASK |
			      PPE_TB_ENTRY_SIZE_MASK,
			      FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
			      FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0) |
			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
					 sram_ppe_num_data_entries) |
			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
					 dram_num_entries));

		airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);

		for (p = 0; p < ARRAY_SIZE(eth->ports); p++)
			airoha_fe_rmw(eth, REG_PPE_MTU(i, p),
				      FP0_EGRESS_MTU_MASK |
				      FP1_EGRESS_MTU_MASK,
				      FIELD_PREP(FP0_EGRESS_MTU_MASK,
						 AIROHA_MAX_MTU) |
				      FIELD_PREP(FP1_EGRESS_MTU_MASK,
						 AIROHA_MAX_MTU));
	}
}

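/* Apply an ETH header mangle action to the cached ethernet header. Only
 * offsets 0-8 (i.e. h_dest/h_source) are accepted; the mangle mask selects
 * whether two or four bytes are rewritten.
 */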
static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act,
				       void *eth)
{
	void *dest = eth + act->mangle.offset;
	const void *src = &act->mangle.val;

	if (act->mangle.offset > 8)
		return;

	if (act->mangle.mask == 0xffff) {
		src += 2;
		dest += 2;
	}

	memcpy(dest, src, act->mangle.mask ? 2 : 4);
}

static int airoha_ppe_flow_mangle_ports(const struct flow_action_entry *act,
					struct airoha_flow_data *data)
{
	u32 val = be32_to_cpu((__force __be32)act->mangle.val);

	switch (act->mangle.offset) {
	case 0:
		if ((__force __be32)act->mangle.mask == ~cpu_to_be32(0xffff))
			data->dst_port = cpu_to_be16(val);
		else
			data->src_port = cpu_to_be16(val >> 16);
		break;
	case 2:
		data->dst_port = cpu_to_be16(val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
				       struct airoha_flow_data *data)
{
	__be32 *dest;

	switch (act->mangle.offset) {
	case offsetof(struct iphdr, saddr):
		dest = &data->v4.src_addr;
		break;
	case offsetof(struct iphdr, daddr):
		dest = &data->v4.dst_addr;
		break;
	default:
		return -EINVAL;
	}

	memcpy(dest, &act->mangle.val, sizeof(u32));

	return 0;
}

static int airoha_ppe_get_wdma_info(struct net_device *dev, const u8 *addr,
				    struct airoha_wdma_info *info)
{
	struct net_device_path_stack stack;
	struct net_device_path *path;
	int err;

	if (!dev)
		return -ENODEV;

	err = dev_fill_forward_path(dev, addr, &stack);
	if (err)
		return err;

	path = &stack.path[stack.num_paths - 1];
	if (path->type != DEV_PATH_MTK_WDMA)
		return -1;

	info->idx = path->mtk_wdma.wdma_idx;
	info->bss = path->mtk_wdma.bss;
	info->wcid = path->mtk_wdma.wcid;

	return 0;
}

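/* If *dev is a DSA user port, replace it with the DSA conduit device and
 * return the switch port index; -ENODEV otherwise.
 */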
static int airoha_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dp = dsa_port_from_netdev(*dev);

	if (IS_ERR(dp))
		return -ENODEV;

	*dev = dsa_port_to_conduit(dp);
	return dp->index;
#else
	return -ENODEV;
#endif
}

static void airoha_ppe_foe_set_bridge_addrs(struct airoha_foe_bridge *br,
					    struct ethhdr *eh)
{
	br->dest_mac_hi = get_unaligned_be32(eh->h_dest);
	br->dest_mac_lo = get_unaligned_be16(eh->h_dest + 4);
	br->src_mac_hi = get_unaligned_be16(eh->h_source);
	br->src_mac_lo = get_unaligned_be32(eh->h_source + 2);
}

static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
					struct airoha_foe_entry *hwe,
					struct net_device *dev, int type,
					struct airoha_flow_data *data,
					int l4proto)
{
	u32 qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f), ports_pad, val;
	int wlan_etype = -EINVAL, dsa_port = airoha_get_dsa_port(&dev);
	struct airoha_foe_mac_info_common *l2;
	u8 smac_id = 0xf;

	memset(hwe, 0, sizeof(*hwe));

	val = FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, AIROHA_FOE_STATE_BIND) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_PACKET_TYPE, type) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_PPPOE, data->pppoe.num) |
	      AIROHA_FOE_IB1_BIND_TTL;
	hwe->ib1 = val;

	val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f);
	if (dev) {
		struct airoha_wdma_info info = {};

		if (!airoha_ppe_get_wdma_info(dev, data->eth.h_dest, &info)) {
			val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, info.idx) |
			       FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT,
					  FE_PSE_PORT_CDM4);
			qdata |= FIELD_PREP(AIROHA_FOE_ACTDP, info.bss);
			wlan_etype = FIELD_PREP(AIROHA_FOE_MAC_WDMA_BAND,
						info.idx) |
				     FIELD_PREP(AIROHA_FOE_MAC_WDMA_WCID,
						info.wcid);
		} else {
			struct airoha_gdm_port *port = netdev_priv(dev);
			u8 pse_port;

			if (!airoha_is_valid_gdm_port(eth, port))
				return -EINVAL;

			if (dsa_port >= 0 || eth->ports[1])
				pse_port = port->id == 4 ? FE_PSE_PORT_GDM4
							 : port->id;
			else
				pse_port = 2; /* uplink relies on GDM2
					       * loopback
					       */

			val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port) |
			       AIROHA_FOE_IB2_PSE_QOS;
			/* For downlink traffic consume SRAM memory for hw
			 * forwarding descriptors queue.
			 */
			if (airhoa_is_lan_gdm_port(port))
				val |= AIROHA_FOE_IB2_FAST_PATH;
			if (dsa_port >= 0)
				val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ,
						  dsa_port);

			smac_id = port->id;
		}
	}

	if (is_multicast_ether_addr(data->eth.h_dest))
		val |= AIROHA_FOE_IB2_MULTICAST;

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == PPE_PKT_TYPE_IPV4_ROUTE)
		hwe->ipv4.orig_tuple.ports = ports_pad;
	if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
		hwe->ipv6.ports = ports_pad;

	if (type == PPE_PKT_TYPE_BRIDGE) {
		airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth);
		hwe->bridge.data = qdata;
		hwe->bridge.ib2 = val;
		l2 = &hwe->bridge.l2.common;
	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		hwe->ipv6.data = qdata;
		hwe->ipv6.ib2 = val;
		l2 = &hwe->ipv6.l2;
		l2->etype = ETH_P_IPV6;
	} else {
		hwe->ipv4.data = qdata;
		hwe->ipv4.ib2 = val;
		l2 = &hwe->ipv4.l2.common;
		l2->etype = ETH_P_IP;
	}

	l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
	l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
	if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
		struct airoha_foe_mac_info *mac_info;

		l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
		hwe->ipv4.l2.src_mac_lo =
			get_unaligned_be16(data->eth.h_source + 4);

		mac_info = (struct airoha_foe_mac_info *)l2;
		mac_info->pppoe_id = data->pppoe.sid;
	} else {
		l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id) |
				 FIELD_PREP(AIROHA_FOE_MAC_PPPOE_ID,
					    data->pppoe.sid);
	}

	if (data->vlan.num) {
		l2->vlan1 = data->vlan.hdr[0].id;
		if (data->vlan.num == 2)
			l2->vlan2 = data->vlan.hdr[1].id;
	}

	if (wlan_etype >= 0) {
		l2->etype = wlan_etype;
	} else if (dsa_port >= 0) {
		l2->etype = BIT(dsa_port);
		l2->etype |= !data->vlan.num ? BIT(15) : 0;
	} else if (data->pppoe.num) {
		l2->etype = ETH_P_PPP_SES;
	}

	return 0;
}

static int airoha_ppe_foe_entry_set_ipv4_tuple(struct airoha_foe_entry *hwe,
					       struct airoha_flow_data *data,
					       bool egress)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	struct airoha_foe_ipv4_tuple *t;

	switch (type) {
	case PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &hwe->ipv4.new_tuple;
			break;
		}
		fallthrough;
	case PPE_PKT_TYPE_IPV4_DSLITE:
	case PPE_PKT_TYPE_IPV4_ROUTE:
		t = &hwe->ipv4.orig_tuple;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(data->v4.src_addr);
	t->dest_ip = be32_to_cpu(data->v4.dst_addr);

	if (type != PPE_PKT_TYPE_IPV4_ROUTE) {
		t->src_port = be16_to_cpu(data->src_port);
		t->dest_port = be16_to_cpu(data->dst_port);
	}

	return 0;
}

static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
					       struct airoha_flow_data *data)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 *src, *dest;

	switch (type) {
	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case PPE_PKT_TYPE_IPV6_6RD:
		hwe->ipv6.src_port = be16_to_cpu(data->src_port);
		hwe->ipv6.dest_port = be16_to_cpu(data->dst_port);
		fallthrough;
	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = hwe->ipv6.src_ip;
		dest = hwe->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	ipv6_addr_be32_to_cpu(src, data->v6.src_addr.s6_addr32);
	ipv6_addr_be32_to_cpu(dest, data->v6.dst_addr.s6_addr32);

	return 0;
}

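/* Fold the flow tuple (MAC addresses for bridge flows, IP addresses and
 * ports otherwise) into three 32-bit vectors and mix them down to a table
 * bucket index, masked with the table size (a power of two). Unsupported
 * packet types hash to the last bucket.
 */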
static u32 airoha_ppe_foe_get_entry_hash(struct airoha_ppe *ppe,
					 struct airoha_foe_entry *hwe)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 ppe_hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1;
	u32 hash, hv1, hv2, hv3;

	switch (type) {
	case PPE_PKT_TYPE_IPV4_ROUTE:
	case PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = hwe->ipv4.orig_tuple.ports;
		hv2 = hwe->ipv4.orig_tuple.dest_ip;
		hv3 = hwe->ipv4.orig_tuple.src_ip;
		break;
	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = hwe->ipv6.src_ip[3] ^ hwe->ipv6.dest_ip[3];
		hv1 ^= hwe->ipv6.ports;

		hv2 = hwe->ipv6.src_ip[2] ^ hwe->ipv6.dest_ip[2];
		hv2 ^= hwe->ipv6.dest_ip[0];

		hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
		hv3 ^= hwe->ipv6.src_ip[0];
		break;
	case PPE_PKT_TYPE_BRIDGE: {
		struct airoha_foe_mac_info *l2 = &hwe->bridge.l2;

		hv1 = l2->common.src_mac_hi & 0xffff;
		hv1 = hv1 << 16 | l2->src_mac_lo;

		hv2 = l2->common.dest_mac_lo;
		hv2 = hv2 << 16;
		hv2 = hv2 | ((l2->common.src_mac_hi & 0xffff0000) >> 16);

		hv3 = l2->common.dest_mac_hi;
		break;
	}
	case PPE_PKT_TYPE_IPV4_DSLITE:
	case PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return ppe_hash_mask;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash &= ppe_hash_mask;

	return hash;
}

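/* Translate a FOE bucket index into the corresponding hw flow-stats slot.
 * Indices beyond the stats area are folded back by PPE_STATS_NUM_ENTRIES;
 * the caller is responsible for bounds-checking the result.
 */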
static int airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe,
					       u32 hash, u32 *index)
{
	int ppe_num_stats_entries;

	ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
	if (ppe_num_stats_entries < 0)
		return ppe_num_stats_entries;

	*index = hash >= ppe_num_stats_entries ? hash - PPE_STATS_NUM_ENTRIES
					       : hash;

	return 0;
}

static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe,
						 struct airoha_npu *npu,
						 int index)
{
	memset_io(&npu->stats[index], 0, sizeof(*npu->stats));
	memset(&ppe->foe_stats[index], 0, sizeof(*ppe->foe_stats));
}

static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe,
					    struct airoha_npu *npu)
{
	int i, ppe_num_stats_entries;

	ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
	if (ppe_num_stats_entries < 0)
		return;

	for (i = 0; i < ppe_num_stats_entries; i++)
		airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i);
}

static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
					     struct airoha_npu *npu,
					     struct airoha_foe_entry *hwe,
					     u32 hash)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 index, pse_port, val, *data, *ib2, *meter;
	int ppe_num_stats_entries;
	u8 nbq;

	ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
	if (ppe_num_stats_entries < 0)
		return;

	if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index))
		return;

	if (index >= ppe_num_stats_entries)
		return;

	if (type == PPE_PKT_TYPE_BRIDGE) {
		data = &hwe->bridge.data;
		ib2 = &hwe->bridge.ib2;
		meter = &hwe->bridge.l2.meter;
	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		data = &hwe->ipv6.data;
		ib2 = &hwe->ipv6.ib2;
		meter = &hwe->ipv6.meter;
	} else {
		data = &hwe->ipv4.data;
		ib2 = &hwe->ipv4.ib2;
		meter = &hwe->ipv4.l2.meter;
	}

	pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
	if (pse_port == FE_PSE_PORT_CDM4)
		return;

	airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index);

	val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data);
	*data = (*data & ~AIROHA_FOE_ACTDP) |
		FIELD_PREP(AIROHA_FOE_ACTDP, val);

	val = *ib2 & (AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
		      AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH);
	*meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val);

	nbq = pse_port == 1 ? 6 : 5;
	*ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
		  AIROHA_FOE_IB2_PSE_QOS);
	*ib2 |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, 6) |
		FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
}

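/* Return a pointer to the FOE entry for @hash inside the coherent shadow
 * table. SRAM-backed entries are first read back from hw through
 * REG_PPE_RAM_CTRL/REG_PPE_RAM_ENTRY so the shadow copy is up to date.
 * Must be called with ppe_lock held.
 */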
static struct airoha_foe_entry *
airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash)
{
	u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);

	lockdep_assert_held(&ppe_lock);

	if (hash < sram_num_entries) {
		u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
		bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES;
		struct airoha_eth *eth = ppe->eth;
		u32 val;
		int i;

		airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
			     FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
			     PPE_SRAM_CTRL_REQ_MASK);
		if (read_poll_timeout_atomic(airoha_fe_rr, val,
					     val & PPE_SRAM_CTRL_ACK_MASK,
					     10, 100, false, eth,
					     REG_PPE_RAM_CTRL(ppe2)))
			return NULL;

		for (i = 0; i < sizeof(struct airoha_foe_entry) / sizeof(*hwe);
		     i++)
			hwe[i] = airoha_fe_rr(eth,
					      REG_PPE_RAM_ENTRY(ppe2, i));
	}

	return ppe->foe + hash * sizeof(struct airoha_foe_entry);
}

struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
						  u32 hash)
{
	struct airoha_foe_entry *hwe;

	spin_lock_bh(&ppe_lock);
	hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
	spin_unlock_bh(&ppe_lock);

	return hwe;
}

static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
					 struct airoha_foe_entry *hwe)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
	int len;

	if ((hwe->ib1 ^ e->data.ib1) & AIROHA_FOE_IB1_BIND_UDP)
		return false;

	if (type > PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct airoha_foe_entry, ipv6.data);
	else
		len = offsetof(struct airoha_foe_entry, ipv4.ib2);

	return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
}

static int airoha_ppe_foe_commit_sram_entry(struct airoha_ppe *ppe, u32 hash)
{
	struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
	bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES;
	u32 *ptr = (u32 *)hwe, val;
	int i;

	for (i = 0; i < sizeof(*hwe) / sizeof(*ptr); i++)
		airoha_fe_wr(ppe->eth, REG_PPE_RAM_ENTRY(ppe2, i), ptr[i]);

	wmb();
	airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
		     FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
		     PPE_SRAM_CTRL_WR_MASK | PPE_SRAM_CTRL_REQ_MASK);

	return read_poll_timeout_atomic(airoha_fe_rr, val,
					val & PPE_SRAM_CTRL_ACK_MASK,
					10, 100, false, ppe->eth,
					REG_PPE_RAM_CTRL(ppe2));
}

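/* Commit a sw flow to the hw FOE table: the entry body is copied first and
 * ib1 (carrying the BIND state and timestamp) is written last, with a
 * wmb() in between, so hw never observes a bound but half-written entry.
 */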
static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
				       struct airoha_foe_entry *e,
				       u32 hash, bool rx_wlan)
{
	u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
	struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
	u32 ts = airoha_ppe_get_timestamp(ppe);
	struct airoha_eth *eth = ppe->eth;
	struct airoha_npu *npu;
	int err = 0;

	memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
	wmb();

	e->ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
	e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
	hwe->ib1 = e->ib1;

	rcu_read_lock();

	npu = rcu_dereference(eth->npu);
	if (!npu) {
		err = -ENODEV;
		goto unlock;
	}

	if (!rx_wlan)
		airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);

	if (hash < sram_num_entries)
		err = airoha_ppe_foe_commit_sram_entry(ppe, hash);
unlock:
	rcu_read_unlock();

	return err;
}

static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe,
				       struct airoha_flow_table_entry *e)
{
	lockdep_assert_held(&ppe_lock);

	hlist_del_init(&e->list);
	if (e->hash != 0xffff) {
		e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
		e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
					  AIROHA_FOE_STATE_INVALID);
		airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash, false);
		e->hash = 0xffff;
	}
	if (e->type == FLOW_TYPE_L2_SUBFLOW) {
		hlist_del_init(&e->l2_subflow_node);
		kfree(e);
	}
}

static void airoha_ppe_foe_remove_l2_flow(struct airoha_ppe *ppe,
					  struct airoha_flow_table_entry *e)
{
	struct hlist_head *head = &e->l2_flows;
	struct hlist_node *n;

	lockdep_assert_held(&ppe_lock);

	rhashtable_remove_fast(&ppe->l2_flows, &e->l2_node,
			       airoha_l2_flow_table_params);
	hlist_for_each_entry_safe(e, n, head, l2_subflow_node)
		airoha_ppe_foe_remove_flow(ppe, e);
}

static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
					     struct airoha_flow_table_entry *e)
{
	spin_lock_bh(&ppe_lock);

	if (e->type == FLOW_TYPE_L2)
		airoha_ppe_foe_remove_l2_flow(ppe, e);
	else
		airoha_ppe_foe_remove_flow(ppe, e);

	spin_unlock_bh(&ppe_lock);
}

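/* Bind an L2 subflow: reuse the tuple of the unbound hw entry found at
 * @hash and overwrite its L2 forwarding info (ib2, mac info, qos data)
 * with the one inherited from the parent bridge flow @e.
 */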
static int
airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
				    struct airoha_flow_table_entry *e,
				    u32 hash, bool rx_wlan)
{
	u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
	struct airoha_foe_entry *hwe_p, hwe;
	struct airoha_flow_table_entry *f;
	int type;

	hwe_p = airoha_ppe_foe_get_entry_locked(ppe, hash);
	if (!hwe_p)
		return -EINVAL;

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -ENOMEM;

	hlist_add_head(&f->l2_subflow_node, &e->l2_flows);
	f->type = FLOW_TYPE_L2_SUBFLOW;
	f->hash = hash;

	memcpy(&hwe, hwe_p, sizeof(*hwe_p));
	hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);

	type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
	if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		memcpy(&hwe.ipv6.l2, &e->data.bridge.l2, sizeof(hwe.ipv6.l2));
		hwe.ipv6.ib2 = e->data.bridge.ib2;
		/* Setting smac_id to 0xf instructs the hw to keep the
		 * original source mac address.
		 */
		hwe.ipv6.l2.src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID,
						    0xf);
	} else {
		memcpy(&hwe.bridge.l2, &e->data.bridge.l2,
		       sizeof(hwe.bridge.l2));
		hwe.bridge.ib2 = e->data.bridge.ib2;
		if (type == PPE_PKT_TYPE_IPV4_HNAPT)
			memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
			       sizeof(hwe.ipv4.new_tuple));
	}

	hwe.bridge.data = e->data.bridge.data;
	airoha_ppe_foe_commit_entry(ppe, &hwe, hash, rx_wlan);

	return 0;
}

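/* Rx path helper: given a packet that hit an unbound FOE entry at @hash,
 * look for a matching offloaded flow (or a parent L2 bridge flow) and
 * commit it to hw.
 */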
static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
					struct sk_buff *skb,
					u32 hash, bool rx_wlan)
{
	struct airoha_flow_table_entry *e;
	struct airoha_foe_bridge br = {};
	struct airoha_foe_entry *hwe;
	bool commit_done = false;
	struct hlist_node *n;
	u32 index, state;

	spin_lock_bh(&ppe_lock);

	hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
	if (!hwe)
		goto unlock;

	state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
	if (state == AIROHA_FOE_STATE_BIND)
		goto unlock;

	index = airoha_ppe_foe_get_entry_hash(ppe, hwe);
	hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
		if (e->type == FLOW_TYPE_L2_SUBFLOW) {
			state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
			if (state != AIROHA_FOE_STATE_BIND) {
				e->hash = 0xffff;
				airoha_ppe_foe_remove_flow(ppe, e);
			}
			continue;
		}

		if (!airoha_ppe_foe_compare_entry(e, hwe))
			continue;

		airoha_ppe_foe_commit_entry(ppe, &e->data, hash, rx_wlan);
		commit_done = true;
		e->hash = hash;
	}

	if (commit_done)
		goto unlock;

	airoha_ppe_foe_set_bridge_addrs(&br, eth_hdr(skb));
	e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
				   airoha_l2_flow_table_params);
	if (e)
		airoha_ppe_foe_commit_subflow_entry(ppe, e, hash, rx_wlan);
unlock:
	spin_unlock_bh(&ppe_lock);
}

static int
airoha_ppe_foe_l2_flow_commit_entry(struct airoha_ppe *ppe,
				    struct airoha_flow_table_entry *e)
{
	struct airoha_flow_table_entry *prev;

	e->type = FLOW_TYPE_L2;
	prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &e->l2_node,
						 airoha_l2_flow_table_params);
	if (!prev)
		return 0;

	if (IS_ERR(prev))
		return PTR_ERR(prev);

	return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
				       &e->l2_node,
				       airoha_l2_flow_table_params);
}

static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
					    struct airoha_flow_table_entry *e)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
	u32 hash;

	if (type == PPE_PKT_TYPE_BRIDGE)
		return airoha_ppe_foe_l2_flow_commit_entry(ppe, e);

	hash = airoha_ppe_foe_get_entry_hash(ppe, &e->data);
	e->type = FLOW_TYPE_L4;
	e->hash = 0xffff;

	spin_lock_bh(&ppe_lock);
	hlist_add_head(&e->list, &ppe->foe_flow[hash]);
	spin_unlock_bh(&ppe_lock);

	return 0;
}

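/* Return the entry idle time in timestamp units, using the bind or unbind
 * timestamp field depending on the entry state and compensating for
 * counter wraparound.
 */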
static int airoha_ppe_get_entry_idle_time(struct airoha_ppe *ppe, u32 ib1)
{
	u32 state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
	u32 ts, ts_mask, now = airoha_ppe_get_timestamp(ppe);
	int idle;

	if (state == AIROHA_FOE_STATE_BIND) {
		ts = FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, ib1);
		ts_mask = AIROHA_FOE_IB1_BIND_TIMESTAMP;
	} else {
		ts = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, ib1);
		now = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, now);
		ts_mask = AIROHA_FOE_IB1_UNBIND_TIMESTAMP;
	}
	idle = now - ts;

	return idle < 0 ? idle + ts_mask + 1 : idle;
}

static void
airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
				    struct airoha_flow_table_entry *e)
{
	int min_idle = airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
	struct airoha_flow_table_entry *iter;
	struct hlist_node *n;

	lockdep_assert_held(&ppe_lock);

	hlist_for_each_entry_safe(iter, n, &e->l2_flows, l2_subflow_node) {
		struct airoha_foe_entry *hwe;
		u32 ib1, state;
		int idle;

		hwe = airoha_ppe_foe_get_entry_locked(ppe, iter->hash);
		if (!hwe)
			continue;

		ib1 = READ_ONCE(hwe->ib1);
		state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
		if (state != AIROHA_FOE_STATE_BIND) {
			iter->hash = 0xffff;
			airoha_ppe_foe_remove_flow(ppe, iter);
			continue;
		}

		idle = airoha_ppe_get_entry_idle_time(ppe, ib1);
		if (idle >= min_idle)
			continue;

		min_idle = idle;
		e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
		e->data.ib1 |= ib1 & AIROHA_FOE_IB1_BIND_TIMESTAMP;
	}
}

static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe,
					     struct airoha_flow_table_entry *e)
{
	struct airoha_foe_entry *hwe_p, hwe = {};

	spin_lock_bh(&ppe_lock);

	if (e->type == FLOW_TYPE_L2) {
		airoha_ppe_foe_flow_l2_entry_update(ppe, e);
		goto unlock;
	}

	if (e->hash == 0xffff)
		goto unlock;

	hwe_p = airoha_ppe_foe_get_entry_locked(ppe, e->hash);
	if (!hwe_p)
		goto unlock;

	memcpy(&hwe, hwe_p, sizeof(*hwe_p));
	if (!airoha_ppe_foe_compare_entry(e, &hwe)) {
		e->hash = 0xffff;
		goto unlock;
	}

	e->data.ib1 = hwe.ib1;
unlock:
	spin_unlock_bh(&ppe_lock);
}

static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe,
				      struct airoha_flow_table_entry *e)
{
	airoha_ppe_foe_flow_entry_update(ppe, e);

	return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
}

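/* FLOW_CLS_REPLACE handler: validate the dissector keys (meta, control,
 * basic, eth/ipv4/ipv6 addresses, ports) and actions, build a FOE entry
 * out of them and insert it into the sw flow table.
 */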
static int airoha_ppe_flow_offload_replace(struct airoha_eth *eth,
					   struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct airoha_flow_table_entry *e;
	struct airoha_flow_data data = {};
	struct net_device *odev = NULL;
	struct flow_action_entry *act;
	struct airoha_foe_entry hwe;
	int err, i, offload_type;
	u16 addr_type = 0;
	u8 l4proto = 0;

	if (rhashtable_lookup(&eth->flow_table, &f->cookie,
			      airoha_flow_table_params))
		return -EEXIST;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return -EOPNOTSUPP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
		if (flow_rule_has_control_flags(match.mask->flags,
						f->common.extack))
			return -EOPNOTSUPP;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		l4proto = match.key->ip_proto;
	} else {
		return -EOPNOTSUPP;
	}

	switch (addr_type) {
	case 0:
		offload_type = PPE_PKT_TYPE_BRIDGE;
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
			struct flow_match_eth_addrs match;

			flow_rule_match_eth_addrs(rule, &match);
			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
		} else {
			return -EOPNOTSUPP;
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		offload_type = PPE_PKT_TYPE_IPV4_HNAPT;
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		offload_type = PPE_PKT_TYPE_IPV6_ROUTE_5T;
		break;
	default:
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE:
			if (offload_type == PPE_PKT_TYPE_BRIDGE)
				return -EOPNOTSUPP;

			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
				airoha_ppe_flow_mangle_eth(act, &data.eth);
			break;
		case FLOW_ACTION_REDIRECT:
			odev = act->dev;
			break;
		case FLOW_ACTION_CSUM:
			break;
		case FLOW_ACTION_VLAN_PUSH:
			if (data.vlan.num == 2 ||
			    act->vlan.proto != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan.hdr[data.vlan.num].id = act->vlan.vid;
			data.vlan.hdr[data.vlan.num].proto = act->vlan.proto;
			data.vlan.num++;
			break;
		case FLOW_ACTION_VLAN_POP:
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			if (data.pppoe.num == 1 || data.vlan.num == 2)
				return -EOPNOTSUPP;

			data.pppoe.sid = act->pppoe.sid;
			data.pppoe.num++;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (!is_valid_ether_addr(data.eth.h_source) ||
	    !is_valid_ether_addr(data.eth.h_dest))
		return -EINVAL;

	err = airoha_ppe_foe_entry_prepare(eth, &hwe, odev, offload_type,
					   &data, l4proto);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports ports;

		if (offload_type == PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		flow_rule_match_ports(rule, &ports);
		data.src_port = ports.key->src;
		data.dst_port = ports.key->dst;
	} else if (offload_type != PPE_PKT_TYPE_BRIDGE) {
		return -EOPNOTSUPP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs addrs;

		flow_rule_match_ipv4_addrs(rule, &addrs);
		data.v4.src_addr = addrs.key->src;
		data.v4.dst_addr = addrs.key->dst;
		airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, false);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs addrs;

		flow_rule_match_ipv6_addrs(rule, &addrs);

		data.v6.src_addr = addrs.key->src;
		data.v6.dst_addr = addrs.key->dst;
		airoha_ppe_foe_entry_set_ipv6_tuple(&hwe, &data);
	}

	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		if (offload_type == PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			err = airoha_ppe_flow_mangle_ports(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			err = airoha_ppe_flow_mangle_ipv4(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
			/* handled earlier */
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (err)
			return err;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		err = airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, true);
		if (err)
			return err;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->cookie = f->cookie;
	memcpy(&e->data, &hwe, sizeof(e->data));

	err = airoha_ppe_foe_flow_commit_entry(eth->ppe, e);
	if (err)
		goto free_entry;

	err = rhashtable_insert_fast(&eth->flow_table, &e->node,
				     airoha_flow_table_params);
	if (err < 0)
		goto remove_foe_entry;

	return 0;

remove_foe_entry:
	airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
free_entry:
	kfree(e);

	return err;
}

static int airoha_ppe_flow_offload_destroy(struct airoha_eth *eth,
					   struct flow_cls_offload *f)
{
	struct airoha_flow_table_entry *e;

	e = rhashtable_lookup(&eth->flow_table, &f->cookie,
			      airoha_flow_table_params);
	if (!e)
		return -ENOENT;

	airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
	rhashtable_remove_fast(&eth->flow_table, &e->node,
			       airoha_flow_table_params);
	kfree(e);

	return 0;
}

void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
				    struct airoha_foe_stats64 *stats)
{
	struct airoha_eth *eth = ppe->eth;
	int ppe_num_stats_entries;
	struct airoha_npu *npu;
	u32 index;

	ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
	if (ppe_num_stats_entries < 0)
		return;

	if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index))
		return;

	if (index >= ppe_num_stats_entries)
		return;

	rcu_read_lock();

	npu = rcu_dereference(eth->npu);
	if (npu) {
		u64 packets = ppe->foe_stats[index].packets;
		u64 bytes = ppe->foe_stats[index].bytes;
		struct airoha_foe_stats npu_stats;

		memcpy_fromio(&npu_stats, &npu->stats[index],
			      sizeof(*npu->stats));
		stats->packets = packets << 32 | npu_stats.packets;
		stats->bytes = bytes << 32 | npu_stats.bytes;
	}

	rcu_read_unlock();
}

static int airoha_ppe_flow_offload_stats(struct airoha_eth *eth,
					 struct flow_cls_offload *f)
{
	struct airoha_flow_table_entry *e;
	u32 idle;

	e = rhashtable_lookup(&eth->flow_table, &f->cookie,
			      airoha_flow_table_params);
	if (!e)
		return -ENOENT;

	idle = airoha_ppe_entry_idle_time(eth->ppe, e);
	f->stats.lastused = jiffies - idle * HZ;

	if (e->hash != 0xffff) {
		struct airoha_foe_stats64 stats = {};

		airoha_ppe_foe_entry_get_stats(eth->ppe, e->hash, &stats);
		f->stats.pkts += (stats.packets - e->stats.packets);
		f->stats.bytes += (stats.bytes - e->stats.bytes);
		e->stats = stats;
	}

	return 0;
}

static int airoha_ppe_flow_offload_cmd(struct airoha_eth *eth,
				       struct flow_cls_offload *f)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return airoha_ppe_flow_offload_replace(eth, f);
	case FLOW_CLS_DESTROY:
		return airoha_ppe_flow_offload_destroy(eth, f);
	case FLOW_CLS_STATS:
		return airoha_ppe_flow_offload_stats(eth, f);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe)
{
	u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
	struct airoha_foe_entry *hwe = ppe->foe;
	int i, err = 0;

	for (i = 0; i < sram_num_entries; i++) {
		memset(&hwe[i], 0, sizeof(*hwe));
		err = airoha_ppe_foe_commit_sram_entry(ppe, i);
		if (err)
			break;
	}

	return err;
}

static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
{
	struct airoha_npu *npu = airoha_npu_get(eth->dev);

	if (IS_ERR(npu)) {
		request_module("airoha-npu");
		npu = airoha_npu_get(eth->dev);
	}

	return npu;
}

static int airoha_ppe_offload_setup(struct airoha_eth *eth)
{
	struct airoha_npu *npu = airoha_ppe_npu_get(eth);
	struct airoha_ppe *ppe = eth->ppe;
	int err, ppe_num_stats_entries;

	if (IS_ERR(npu))
		return PTR_ERR(npu);

	err = npu->ops.ppe_init(npu);
	if (err)
		goto error_npu_put;

	ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
	if (ppe_num_stats_entries > 0) {
		err = npu->ops.ppe_init_stats(npu, ppe->foe_stats_dma,
					      ppe_num_stats_entries);
		if (err)
			goto error_npu_put;
	}

	airoha_ppe_hw_init(ppe);
	airoha_ppe_foe_flow_stats_reset(ppe, npu);

	rcu_assign_pointer(eth->npu, npu);
	synchronize_rcu();

	return 0;

error_npu_put:
	airoha_npu_put(npu);

	return err;
}

int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data)
{
	struct airoha_ppe *ppe = dev->priv;
	struct airoha_eth *eth = ppe->eth;
	int err = 0;

	mutex_lock(&flow_offload_mutex);

	if (!eth->npu)
		err = airoha_ppe_offload_setup(eth);
	if (!err)
		err = airoha_ppe_flow_offload_cmd(eth, type_data);

	mutex_unlock(&flow_offload_mutex);

	return err;
}

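/* Rx hook invoked for packets hitting an unbound FOE entry: rate-limit
 * binding attempts to one every HZ / 10 per hw bucket before trying to
 * insert the flow.
 */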
void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
			  u16 hash, bool rx_wlan)
{
	struct airoha_ppe *ppe = dev->priv;
	u32 ppe_hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1;
	u16 now, diff;

	if (hash > ppe_hash_mask)
		return;

	now = (u16)jiffies;
	diff = now - ppe->foe_check_time[hash];
	if (diff < HZ / 10)
		return;

	ppe->foe_check_time[hash] = now;
	airoha_ppe_foe_insert_entry(ppe, skb, hash, rx_wlan);
}

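/* Program the port MAC address into the PPE source-mac table (UPDMEM) at
 * slot port->id: the low four bytes first, then the remaining two at
 * offset 1.
 */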
void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
{
	struct airoha_eth *eth = port->qdma->eth;
	struct net_device *dev = port->dev;
	const u8 *addr = dev->dev_addr;
	u32 val;

	val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
	airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
	airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
		     FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
		     PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);

	val = (addr[0] << 8) | addr[1];
	airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
	airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
		     FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
		     FIELD_PREP(PPE_UPDMEM_OFFSET_MASK, 1) |
		     PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
}

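/* Resolve the airoha_eth device referenced by the "airoha,eth" phandle of
 * @dev, pin this module and add a device link so the external consumer is
 * tied to the ethernet supplier lifetime.
 */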
struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev)
{
	struct platform_device *pdev;
	struct device_node *np;
	struct airoha_eth *eth;

	np = of_parse_phandle(dev->of_node, "airoha,eth", 0);
	if (!np)
		return ERR_PTR(-ENODEV);

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		dev_err(dev, "cannot find device node %s\n", np->name);
		of_node_put(np);
		return ERR_PTR(-ENODEV);
	}
	of_node_put(np);

	if (!try_module_get(THIS_MODULE)) {
		dev_err(dev, "failed to get the device driver module\n");
		goto error_pdev_put;
	}

	eth = platform_get_drvdata(pdev);
	if (!eth)
		goto error_module_put;

	if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) {
		dev_err(&pdev->dev,
			"failed to create device link to consumer %s\n",
			dev_name(dev));
		goto error_module_put;
	}

	return &eth->ppe->dev;

error_module_put:
	module_put(THIS_MODULE);
error_pdev_put:
	platform_device_put(pdev);

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(airoha_ppe_get_dev);

void airoha_ppe_put_dev(struct airoha_ppe_dev *dev)
{
	struct airoha_ppe *ppe = dev->priv;
	struct airoha_eth *eth = ppe->eth;

	module_put(THIS_MODULE);
	put_device(eth->dev);
}
EXPORT_SYMBOL_GPL(airoha_ppe_put_dev);

int airoha_ppe_init(struct airoha_eth *eth)
{
	int foe_size, err, ppe_num_stats_entries;
	u32 ppe_num_entries;
	struct airoha_ppe *ppe;

	ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
	if (!ppe)
		return -ENOMEM;

	ppe->dev.ops.setup_tc_block_cb = airoha_ppe_setup_tc_block_cb;
	ppe->dev.ops.check_skb = airoha_ppe_check_skb;
	ppe->dev.priv = ppe;
	ppe->eth = eth;
	eth->ppe = ppe;

	ppe_num_entries = airoha_ppe_get_total_num_entries(ppe);
	foe_size = ppe_num_entries * sizeof(struct airoha_foe_entry);
	ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
				       GFP_KERNEL);
	if (!ppe->foe)
		return -ENOMEM;

	ppe->foe_flow = devm_kzalloc(eth->dev,
				     ppe_num_entries * sizeof(*ppe->foe_flow),
				     GFP_KERNEL);
	if (!ppe->foe_flow)
		return -ENOMEM;

	ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
	if (ppe_num_stats_entries > 0) {
		foe_size = ppe_num_stats_entries * sizeof(*ppe->foe_stats);
		ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size,
						     &ppe->foe_stats_dma,
						     GFP_KERNEL);
		if (!ppe->foe_stats)
			return -ENOMEM;
	}

	ppe->foe_check_time = devm_kzalloc(eth->dev, ppe_num_entries,
					   GFP_KERNEL);
	if (!ppe->foe_check_time)
		return -ENOMEM;

	err = airoha_ppe_flush_sram_entries(ppe);
	if (err)
		return err;

	err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
	if (err)
		return err;

	err = rhashtable_init(&ppe->l2_flows, &airoha_l2_flow_table_params);
	if (err)
		goto error_flow_table_destroy;

	err = airoha_ppe_debugfs_init(ppe);
	if (err)
		goto error_l2_flow_table_destroy;

	return 0;

error_l2_flow_table_destroy:
	rhashtable_destroy(&ppe->l2_flows);
error_flow_table_destroy:
	rhashtable_destroy(&eth->flow_table);

	return err;
}

void airoha_ppe_deinit(struct airoha_eth *eth)
{
	struct airoha_npu *npu;

	mutex_lock(&flow_offload_mutex);

	npu = rcu_replace_pointer(eth->npu, NULL,
				  lockdep_is_held(&flow_offload_mutex));
	if (npu) {
		npu->ops.ppe_deinit(npu);
		airoha_npu_put(npu);
	}

	mutex_unlock(&flow_offload_mutex);

	rhashtable_destroy(&eth->ppe->l2_flows);
	rhashtable_destroy(&eth->flow_table);
	debugfs_remove(eth->ppe->debugfs_dir);
}