1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2025 AIROHA Inc
4 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
5 */
6
7 #include <linux/ip.h>
8 #include <linux/ipv6.h>
9 #include <linux/of_platform.h>
10 #include <linux/platform_device.h>
11 #include <linux/rhashtable.h>
12 #include <net/ipv6.h>
13 #include <net/pkt_cls.h>
14
15 #include "airoha_regs.h"
16 #include "airoha_eth.h"
17
/* NOTE(review): presumably serializes flow offload add/del paths defined
 * below this chunk — confirm against its users.
 */
static DEFINE_MUTEX(flow_offload_mutex);
/* Guards FOE table SRAM readback/commit and the per-hash flow lists. */
static DEFINE_SPINLOCK(ppe_lock);

/* Main flow table: entries keyed by the flow_cls_offload cookie. */
static const struct rhashtable_params airoha_flow_table_params = {
	.head_offset = offsetof(struct airoha_flow_table_entry, node),
	.key_offset = offsetof(struct airoha_flow_table_entry, cookie),
	.key_len = sizeof(unsigned long),
	.automatic_shrinking = true,
};

/* L2 (bridge) flow table: entries keyed by the leading 2 * ETH_ALEN bytes
 * of data.bridge (the dest + src MAC pair, see
 * airoha_ppe_foe_set_bridge_addrs()).
 */
static const struct rhashtable_params airoha_l2_flow_table_params = {
	.head_offset = offsetof(struct airoha_flow_table_entry, l2_node),
	.key_offset = offsetof(struct airoha_flow_table_entry, data.bridge),
	.key_len = 2 * ETH_ALEN,
	.automatic_shrinking = true,
};
34
airoha_ppe_get_num_stats_entries(struct airoha_ppe * ppe)35 static int airoha_ppe_get_num_stats_entries(struct airoha_ppe *ppe)
36 {
37 if (!IS_ENABLED(CONFIG_NET_AIROHA_FLOW_STATS))
38 return -EOPNOTSUPP;
39
40 if (airoha_is_7583(ppe->eth))
41 return -EOPNOTSUPP;
42
43 return PPE_STATS_NUM_ENTRIES;
44 }
45
airoha_ppe_get_total_num_stats_entries(struct airoha_ppe * ppe)46 static int airoha_ppe_get_total_num_stats_entries(struct airoha_ppe *ppe)
47 {
48 int num_stats = airoha_ppe_get_num_stats_entries(ppe);
49
50 if (num_stats > 0) {
51 struct airoha_eth *eth = ppe->eth;
52
53 num_stats = num_stats * eth->soc->num_ppe;
54 }
55
56 return num_stats;
57 }
58
airoha_ppe_get_total_sram_num_entries(struct airoha_ppe * ppe)59 static u32 airoha_ppe_get_total_sram_num_entries(struct airoha_ppe *ppe)
60 {
61 struct airoha_eth *eth = ppe->eth;
62
63 return PPE_SRAM_NUM_ENTRIES * eth->soc->num_ppe;
64 }
65
airoha_ppe_get_total_num_entries(struct airoha_ppe * ppe)66 u32 airoha_ppe_get_total_num_entries(struct airoha_ppe *ppe)
67 {
68 u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
69
70 return sram_num_entries + PPE_DRAM_NUM_ENTRIES;
71 }
72
airoha_ppe_is_enabled(struct airoha_eth * eth,int index)73 bool airoha_ppe_is_enabled(struct airoha_eth *eth, int index)
74 {
75 if (index >= eth->soc->num_ppe)
76 return false;
77
78 return airoha_fe_rr(eth, REG_PPE_GLO_CFG(index)) & PPE_GLO_CFG_EN_MASK;
79 }
80
/* Read the current FOE binding timestamp from hardware.
 * NOTE(review): the register value is deliberately narrowed to u16 before
 * masking with AIROHA_FOE_IB1_BIND_TIMESTAMP — presumably the counter is at
 * most 16 bits wide; confirm against the EN7581 datasheet.
 */
static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
{
	u16 timestamp = airoha_fe_rr(ppe->eth, REG_FE_FOE_TS);

	return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp);
}
87
/* Program the PPE hardware: FOE table base/geometry, bind ageing deltas,
 * hash configuration and per-port egress MTUs for every PPE instance.
 */
static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
{
	u32 sram_ppe_num_data_entries = PPE_SRAM_NUM_ENTRIES, sram_num_entries;
	u32 sram_tb_size, dram_num_entries;
	struct airoha_eth *eth = ppe->eth;
	int i, sram_num_stats_entries;

	sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
	sram_tb_size = sram_num_entries * sizeof(struct airoha_foe_entry);
	dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);

	/* When flow stats are supported they are carved out of the SRAM
	 * entry budget, shrinking the space left for data entries.
	 */
	sram_num_stats_entries = airoha_ppe_get_num_stats_entries(ppe);
	if (sram_num_stats_entries > 0)
		sram_ppe_num_data_entries -= sram_num_stats_entries;
	sram_ppe_num_data_entries =
		PPE_RAM_NUM_ENTRIES_SHIFT(sram_ppe_num_data_entries);

	for (i = 0; i < eth->soc->num_ppe; i++) {
		int p;

		/* DRAM portion of the FOE table starts right after the SRAM
		 * shadow in the DMA buffer.
		 */
		airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
			     ppe->foe_dma + sram_tb_size);

		/* Ageing deltas for bound entries: non-L4/UDP ... */
		airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
			      PPE_BIND_AGE0_DELTA_NON_L4 |
			      PPE_BIND_AGE0_DELTA_UDP,
			      FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) |
			      FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12));
		/* ... and TCP/TCP-FIN. */
		airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
			      PPE_BIND_AGE1_DELTA_TCP_FIN |
			      PPE_BIND_AGE1_DELTA_TCP,
			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7));

		/* Enable SRAM + DRAM lookup; note SRAM_TABLE_EN and
		 * DRAM_TABLE_EN are in the mask but only SRAM_TABLE_EN is
		 * set to 1 via FIELD_PREP below — DRAM lookup enable bit is
		 * cleared here.
		 */
		airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
			      PPE_SRAM_TABLE_EN_MASK |
			      PPE_SRAM_HASH1_EN_MASK |
			      PPE_DRAM_TABLE_EN_MASK |
			      PPE_SRAM_HASH0_MODE_MASK |
			      PPE_SRAM_HASH1_MODE_MASK |
			      PPE_DRAM_HASH0_MODE_MASK |
			      PPE_DRAM_HASH1_MODE_MASK,
			      FIELD_PREP(PPE_SRAM_TABLE_EN_MASK, 1) |
			      FIELD_PREP(PPE_SRAM_HASH1_EN_MASK, 1) |
			      FIELD_PREP(PPE_SRAM_HASH1_MODE_MASK, 1) |
			      FIELD_PREP(PPE_DRAM_HASH1_MODE_MASK, 3));

		/* Table geometry: search-miss policy, entry size and the
		 * (shifted) SRAM/DRAM entry counts computed above.
		 */
		airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
			      PPE_TB_CFG_SEARCH_MISS_MASK |
			      PPE_SRAM_TB_NUM_ENTRY_MASK |
			      PPE_DRAM_TB_NUM_ENTRY_MASK |
			      PPE_TB_CFG_KEEPALIVE_MASK |
			      PPE_TB_ENTRY_SIZE_MASK,
			      FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
			      FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0) |
			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
					 sram_ppe_num_data_entries) |
			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
					 dram_num_entries));

		airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);

		/* Allow full-size frames on every egress port pair. */
		for (p = 0; p < ARRAY_SIZE(eth->ports); p++)
			airoha_fe_rmw(eth, REG_PPE_MTU(i, p),
				      FP0_EGRESS_MTU_MASK |
				      FP1_EGRESS_MTU_MASK,
				      FIELD_PREP(FP0_EGRESS_MTU_MASK,
						 AIROHA_MAX_MTU) |
				      FIELD_PREP(FP1_EGRESS_MTU_MASK,
						 AIROHA_MAX_MTU));
	}
}
160
/* Apply an ETH-header mangle action to the cached ethernet header @eth.
 * Offsets 0-8 cover h_dest/h_source; larger offsets (h_proto) are ignored.
 * A mask of 0xffff means only the upper 16 bits of the 32-bit mangle word
 * are meaningful, so copy 2 bytes from the tail of val; a zero mask means
 * copy the full 4 bytes.
 */
static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act, void *eth)
{
	void *dest = eth + act->mangle.offset;
	const void *src = &act->mangle.val;

	if (act->mangle.offset > 8)
		return;

	if (act->mangle.mask == 0xffff) {
		src += 2;
		dest += 2;
	}

	memcpy(dest, src, act->mangle.mask ? 2 : 4);
}
176
/* Apply a L4 port mangle action. The 32-bit mangle word at offset 0 packs
 * src_port in the high half and dst_port in the low half; the mask tells
 * which half is being rewritten. Offset 2 always targets dst_port.
 * Returns 0 on success, -EINVAL for an unsupported offset.
 */
static int airoha_ppe_flow_mangle_ports(const struct flow_action_entry *act,
					struct airoha_flow_data *data)
{
	u32 val = be32_to_cpu((__force __be32)act->mangle.val);

	switch (act->mangle.offset) {
	case 0:
		/* mask ~0xffff (big-endian) -> low half valid -> dst_port */
		if ((__force __be32)act->mangle.mask == ~cpu_to_be32(0xffff))
			data->dst_port = cpu_to_be16(val);
		else
			data->src_port = cpu_to_be16(val >> 16);
		break;
	case 2:
		data->dst_port = cpu_to_be16(val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
198
airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry * act,struct airoha_flow_data * data)199 static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
200 struct airoha_flow_data *data)
201 {
202 __be32 *dest;
203
204 switch (act->mangle.offset) {
205 case offsetof(struct iphdr, saddr):
206 dest = &data->v4.src_addr;
207 break;
208 case offsetof(struct iphdr, daddr):
209 dest = &data->v4.dst_addr;
210 break;
211 default:
212 return -EINVAL;
213 }
214
215 memcpy(dest, &act->mangle.val, sizeof(u32));
216
217 return 0;
218 }
219
airoha_ppe_get_wdma_info(struct net_device * dev,const u8 * addr,struct airoha_wdma_info * info)220 static int airoha_ppe_get_wdma_info(struct net_device *dev, const u8 *addr,
221 struct airoha_wdma_info *info)
222 {
223 struct net_device_path_stack stack;
224 struct net_device_path *path;
225 int err;
226
227 if (!dev)
228 return -ENODEV;
229
230 rcu_read_lock();
231 err = dev_fill_forward_path(dev, addr, &stack);
232 rcu_read_unlock();
233 if (err)
234 return err;
235
236 path = &stack.path[stack.num_paths - 1];
237 if (path->type != DEV_PATH_MTK_WDMA)
238 return -1;
239
240 info->idx = path->mtk_wdma.wdma_idx;
241 info->bss = path->mtk_wdma.bss;
242 info->wcid = path->mtk_wdma.wcid;
243
244 return 0;
245 }
246
/* If *dev is a DSA user port, replace it with its conduit (CPU-facing)
 * device and return the DSA port index; otherwise return -ENODEV and leave
 * *dev untouched. Always -ENODEV when DSA support is compiled out.
 */
static int airoha_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dp = dsa_port_from_netdev(*dev);

	if (IS_ERR(dp))
		return -ENODEV;

	*dev = dsa_port_to_conduit(dp);
	return dp->index;
#else
	return -ENODEV;
#endif
}
261
/* Pack the 6-byte dest/src MACs of @eh into the FOE bridge key layout:
 * dest is split 4+2 (hi/lo), src is split 2+4 (hi/lo), all big-endian
 * loads from potentially unaligned header bytes.
 */
static void airoha_ppe_foe_set_bridge_addrs(struct airoha_foe_bridge *br,
					    struct ethhdr *eh)
{
	br->dest_mac_hi = get_unaligned_be32(eh->h_dest);
	br->dest_mac_lo = get_unaligned_be16(eh->h_dest + 4);
	br->src_mac_hi = get_unaligned_be16(eh->h_source);
	br->src_mac_lo = get_unaligned_be32(eh->h_source + 2);
}
270
/* Build a FOE hardware entry @hwe for a flow of packet @type towards @dev,
 * using the parsed flow @data and L4 protocol @l4proto.
 *
 * Fills ib1 (state/type/vlan/pppoe flags), ib2 (egress PSE port/queue
 * selection — WDMA for wireless, GDM for wired), the per-type tuple area
 * and the L2 rewrite info (MACs, vlans, etype).
 *
 * Returns 0 on success, -EINVAL if @dev is a GDM port not belonging to
 * this device instance.
 */
static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
					struct airoha_foe_entry *hwe,
					struct net_device *dev, int type,
					struct airoha_flow_data *data,
					int l4proto)
{
	u32 qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f), ports_pad, val;
	/* airoha_get_dsa_port() may rewrite @dev to the DSA conduit. */
	int wlan_etype = -EINVAL, dsa_port = airoha_get_dsa_port(&dev);
	struct airoha_foe_mac_info_common *l2;
	u8 smac_id = 0xf;

	memset(hwe, 0, sizeof(*hwe));

	val = FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, AIROHA_FOE_STATE_BIND) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_PACKET_TYPE, type) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_PPPOE, data->pppoe.num) |
	      AIROHA_FOE_IB1_BIND_TTL;
	hwe->ib1 = val;

	val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f);
	if (dev) {
		struct airoha_wdma_info info = {};

		if (!airoha_ppe_get_wdma_info(dev, data->eth.h_dest, &info)) {
			/* Wireless egress: forward through CDM4/WDMA and
			 * stash band/wcid in the etype field.
			 */
			val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, info.idx) |
			       FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT,
					  FE_PSE_PORT_CDM4);
			qdata |= FIELD_PREP(AIROHA_FOE_ACTDP, info.bss);
			wlan_etype = FIELD_PREP(AIROHA_FOE_MAC_WDMA_BAND,
						info.idx) |
				     FIELD_PREP(AIROHA_FOE_MAC_WDMA_WCID,
						info.wcid);
		} else {
			struct airoha_gdm_port *port = netdev_priv(dev);
			u8 pse_port;

			if (!airoha_is_valid_gdm_port(eth, port))
				return -EINVAL;

			if (dsa_port >= 0 || eth->ports[1])
				pse_port = port->id == 4 ? FE_PSE_PORT_GDM4
							 : port->id;
			else
				pse_port = 2; /* uplink relies on GDM2
					       * loopback
					       */

			val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port) |
			       AIROHA_FOE_IB2_PSE_QOS;
			/* For downlink traffic consume SRAM memory for hw
			 * forwarding descriptors queue.
			 */
			if (airhoa_is_lan_gdm_port(port))
				val |= AIROHA_FOE_IB2_FAST_PATH;
			if (dsa_port >= 0)
				val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ,
						  dsa_port);

			smac_id = port->id;
		}
	}

	if (is_multicast_ether_addr(data->eth.h_dest))
		val |= AIROHA_FOE_IB2_MULTICAST;

	/* Types without real L4 ports still need the ports field padded
	 * with a recognizable pattern plus the protocol number.
	 */
	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == PPE_PKT_TYPE_IPV4_ROUTE)
		hwe->ipv4.orig_tuple.ports = ports_pad;
	if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
		hwe->ipv6.ports = ports_pad;

	/* Pick the per-type data/ib2/l2 layout inside the FOE entry union. */
	if (type == PPE_PKT_TYPE_BRIDGE) {
		airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth);
		hwe->bridge.data = qdata;
		hwe->bridge.ib2 = val;
		l2 = &hwe->bridge.l2.common;
	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		hwe->ipv6.data = qdata;
		hwe->ipv6.ib2 = val;
		l2 = &hwe->ipv6.l2;
		l2->etype = ETH_P_IPV6;
	} else {
		hwe->ipv4.data = qdata;
		hwe->ipv4.ib2 = val;
		l2 = &hwe->ipv4.l2.common;
		l2->etype = ETH_P_IP;
	}

	l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
	l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
	if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
		struct airoha_foe_mac_info *mac_info;

		/* IPv4 layouts carry the full source MAC ... */
		l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
		hwe->ipv4.l2.src_mac_lo =
			get_unaligned_be16(data->eth.h_source + 4);

		mac_info = (struct airoha_foe_mac_info *)l2;
		mac_info->pppoe_id = data->pppoe.sid;
	} else {
		/* ... others reference the GDM port source MAC by id. */
		l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id) |
				 FIELD_PREP(AIROHA_FOE_MAC_PPPOE_ID,
					    data->pppoe.sid);
	}

	if (data->vlan.num) {
		l2->vlan1 = data->vlan.hdr[0].id;
		if (data->vlan.num == 2)
			l2->vlan2 = data->vlan.hdr[1].id;
	}

	/* etype doubles as WDMA band/wcid (wireless), DSA port tag bitmap,
	 * or the PPPoE session ethertype.
	 */
	if (wlan_etype >= 0) {
		l2->etype = wlan_etype;
	} else if (dsa_port >= 0) {
		l2->etype = BIT(dsa_port);
		l2->etype |= !data->vlan.num ? BIT(15) : 0;
	} else if (data->pppoe.num) {
		l2->etype = ETH_P_PPP_SES;
	}

	return 0;
}
396
/* Write the IPv4 5-tuple from @data into @hwe. For HNAPT the @egress flag
 * selects the post-NAT (new) tuple; every other supported type — and HNAPT
 * ingress — uses orig_tuple. Ports are skipped for pure ROUTE entries
 * (their ports field holds the pad pattern set at prepare time).
 * Returns -EINVAL (with a one-shot warning) for unsupported types.
 */
static int airoha_ppe_foe_entry_set_ipv4_tuple(struct airoha_foe_entry *hwe,
					       struct airoha_flow_data *data,
					       bool egress)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	struct airoha_foe_ipv4_tuple *t;

	switch (type) {
	case PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &hwe->ipv4.new_tuple;
			break;
		}
		fallthrough;
	case PPE_PKT_TYPE_IPV4_DSLITE:
	case PPE_PKT_TYPE_IPV4_ROUTE:
		t = &hwe->ipv4.orig_tuple;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(data->v4.src_addr);
	t->dest_ip = be32_to_cpu(data->v4.dst_addr);

	if (type != PPE_PKT_TYPE_IPV4_ROUTE) {
		t->src_port = be16_to_cpu(data->src_port);
		t->dest_port = be16_to_cpu(data->dst_port);
	}

	return 0;
}
430
/* Write the IPv6 tuple from @data into @hwe. 5T/6RD types also carry L4
 * ports; 3T stores addresses only. Addresses are converted from network
 * order to CPU order in place. Returns -EINVAL (with a one-shot warning)
 * for unsupported types.
 */
static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
					       struct airoha_flow_data *data)

{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 *src, *dest;

	switch (type) {
	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case PPE_PKT_TYPE_IPV6_6RD:
		hwe->ipv6.src_port = be16_to_cpu(data->src_port);
		hwe->ipv6.dest_port = be16_to_cpu(data->dst_port);
		fallthrough;
	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = hwe->ipv6.src_ip;
		dest = hwe->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	ipv6_addr_be32_to_cpu(src, data->v6.src_addr.s6_addr32);
	ipv6_addr_be32_to_cpu(dest, data->v6.dst_addr.s6_addr32);

	return 0;
}
458
/* Compute the FOE table bucket for @hwe, mirroring the hardware hash.
 * Three 32-bit words (hv1..hv3) are gathered per packet type, then mixed
 * with a select/rotate/xor sequence and masked to the table size (which is
 * a power of two — the mask is size - 1). Unsupported types warn once and
 * fall back to the last bucket (the mask value).
 */
static u32 airoha_ppe_foe_get_entry_hash(struct airoha_ppe *ppe,
					 struct airoha_foe_entry *hwe)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 ppe_hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1;
	u32 hash, hv1, hv2, hv3;

	switch (type) {
	case PPE_PKT_TYPE_IPV4_ROUTE:
	case PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = hwe->ipv4.orig_tuple.ports;
		hv2 = hwe->ipv4.orig_tuple.dest_ip;
		hv3 = hwe->ipv4.orig_tuple.src_ip;
		break;
	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = hwe->ipv6.src_ip[3] ^ hwe->ipv6.dest_ip[3];
		hv1 ^= hwe->ipv6.ports;

		hv2 = hwe->ipv6.src_ip[2] ^ hwe->ipv6.dest_ip[2];
		hv2 ^= hwe->ipv6.dest_ip[0];

		hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
		hv3 ^= hwe->ipv6.src_ip[0];
		break;
	case PPE_PKT_TYPE_BRIDGE: {
		struct airoha_foe_mac_info *l2 = &hwe->bridge.l2;

		/* Reassemble the split MAC fields into three words. */
		hv1 = l2->common.src_mac_hi & 0xffff;
		hv1 = hv1 << 16 | l2->src_mac_lo;

		hv2 = l2->common.dest_mac_lo;
		hv2 = hv2 << 16;
		hv2 = hv2 | ((l2->common.src_mac_hi & 0xffff0000) >> 16);

		hv3 = l2->common.dest_mac_hi;
		break;
	}
	case PPE_PKT_TYPE_IPV4_DSLITE:
	case PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return ppe_hash_mask;
	}

	/* Bitwise select, byte rotate, then fold everything together. */
	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash &= ppe_hash_mask;

	return hash;
}
512
/* Map a FOE bucket @hash to its flow-stats slot in *@index.
 * Returns a negative errno when stats are unsupported.
 * NOTE(review): the comparison uses the total stats count but the
 * subtraction uses the per-PPE PPE_STATS_NUM_ENTRIES — presumably
 * intentional (hashes beyond the stats window fold back by one PPE's
 * worth), but worth confirming against the stats memory layout.
 */
static int airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe,
					       u32 hash, u32 *index)
{
	int ppe_num_stats_entries;

	ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
	if (ppe_num_stats_entries < 0)
		return ppe_num_stats_entries;

	*index = hash >= ppe_num_stats_entries ? hash - PPE_STATS_NUM_ENTRIES
					       : hash;

	return 0;
}
527
/* Zero one flow-stats slot both in NPU memory (memset_io — the NPU stats
 * live in device/IO memory) and in the driver's shadow copy.
 */
static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe,
						 struct airoha_npu *npu,
						 int index)
{
	memset_io(&npu->stats[index], 0, sizeof(*npu->stats));
	memset(&ppe->foe_stats[index], 0, sizeof(*ppe->foe_stats));
}
535
/* Reset every flow-stats slot. A negative total (stats unsupported) makes
 * the loop a no-op, matching the explicit early return it replaces.
 */
static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe,
					    struct airoha_npu *npu)
{
	int n = airoha_ppe_get_total_num_stats_entries(ppe);
	int i;

	for (i = 0; i < n; i++)
		airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i);
}
548
/* Rewire @hwe so hardware mirrors its traffic counters into the flow-stats
 * slot for @hash: reset the slot, move the original channel/qid into ACTDP,
 * save the original ib2 forwarding bits in the meter field, and redirect
 * ib2 to the stats PSE port/queue. Skipped for wireless (CDM4) flows and
 * when stats are unsupported or @hash has no stats slot.
 */
static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
					     struct airoha_npu *npu,
					     struct airoha_foe_entry *hwe,
					     u32 hash)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 index, pse_port, val, *data, *ib2, *meter;
	int ppe_num_stats_entries;
	u8 nbq;

	ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
	if (ppe_num_stats_entries < 0)
		return;

	if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index))
		return;

	if (index >= ppe_num_stats_entries)
		return;

	/* Locate the type-specific data/ib2/meter fields in the entry. */
	if (type == PPE_PKT_TYPE_BRIDGE) {
		data = &hwe->bridge.data;
		ib2 = &hwe->bridge.ib2;
		meter = &hwe->bridge.l2.meter;
	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		data = &hwe->ipv6.data;
		ib2 = &hwe->ipv6.ib2;
		meter = &hwe->ipv6.meter;
	} else {
		data = &hwe->ipv4.data;
		ib2 = &hwe->ipv4.ib2;
		meter = &hwe->ipv4.l2.meter;
	}

	pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
	if (pse_port == FE_PSE_PORT_CDM4)
		return;

	airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index);

	/* Preserve the original channel/qid in ACTDP ... */
	val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data);
	*data = (*data & ~AIROHA_FOE_ACTDP) |
		FIELD_PREP(AIROHA_FOE_ACTDP, val);

	/* ... and the original forwarding bits in the meter field. */
	val = *ib2 & (AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
		      AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH);
	*meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val);

	/* Redirect to PSE port 6 with an NBQ chosen by the original port. */
	nbq = pse_port == 1 ? 6 : 5;
	*ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
		  AIROHA_FOE_IB2_PSE_QOS);
	*ib2 |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, 6) |
		FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
}
603
/* Return a pointer to the FOE entry for @hash inside the DMA shadow table.
 * Entries in the SRAM range are first synced from hardware: a read request
 * is issued through REG_PPE_RAM_CTRL (second PPE instance when @hash is
 * past the first PPE's SRAM window) and the entry words are copied into
 * the shadow. Returns NULL if the hardware never acks the read request.
 * Caller must hold ppe_lock.
 */
static struct airoha_foe_entry *
airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash)
{
	u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);

	lockdep_assert_held(&ppe_lock);

	if (hash < sram_num_entries) {
		u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
		bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES;
		struct airoha_eth *eth = ppe->eth;
		u32 val;
		int i;

		airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
			     FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
			     PPE_SRAM_CTRL_REQ_MASK);
		if (read_poll_timeout_atomic(airoha_fe_rr, val,
					     val & PPE_SRAM_CTRL_ACK_MASK,
					     10, 100, false, eth,
					     REG_PPE_RAM_CTRL(ppe2)))
			return NULL;

		/* Copy the entry word by word from the readback registers. */
		for (i = 0; i < sizeof(struct airoha_foe_entry) / sizeof(*hwe);
		     i++)
			hwe[i] = airoha_fe_rr(eth,
					      REG_PPE_RAM_ENTRY(ppe2, i));
	}

	return ppe->foe + hash * sizeof(struct airoha_foe_entry);
}
635
/* Lock-taking wrapper around airoha_ppe_foe_get_entry_locked(); may return
 * NULL when the SRAM readback times out.
 */
struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
						  u32 hash)
{
	struct airoha_foe_entry *entry;

	spin_lock_bh(&ppe_lock);
	entry = airoha_ppe_foe_get_entry_locked(ppe, hash);
	spin_unlock_bh(&ppe_lock);

	return entry;
}
647
/* Return true if the software flow @e matches the hardware entry @hwe:
 * same UDP/TCP flavor in ib1 and identical tuple data up to the per-type
 * data field (ib1 itself is excluded from the memcmp length).
 */
static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
					 struct airoha_foe_entry *hwe)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
	int len;

	if ((hwe->ib1 ^ e->data.ib1) & AIROHA_FOE_IB1_BIND_UDP)
		return false;

	if (type > PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct airoha_foe_entry, ipv6.data);
	else
		len = offsetof(struct airoha_foe_entry, ipv4.ib2);

	return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
}
664
/* Push the shadow FOE entry for @hash back into hardware SRAM: write each
 * entry word to the RAM window, then issue a write request and poll for
 * the ack. Returns 0 or the poll-timeout errno.
 */
static int airoha_ppe_foe_commit_sram_entry(struct airoha_ppe *ppe, u32 hash)
{
	struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
	/* Hashes past the first PPE's SRAM window target the second PPE. */
	bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES;
	u32 *ptr = (u32 *)hwe, val;
	int i;

	for (i = 0; i < sizeof(*hwe) / sizeof(*ptr); i++)
		airoha_fe_wr(ppe->eth, REG_PPE_RAM_ENTRY(ppe2, i), ptr[i]);

	/* Ensure all entry words land before triggering the write request. */
	wmb();
	airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
		     FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
		     PPE_SRAM_CTRL_WR_MASK | PPE_SRAM_CTRL_REQ_MASK);

	return read_poll_timeout_atomic(airoha_fe_rr, val,
					val & PPE_SRAM_CTRL_ACK_MASK,
					10, 100, false, ppe->eth,
					REG_PPE_RAM_CTRL(ppe2));
}
685
/* Commit software entry @e to the FOE table slot @hash: copy the body
 * first, barrier, then publish ib1 (state word) stamped with the current
 * bind timestamp. Flow stats rewiring is skipped for rx wlan traffic.
 * SRAM-range entries are additionally written back to hardware.
 * Returns 0, -ENODEV if no NPU is attached, or a SRAM commit error.
 */
static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
				       struct airoha_foe_entry *e,
				       u32 hash, bool rx_wlan)
{
	u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
	struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
	u32 ts = airoha_ppe_get_timestamp(ppe);
	struct airoha_eth *eth = ppe->eth;
	struct airoha_npu *npu;
	int err = 0;

	/* Body before ib1: hardware must not see a bound state with a
	 * half-written entry.
	 */
	memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
	wmb();

	e->ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
	e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
	hwe->ib1 = e->ib1;

	rcu_read_lock();

	npu = rcu_dereference(eth->npu);
	if (!npu) {
		err = -ENODEV;
		goto unlock;
	}

	if (!rx_wlan)
		airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);

	if (hash < sram_num_entries)
		err = airoha_ppe_foe_commit_sram_entry(ppe, hash);
unlock:
	rcu_read_unlock();

	return err;
}
722
/* Unlink flow @e from its per-hash list and, if it is bound to a FOE slot
 * (hash != 0xffff), mark that slot invalid in hardware. L2 subflow entries
 * are owned by their parent's l2_flows list and are freed here.
 * Caller must hold ppe_lock.
 */
static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe,
				       struct airoha_flow_table_entry *e)
{
	lockdep_assert_held(&ppe_lock);

	hlist_del_init(&e->list);
	if (e->hash != 0xffff) {
		e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
		e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
					  AIROHA_FOE_STATE_INVALID);
		airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash, false);
		e->hash = 0xffff;
	}
	if (e->type == FLOW_TYPE_L2_SUBFLOW) {
		hlist_del_init(&e->l2_subflow_node);
		kfree(e);
	}
}
741
/* Remove an L2 flow: drop it from the l2_flows rhashtable, then tear down
 * every subflow hanging off it (each subflow is freed by
 * airoha_ppe_foe_remove_flow()). Caller must hold ppe_lock.
 */
static void airoha_ppe_foe_remove_l2_flow(struct airoha_ppe *ppe,
					  struct airoha_flow_table_entry *e)
{
	struct hlist_head *head = &e->l2_flows;
	struct hlist_node *n;

	lockdep_assert_held(&ppe_lock);

	rhashtable_remove_fast(&ppe->l2_flows, &e->l2_node,
			       airoha_l2_flow_table_params);
	/* Note: @e is reused as the iteration cursor below. */
	hlist_for_each_entry_safe(e, n, head, l2_subflow_node)
		airoha_ppe_foe_remove_flow(ppe, e);
}
755
airoha_ppe_foe_flow_remove_entry(struct airoha_ppe * ppe,struct airoha_flow_table_entry * e)756 static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
757 struct airoha_flow_table_entry *e)
758 {
759 spin_lock_bh(&ppe_lock);
760
761 if (e->type == FLOW_TYPE_L2)
762 airoha_ppe_foe_remove_l2_flow(ppe, e);
763 else
764 airoha_ppe_foe_remove_flow(ppe, e);
765
766 spin_unlock_bh(&ppe_lock);
767 }
768
769 static int
airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe * ppe,struct airoha_flow_table_entry * e,u32 hash,bool rx_wlan)770 airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
771 struct airoha_flow_table_entry *e,
772 u32 hash, bool rx_wlan)
773 {
774 u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
775 struct airoha_foe_entry *hwe_p, hwe;
776 struct airoha_flow_table_entry *f;
777 int type;
778
779 hwe_p = airoha_ppe_foe_get_entry_locked(ppe, hash);
780 if (!hwe_p)
781 return -EINVAL;
782
783 f = kzalloc_obj(*f, GFP_ATOMIC);
784 if (!f)
785 return -ENOMEM;
786
787 hlist_add_head(&f->l2_subflow_node, &e->l2_flows);
788 f->type = FLOW_TYPE_L2_SUBFLOW;
789 f->hash = hash;
790
791 memcpy(&hwe, hwe_p, sizeof(*hwe_p));
792 hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);
793
794 type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
795 if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
796 memcpy(&hwe.ipv6.l2, &e->data.bridge.l2, sizeof(hwe.ipv6.l2));
797 hwe.ipv6.ib2 = e->data.bridge.ib2;
798 /* setting smac_id to 0xf instruct the hw to keep original
799 * source mac address
800 */
801 hwe.ipv6.l2.src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID,
802 0xf);
803 } else {
804 memcpy(&hwe.bridge.l2, &e->data.bridge.l2,
805 sizeof(hwe.bridge.l2));
806 hwe.bridge.ib2 = e->data.bridge.ib2;
807 if (type == PPE_PKT_TYPE_IPV4_HNAPT)
808 memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
809 sizeof(hwe.ipv4.new_tuple));
810 }
811
812 hwe.bridge.data = e->data.bridge.data;
813 airoha_ppe_foe_commit_entry(ppe, &hwe, hash, rx_wlan);
814
815 return 0;
816 }
817
/* Slow-path hook: a packet for an unbound FOE slot @hash arrived. Try to
 * bind a matching software flow into that slot; stale L2 subflows found on
 * the bucket list are torn down on the way. If no L4 flow matches, fall
 * back to the L2 (bridge) table keyed by the packet's MAC pair and commit
 * a subflow for it.
 */
static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
					struct sk_buff *skb,
					u32 hash, bool rx_wlan)
{
	struct airoha_flow_table_entry *e;
	struct airoha_foe_bridge br = {};
	struct airoha_foe_entry *hwe;
	bool commit_done = false;
	struct hlist_node *n;
	u32 index, state;

	spin_lock_bh(&ppe_lock);

	hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
	if (!hwe)
		goto unlock;

	/* Already bound by hardware or a previous insert — nothing to do. */
	state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
	if (state == AIROHA_FOE_STATE_BIND)
		goto unlock;

	index = airoha_ppe_foe_get_entry_hash(ppe, hwe);
	hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
		if (e->type == FLOW_TYPE_L2_SUBFLOW) {
			/* Subflows whose slot went unbound are stale. */
			state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
			if (state != AIROHA_FOE_STATE_BIND) {
				e->hash = 0xffff;
				airoha_ppe_foe_remove_flow(ppe, e);
			}
			continue;
		}

		if (!airoha_ppe_foe_compare_entry(e, hwe))
			continue;

		airoha_ppe_foe_commit_entry(ppe, &e->data, hash, rx_wlan);
		commit_done = true;
		e->hash = hash;
	}

	if (commit_done)
		goto unlock;

	/* No exact flow matched: try the L2 bridge table. */
	airoha_ppe_foe_set_bridge_addrs(&br, eth_hdr(skb));
	e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
				   airoha_l2_flow_table_params);
	if (e)
		airoha_ppe_foe_commit_subflow_entry(ppe, e, hash, rx_wlan);
unlock:
	spin_unlock_bh(&ppe_lock);
}
869
870 static int
airoha_ppe_foe_l2_flow_commit_entry(struct airoha_ppe * ppe,struct airoha_flow_table_entry * e)871 airoha_ppe_foe_l2_flow_commit_entry(struct airoha_ppe *ppe,
872 struct airoha_flow_table_entry *e)
873 {
874 struct airoha_flow_table_entry *prev;
875
876 e->type = FLOW_TYPE_L2;
877 prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &e->l2_node,
878 airoha_l2_flow_table_params);
879 if (!prev)
880 return 0;
881
882 if (IS_ERR(prev))
883 return PTR_ERR(prev);
884
885 return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
886 &e->l2_node,
887 airoha_l2_flow_table_params);
888 }
889
/* Register software flow @e for later binding. Bridge flows go to the L2
 * table; L4 flows are hung on the per-hash bucket list (hash = 0xffff
 * means "not yet bound to a FOE slot" — binding happens lazily in
 * airoha_ppe_foe_insert_entry()).
 */
static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
					    struct airoha_flow_table_entry *e)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
	u32 hash;

	if (type == PPE_PKT_TYPE_BRIDGE)
		return airoha_ppe_foe_l2_flow_commit_entry(ppe, e);

	hash = airoha_ppe_foe_get_entry_hash(ppe, &e->data);
	e->type = FLOW_TYPE_L4;
	e->hash = 0xffff;

	spin_lock_bh(&ppe_lock);
	hlist_add_head(&e->list, &ppe->foe_flow[hash]);
	spin_unlock_bh(&ppe_lock);

	return 0;
}
909
/* Compute how many timestamp ticks have elapsed since @ib1 was last
 * stamped. Bound and unbound entries use different timestamp fields/widths;
 * a negative difference means the hardware counter wrapped, so add the
 * field's modulus (mask + 1, valid because both masks are low-bit masks).
 */
static int airoha_ppe_get_entry_idle_time(struct airoha_ppe *ppe, u32 ib1)
{
	u32 state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
	u32 ts, ts_mask, now = airoha_ppe_get_timestamp(ppe);
	int idle;

	if (state == AIROHA_FOE_STATE_BIND) {
		ts = FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, ib1);
		ts_mask = AIROHA_FOE_IB1_BIND_TIMESTAMP;
	} else {
		ts = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, ib1);
		now = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, now);
		ts_mask = AIROHA_FOE_IB1_UNBIND_TIMESTAMP;
	}
	idle = now - ts;

	return idle < 0 ? idle + ts_mask + 1 : idle;
}
928
/* Refresh the idle time of L2 flow @e from its subflows: the parent
 * inherits the most recent (smallest-idle) bind timestamp among them.
 * Subflows whose FOE slot is no longer bound are removed along the way.
 * Caller must hold ppe_lock.
 */
static void
airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
				    struct airoha_flow_table_entry *e)
{
	int min_idle = airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
	struct airoha_flow_table_entry *iter;
	struct hlist_node *n;

	lockdep_assert_held(&ppe_lock);

	hlist_for_each_entry_safe(iter, n, &e->l2_flows, l2_subflow_node) {
		struct airoha_foe_entry *hwe;
		u32 ib1, state;
		int idle;

		hwe = airoha_ppe_foe_get_entry_locked(ppe, iter->hash);
		if (!hwe)
			continue;

		ib1 = READ_ONCE(hwe->ib1);
		state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
		if (state != AIROHA_FOE_STATE_BIND) {
			iter->hash = 0xffff;
			airoha_ppe_foe_remove_flow(ppe, iter);
			continue;
		}

		idle = airoha_ppe_get_entry_idle_time(ppe, ib1);
		if (idle >= min_idle)
			continue;

		/* Freshest subflow so far: adopt its bind timestamp. */
		min_idle = idle;
		e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
		e->data.ib1 |= ib1 & AIROHA_FOE_IB1_BIND_TIMESTAMP;
	}
}
965
/* Sync software flow @e with hardware state. L2 flows aggregate over their
 * subflows; bound L4 flows re-read their FOE slot and either adopt the
 * hardware ib1 (timestamp refresh) or, if the slot now holds a different
 * flow, drop the binding (hash = 0xffff).
 */
static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe,
					     struct airoha_flow_table_entry *e)
{
	struct airoha_foe_entry *hwe_p, hwe = {};

	spin_lock_bh(&ppe_lock);

	if (e->type == FLOW_TYPE_L2) {
		airoha_ppe_foe_flow_l2_entry_update(ppe, e);
		goto unlock;
	}

	if (e->hash == 0xffff)
		goto unlock;

	hwe_p = airoha_ppe_foe_get_entry_locked(ppe, e->hash);
	if (!hwe_p)
		goto unlock;

	memcpy(&hwe, hwe_p, sizeof(*hwe_p));
	if (!airoha_ppe_foe_compare_entry(e, &hwe)) {
		e->hash = 0xffff;
		goto unlock;
	}

	e->data.ib1 = hwe.ib1;
unlock:
	spin_unlock_bh(&ppe_lock);
}
995
airoha_ppe_entry_idle_time(struct airoha_ppe * ppe,struct airoha_flow_table_entry * e)996 static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe,
997 struct airoha_flow_table_entry *e)
998 {
999 airoha_ppe_foe_flow_entry_update(ppe, e);
1000
1001 return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
1002 }
1003
/* FLOW_CLS_REPLACE handler: parse a flower rule and install the
 * corresponding hw FOE entry.
 *
 * META and CONTROL dissector keys are mandatory. The PPE packet type
 * is derived from the control addr_type: no address key means a pure
 * L2 bridge flow, IPv4 maps to HNAPT and IPv6 to 5-tuple routing.
 * Actions are scanned twice: a first pass collects eth mangling,
 * redirect target, vlan and pppoe state needed to build the entry
 * skeleton; a second pass applies L3/L4 mangling once the skeleton
 * exists. On success the entry is committed to hw and tracked in
 * eth->flow_table keyed by the tc cookie.
 *
 * Returns 0 on success or a negative errno.
 */
static int airoha_ppe_flow_offload_replace(struct airoha_eth *eth,
					   struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct airoha_flow_table_entry *e;
	struct airoha_flow_data data = {};
	struct net_device *odev = NULL;
	struct flow_action_entry *act;
	struct airoha_foe_entry hwe;
	int err, i, offload_type;
	u16 addr_type = 0;
	u8 l4proto = 0;

	/* Reject duplicate offload requests for the same cookie. */
	if (rhashtable_lookup(&eth->flow_table, &f->cookie,
			      airoha_flow_table_params))
		return -EEXIST;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return -EOPNOTSUPP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
		/* Any matched control flag (frag, ...) is unsupported. */
		if (flow_rule_has_control_flags(match.mask->flags,
						f->common.extack))
			return -EOPNOTSUPP;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		l4proto = match.key->ip_proto;
	} else {
		return -EOPNOTSUPP;
	}

	/* Map the matched address type onto a PPE packet type. */
	switch (addr_type) {
	case 0:
		/* No L3 addresses: plain L2 bridge flow keyed on the
		 * eth header.
		 */
		offload_type = PPE_PKT_TYPE_BRIDGE;
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
			struct flow_match_eth_addrs match;

			flow_rule_match_eth_addrs(rule, &match);
			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
		} else {
			return -EOPNOTSUPP;
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		offload_type = PPE_PKT_TYPE_IPV4_HNAPT;
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		offload_type = PPE_PKT_TYPE_IPV6_ROUTE_5T;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* First action pass: eth mangling, output device, vlan/pppoe
	 * encap. L3/L4 mangling is deferred until the FOE skeleton is
	 * built below.
	 */
	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE:
			if (offload_type == PPE_PKT_TYPE_BRIDGE)
				return -EOPNOTSUPP;

			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
				airoha_ppe_flow_mangle_eth(act, &data.eth);
			break;
		case FLOW_ACTION_REDIRECT:
			odev = act->dev;
			break;
		case FLOW_ACTION_CSUM:
			break;
		case FLOW_ACTION_VLAN_PUSH:
			/* At most two 802.1Q tags are supported. */
			if (data.vlan.num == 2 ||
			    act->vlan.proto != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan.hdr[data.vlan.num].id = act->vlan.vid;
			data.vlan.hdr[data.vlan.num].proto = act->vlan.proto;
			data.vlan.num++;
			break;
		case FLOW_ACTION_VLAN_POP:
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			/* Single pppoe session, and not on top of a
			 * double-tagged frame.
			 */
			if (data.pppoe.num == 1 || data.vlan.num == 2)
				return -EOPNOTSUPP;

			data.pppoe.sid = act->pppoe.sid;
			data.pppoe.num++;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (!is_valid_ether_addr(data.eth.h_source) ||
	    !is_valid_ether_addr(data.eth.h_dest))
		return -EINVAL;

	err = airoha_ppe_foe_entry_prepare(eth, &hwe, odev, offload_type,
					   &data, l4proto);
	if (err)
		return err;

	/* L4 ports are mandatory for routed flows, meaningless for
	 * bridged ones.
	 */
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports ports;

		if (offload_type == PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		flow_rule_match_ports(rule, &ports);
		data.src_port = ports.key->src;
		data.dst_port = ports.key->dst;
	} else if (offload_type != PPE_PKT_TYPE_BRIDGE) {
		return -EOPNOTSUPP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs addrs;

		flow_rule_match_ipv4_addrs(rule, &addrs);
		data.v4.src_addr = addrs.key->src;
		data.v4.dst_addr = addrs.key->dst;
		/* egress = false: fill the original (pre-NAT) tuple. */
		airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, false);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs addrs;

		flow_rule_match_ipv6_addrs(rule, &addrs);

		data.v6.src_addr = addrs.key->src;
		data.v6.dst_addr = addrs.key->dst;
		airoha_ppe_foe_entry_set_ipv6_tuple(&hwe, &data);
	}

	/* Second action pass: apply L3/L4 mangling on top of the
	 * tuples collected above.
	 */
	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		if (offload_type == PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			err = airoha_ppe_flow_mangle_ports(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			err = airoha_ppe_flow_mangle_ipv4(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
			/* handled earlier */
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (err)
			return err;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		/* egress = true: fill the translated (post-NAT) tuple. */
		err = airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, true);
		if (err)
			return err;
	}

	e = kzalloc_obj(*e);
	if (!e)
		return -ENOMEM;

	e->cookie = f->cookie;
	memcpy(&e->data, &hwe, sizeof(e->data));

	err = airoha_ppe_foe_flow_commit_entry(eth->ppe, e);
	if (err)
		goto free_entry;

	err = rhashtable_insert_fast(&eth->flow_table, &e->node,
				     airoha_flow_table_params);
	if (err < 0)
		goto remove_foe_entry;

	return 0;

remove_foe_entry:
	airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
free_entry:
	kfree(e);

	return err;
}
1203
airoha_ppe_flow_offload_destroy(struct airoha_eth * eth,struct flow_cls_offload * f)1204 static int airoha_ppe_flow_offload_destroy(struct airoha_eth *eth,
1205 struct flow_cls_offload *f)
1206 {
1207 struct airoha_flow_table_entry *e;
1208
1209 e = rhashtable_lookup(ð->flow_table, &f->cookie,
1210 airoha_flow_table_params);
1211 if (!e)
1212 return -ENOENT;
1213
1214 airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
1215 rhashtable_remove_fast(ð->flow_table, &e->node,
1216 airoha_flow_table_params);
1217 kfree(e);
1218
1219 return 0;
1220 }
1221
/* Read the 64-bit packet/byte counters for the FOE entry at @hash
 * into @stats.
 *
 * The driver-side foe_stats array accumulates the high 32 bits of
 * each counter while the NPU exports the low 32 bits through iomem;
 * both halves are combined here. @stats is left untouched when flow
 * stats are unsupported, @hash has no stats slot, the slot index is
 * out of range, or no NPU is currently attached.
 */
void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
				    struct airoha_foe_stats64 *stats)
{
	struct airoha_eth *eth = ppe->eth;
	int ppe_num_stats_entries;
	struct airoha_npu *npu;
	u32 index;

	/* Negative count means stats are unsupported on this SoC. */
	ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
	if (ppe_num_stats_entries < 0)
		return;

	if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index))
		return;

	if (index >= ppe_num_stats_entries)
		return;

	rcu_read_lock();

	npu = rcu_dereference(eth->npu);
	if (npu) {
		/* High halves from the driver accumulator... */
		u64 packets = ppe->foe_stats[index].packets;
		u64 bytes = ppe->foe_stats[index].bytes;
		struct airoha_foe_stats npu_stats;

		/* ...low halves from the NPU iomem region. */
		memcpy_fromio(&npu_stats, &npu->stats[index],
			      sizeof(*npu->stats));
		stats->packets = packets << 32 | npu_stats.packets;
		stats->bytes = bytes << 32 | npu_stats.bytes;
	}

	rcu_read_unlock();
}
1256
/* FLOW_CLS_STATS handler: report last-use time and counter deltas for
 * the flow identified by f->cookie.
 *
 * Counters are cumulative in hw, so the previously reported values
 * cached in e->stats are subtracted and the cache is refreshed, making
 * each query return only the traffic seen since the last one.
 */
static int airoha_ppe_flow_offload_stats(struct airoha_eth *eth,
					 struct flow_cls_offload *f)
{
	struct airoha_flow_table_entry *e;
	u32 idle;

	e = rhashtable_lookup(&eth->flow_table, &f->cookie,
			      airoha_flow_table_params);
	if (!e)
		return -ENOENT;

	/* idle is in seconds; convert back to a jiffies timestamp. */
	idle = airoha_ppe_entry_idle_time(eth->ppe, e);
	f->stats.lastused = jiffies - idle * HZ;

	/* Only bound entries (valid hw hash) have hw counters. */
	if (e->hash != 0xffff) {
		struct airoha_foe_stats64 stats = {};

		airoha_ppe_foe_entry_get_stats(eth->ppe, e->hash, &stats);
		f->stats.pkts += (stats.packets - e->stats.packets);
		f->stats.bytes += (stats.bytes - e->stats.bytes);
		e->stats = stats;
	}

	return 0;
}
1282
airoha_ppe_flow_offload_cmd(struct airoha_eth * eth,struct flow_cls_offload * f)1283 static int airoha_ppe_flow_offload_cmd(struct airoha_eth *eth,
1284 struct flow_cls_offload *f)
1285 {
1286 switch (f->command) {
1287 case FLOW_CLS_REPLACE:
1288 return airoha_ppe_flow_offload_replace(eth, f);
1289 case FLOW_CLS_DESTROY:
1290 return airoha_ppe_flow_offload_destroy(eth, f);
1291 case FLOW_CLS_STATS:
1292 return airoha_ppe_flow_offload_stats(eth, f);
1293 default:
1294 break;
1295 }
1296
1297 return -EOPNOTSUPP;
1298 }
1299
airoha_ppe_flush_sram_entries(struct airoha_ppe * ppe)1300 static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe)
1301 {
1302 u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
1303 struct airoha_foe_entry *hwe = ppe->foe;
1304 int i, err = 0;
1305
1306 for (i = 0; i < sram_num_entries; i++) {
1307 int err;
1308
1309 memset(&hwe[i], 0, sizeof(*hwe));
1310 err = airoha_ppe_foe_commit_sram_entry(ppe, i);
1311 if (err)
1312 break;
1313 }
1314
1315 return err;
1316 }
1317
airoha_ppe_npu_get(struct airoha_eth * eth)1318 static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
1319 {
1320 struct airoha_npu *npu = airoha_npu_get(eth->dev);
1321
1322 if (IS_ERR(npu)) {
1323 request_module("airoha-npu");
1324 npu = airoha_npu_get(eth->dev);
1325 }
1326
1327 return npu;
1328 }
1329
/* Lazy bring-up of the NPU-backed offload path: acquire the NPU
 * (loading its module on demand), initialize the PPE fw on it, wire
 * up the optional flow-stats region, program the PPE hw and finally
 * publish the NPU pointer via RCU so readers can start using it.
 *
 * Called with flow_offload_mutex held (see
 * airoha_ppe_setup_tc_block_cb()).
 */
static int airoha_ppe_offload_setup(struct airoha_eth *eth)
{
	struct airoha_npu *npu = airoha_ppe_npu_get(eth);
	struct airoha_ppe *ppe = eth->ppe;
	int err, ppe_num_stats_entries;

	if (IS_ERR(npu))
		return PTR_ERR(npu);

	err = npu->ops.ppe_init(npu);
	if (err)
		goto error_npu_put;

	/* Flow stats are optional: <= 0 means unsupported here. */
	ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
	if (ppe_num_stats_entries > 0) {
		err = npu->ops.ppe_init_stats(npu, ppe->foe_stats_dma,
					      ppe_num_stats_entries);
		if (err)
			goto error_npu_put;
	}

	airoha_ppe_hw_init(ppe);
	airoha_ppe_foe_flow_stats_reset(ppe, npu);

	/* Publish only after hw setup is complete, then wait for all
	 * pre-existing RCU readers before returning.
	 */
	rcu_assign_pointer(eth->npu, npu);
	synchronize_rcu();

	return 0;

error_npu_put:
	airoha_npu_put(npu);

	return err;
}
1364
/* tc block callback exposed through the airoha_ppe_dev ops interface:
 * lazily sets up the NPU offload path on first use, then forwards the
 * flower command. Serialized by flow_offload_mutex.
 */
int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data)
{
	struct airoha_ppe *ppe = dev->priv;
	struct airoha_eth *eth = ppe->eth;
	int err = 0;

	/* Netfilter flowtable can try to offload flower rules while not all
	 * the net_devices are registered or initialized. Delay offloading
	 * until all net_devices are registered in the system.
	 */
	if (!test_bit(DEV_STATE_REGISTERED, &eth->state))
		return -EBUSY;

	mutex_lock(&flow_offload_mutex);

	/* The first request triggers the NPU/PPE offload setup. */
	if (!eth->npu)
		err = airoha_ppe_offload_setup(eth);
	if (!err)
		err = airoha_ppe_flow_offload_cmd(eth, type_data);

	mutex_unlock(&flow_offload_mutex);

	return err;
}
1389
/* Datapath hook: (re)validate the FOE entry associated with @hash for
 * the received @skb, rate-limited to once per hash every HZ/10
 * jiffies.
 *
 * foe_check_time[] stores a truncated 16-bit jiffies snapshot per
 * hash; the unsigned u16 subtraction keeps the comparison
 * wraparound-safe.
 * NOTE(review): using num_entries - 1 as a mask assumes the total
 * entry count is a power of two — confirm against the SoC data.
 */
void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
			  u16 hash, bool rx_wlan)
{
	struct airoha_ppe *ppe = dev->priv;
	u32 ppe_hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1;
	u16 now, diff;

	/* Ignore hw hashes beyond the FOE table. */
	if (hash > ppe_hash_mask)
		return;

	now = (u16)jiffies;
	diff = now - ppe->foe_check_time[hash];
	if (diff < HZ / 10)
		return;

	ppe->foe_check_time[hash] = now;
	airoha_ppe_foe_insert_entry(ppe, skb, hash, rx_wlan);
}
1408
airoha_ppe_init_upd_mem(struct airoha_gdm_port * port)1409 void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
1410 {
1411 struct airoha_eth *eth = port->qdma->eth;
1412 struct net_device *dev = port->dev;
1413 const u8 *addr = dev->dev_addr;
1414 u32 val;
1415
1416 val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
1417 airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
1418 airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
1419 FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
1420 PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
1421
1422 val = (addr[0] << 8) | addr[1];
1423 airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
1424 airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
1425 FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
1426 FIELD_PREP(PPE_UPDMEM_OFFSET_MASK, 1) |
1427 PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
1428 }
1429
/* Resolve the "airoha,eth" phandle of @dev to the PPE device
 * interface exported by this driver.
 *
 * On success the platform device reference and a module reference are
 * retained (released by airoha_ppe_put_dev()) and a device link is
 * added so @dev is unbound before the eth driver goes away. Returns a
 * valid pointer or ERR_PTR(-ENODEV) on any failure.
 */
struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev)
{
	struct platform_device *pdev;
	struct device_node *np;
	struct airoha_eth *eth;

	np = of_parse_phandle(dev->of_node, "airoha,eth", 0);
	if (!np)
		return ERR_PTR(-ENODEV);

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		dev_err(dev, "cannot find device node %s\n", np->name);
		of_node_put(np);
		return ERR_PTR(-ENODEV);
	}
	of_node_put(np);

	/* Pin this module while a consumer holds the interface. */
	if (!try_module_get(THIS_MODULE)) {
		dev_err(dev, "failed to get the device driver module\n");
		goto error_pdev_put;
	}

	eth = platform_get_drvdata(pdev);
	if (!eth) /* eth driver not probed yet */
		goto error_module_put;

	if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) {
		dev_err(&pdev->dev,
			"failed to create device link to consumer %s\n",
			dev_name(dev));
		goto error_module_put;
	}

	return &eth->ppe->dev;

error_module_put:
	module_put(THIS_MODULE);
error_pdev_put:
	platform_device_put(pdev);

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(airoha_ppe_get_dev);
1474
/* Drop the references taken by airoha_ppe_get_dev(): the module
 * refcount and the eth device reference.
 * NOTE(review): this assumes eth->dev is the same struct device whose
 * reference was acquired via of_find_device_by_node() in
 * airoha_ppe_get_dev() — confirm, otherwise the put is unbalanced.
 */
void airoha_ppe_put_dev(struct airoha_ppe_dev *dev)
{
	struct airoha_ppe *ppe = dev->priv;
	struct airoha_eth *eth = ppe->eth;

	module_put(THIS_MODULE);
	put_device(eth->dev);
}
EXPORT_SYMBOL_GPL(airoha_ppe_put_dev);
1484
airoha_ppe_init(struct airoha_eth * eth)1485 int airoha_ppe_init(struct airoha_eth *eth)
1486 {
1487 int foe_size, err, ppe_num_stats_entries;
1488 u32 ppe_num_entries;
1489 struct airoha_ppe *ppe;
1490
1491 ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
1492 if (!ppe)
1493 return -ENOMEM;
1494
1495 ppe->dev.ops.setup_tc_block_cb = airoha_ppe_setup_tc_block_cb;
1496 ppe->dev.ops.check_skb = airoha_ppe_check_skb;
1497 ppe->dev.priv = ppe;
1498 ppe->eth = eth;
1499 eth->ppe = ppe;
1500
1501 ppe_num_entries = airoha_ppe_get_total_num_entries(ppe);
1502 foe_size = ppe_num_entries * sizeof(struct airoha_foe_entry);
1503 ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
1504 GFP_KERNEL);
1505 if (!ppe->foe)
1506 return -ENOMEM;
1507
1508 ppe->foe_flow = devm_kzalloc(eth->dev,
1509 ppe_num_entries * sizeof(*ppe->foe_flow),
1510 GFP_KERNEL);
1511 if (!ppe->foe_flow)
1512 return -ENOMEM;
1513
1514 ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
1515 if (ppe_num_stats_entries > 0) {
1516 foe_size = ppe_num_stats_entries * sizeof(*ppe->foe_stats);
1517 ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size,
1518 &ppe->foe_stats_dma,
1519 GFP_KERNEL);
1520 if (!ppe->foe_stats)
1521 return -ENOMEM;
1522 }
1523
1524 ppe->foe_check_time = devm_kzalloc(eth->dev, ppe_num_entries,
1525 GFP_KERNEL);
1526 if (!ppe->foe_check_time)
1527 return -ENOMEM;
1528
1529 err = airoha_ppe_flush_sram_entries(ppe);
1530 if (err)
1531 return err;
1532
1533 err = rhashtable_init(ð->flow_table, &airoha_flow_table_params);
1534 if (err)
1535 return err;
1536
1537 err = rhashtable_init(&ppe->l2_flows, &airoha_l2_flow_table_params);
1538 if (err)
1539 goto error_flow_table_destroy;
1540
1541 err = airoha_ppe_debugfs_init(ppe);
1542 if (err)
1543 goto error_l2_flow_table_destroy;
1544
1545 return 0;
1546
1547 error_l2_flow_table_destroy:
1548 rhashtable_destroy(&ppe->l2_flows);
1549 error_flow_table_destroy:
1550 rhashtable_destroy(ð->flow_table);
1551
1552 return err;
1553 }
1554
/* Tear down the PPE: unpublish and release the NPU (if the offload
 * path was ever set up), then destroy the flow hash tables and the
 * debugfs entry. flow_offload_mutex serializes against in-flight
 * offload requests.
 */
void airoha_ppe_deinit(struct airoha_eth *eth)
{
	struct airoha_npu *npu;

	mutex_lock(&flow_offload_mutex);

	/* Swap the published pointer for NULL so no new offload
	 * request can pick up the NPU while we release it.
	 */
	npu = rcu_replace_pointer(eth->npu, NULL,
				  lockdep_is_held(&flow_offload_mutex));
	if (npu) {
		npu->ops.ppe_deinit(npu);
		airoha_npu_put(npu);
	}

	mutex_unlock(&flow_offload_mutex);

	rhashtable_destroy(&eth->ppe->l2_flows);
	rhashtable_destroy(&eth->flow_table);
	debugfs_remove(eth->ppe->debugfs_dir);
}
1574