1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2025 AIROHA Inc
4 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
5 */
6
7 #include <linux/ip.h>
8 #include <linux/ipv6.h>
9 #include <linux/of_platform.h>
10 #include <linux/platform_device.h>
11 #include <linux/rhashtable.h>
12 #include <net/ipv6.h>
13 #include <net/pkt_cls.h>
14
15 #include "airoha_regs.h"
16 #include "airoha_eth.h"
17
18 static DEFINE_MUTEX(flow_offload_mutex);
19 static DEFINE_SPINLOCK(ppe_lock);
20
/* Main flow table: maps the flow_cls_offload cookie (unsigned long) to its
 * struct airoha_flow_table_entry.
 */
static const struct rhashtable_params airoha_flow_table_params = {
	.head_offset = offsetof(struct airoha_flow_table_entry, node),
	.key_offset = offsetof(struct airoha_flow_table_entry, cookie),
	.key_len = sizeof(unsigned long),
	.automatic_shrinking = true,
};
27
/* L2 (bridge) flow table: keyed by the destination+source MAC pair stored
 * at the start of data.bridge (2 * ETH_ALEN bytes).
 */
static const struct rhashtable_params airoha_l2_flow_table_params = {
	.head_offset = offsetof(struct airoha_flow_table_entry, l2_node),
	.key_offset = offsetof(struct airoha_flow_table_entry, data.bridge),
	.key_len = 2 * ETH_ALEN,
	.automatic_shrinking = true,
};
34
airoha_ppe_get_num_stats_entries(struct airoha_ppe * ppe)35 static int airoha_ppe_get_num_stats_entries(struct airoha_ppe *ppe)
36 {
37 if (!IS_ENABLED(CONFIG_NET_AIROHA_FLOW_STATS))
38 return -EOPNOTSUPP;
39
40 if (airoha_is_7583(ppe->eth))
41 return -EOPNOTSUPP;
42
43 return PPE_STATS_NUM_ENTRIES;
44 }
45
airoha_ppe_get_total_num_stats_entries(struct airoha_ppe * ppe)46 static int airoha_ppe_get_total_num_stats_entries(struct airoha_ppe *ppe)
47 {
48 int num_stats = airoha_ppe_get_num_stats_entries(ppe);
49
50 if (num_stats > 0) {
51 struct airoha_eth *eth = ppe->eth;
52
53 num_stats = num_stats * eth->soc->num_ppe;
54 }
55
56 return num_stats;
57 }
58
airoha_ppe_get_total_sram_num_entries(struct airoha_ppe * ppe)59 static u32 airoha_ppe_get_total_sram_num_entries(struct airoha_ppe *ppe)
60 {
61 struct airoha_eth *eth = ppe->eth;
62
63 return PPE_SRAM_NUM_ENTRIES * eth->soc->num_ppe;
64 }
65
airoha_ppe_get_total_num_entries(struct airoha_ppe * ppe)66 u32 airoha_ppe_get_total_num_entries(struct airoha_ppe *ppe)
67 {
68 u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
69
70 return sram_num_entries + PPE_DRAM_NUM_ENTRIES;
71 }
72
airoha_ppe_is_enabled(struct airoha_eth * eth,int index)73 bool airoha_ppe_is_enabled(struct airoha_eth *eth, int index)
74 {
75 if (index >= eth->soc->num_ppe)
76 return false;
77
78 return airoha_fe_rr(eth, REG_PPE_GLO_CFG(index)) & PPE_GLO_CFG_EN_MASK;
79 }
80
airoha_ppe_get_timestamp(struct airoha_ppe * ppe)81 static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
82 {
83 u16 timestamp = airoha_fe_rr(ppe->eth, REG_FE_FOE_TS);
84
85 return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp);
86 }
87
/* Program the default CPU port used by PPE instance @ppe_id for frontend
 * port @fport: traffic handled by the second QDMA block is steered to
 * CDM2, everything else to CDM1.
 *
 * Fix: the extracted source contained "ð->qdma[0]" - a mis-decoded HTML
 * entity for "&eth" - which is not valid C; restore "&eth->qdma[0]".
 */
void airoha_ppe_set_cpu_port(struct airoha_gdm_port *port, u8 ppe_id, u8 fport)
{
	struct airoha_qdma *qdma = port->qdma;
	struct airoha_eth *eth = qdma->eth;
	u8 qdma_id = qdma - &eth->qdma[0];
	u32 fe_cpu_port;

	fe_cpu_port = qdma_id ? FE_PSE_PORT_CDM2 : FE_PSE_PORT_CDM1;
	airoha_fe_rmw(eth, REG_PPE_DFT_CPORT(ppe_id, fport),
		      DFT_CPORT_MASK(fport),
		      __field_prep(DFT_CPORT_MASK(fport), fe_cpu_port));
}
100
/* One-time hw setup of every PPE instance: FOE table base and geometry,
 * binding ageing timers, hash configuration, bind rate limits, per-port
 * egress MTUs and default CPU ports.
 */
static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
{
	u32 sram_ppe_num_data_entries = PPE_SRAM_NUM_ENTRIES, sram_num_entries;
	u32 sram_tb_size, dram_num_entries;
	struct airoha_eth *eth = ppe->eth;
	int i, sram_num_stats_entries;

	sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
	sram_tb_size = sram_num_entries * sizeof(struct airoha_foe_entry);
	dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);

	/* When hw flow accounting is available, part of each SRAM slice is
	 * reserved for stats, shrinking the space left for FOE data entries.
	 */
	sram_num_stats_entries = airoha_ppe_get_num_stats_entries(ppe);
	if (sram_num_stats_entries > 0)
		sram_ppe_num_data_entries -= sram_num_stats_entries;
	sram_ppe_num_data_entries =
		PPE_RAM_NUM_ENTRIES_SHIFT(sram_ppe_num_data_entries);

	for (i = 0; i < eth->soc->num_ppe; i++) {
		int p;

		/* DRAM-backed entries start right after the SRAM-backed
		 * ones in the shared DMA area.
		 */
		airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
			     ppe->foe_dma + sram_tb_size);

		/* Ageing deltas for bound non-L4/UDP and TCP/TCP-FIN
		 * entries.
		 */
		airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
			      PPE_BIND_AGE0_DELTA_NON_L4 |
			      PPE_BIND_AGE0_DELTA_UDP,
			      FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 60) |
			      FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 60));
		airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
			      PPE_BIND_AGE1_DELTA_TCP_FIN |
			      PPE_BIND_AGE1_DELTA_TCP,
			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 60));

		/* Enable SRAM + DRAM lookup; note the MODE/EN bits not set
		 * in the value are explicitly cleared by the mask.
		 */
		airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
			      PPE_SRAM_TABLE_EN_MASK |
			      PPE_SRAM_HASH1_EN_MASK |
			      PPE_DRAM_TABLE_EN_MASK |
			      PPE_SRAM_HASH0_MODE_MASK |
			      PPE_SRAM_HASH1_MODE_MASK |
			      PPE_DRAM_HASH0_MODE_MASK |
			      PPE_DRAM_HASH1_MODE_MASK,
			      FIELD_PREP(PPE_SRAM_TABLE_EN_MASK, 1) |
			      FIELD_PREP(PPE_SRAM_HASH1_EN_MASK, 1) |
			      FIELD_PREP(PPE_SRAM_HASH1_MODE_MASK, 1) |
			      FIELD_PREP(PPE_DRAM_HASH1_MODE_MASK, 3));

		/* Table geometry: SRAM/DRAM entry counts are programmed in
		 * the log2-encoded form computed above.
		 */
		airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
			      PPE_TB_CFG_SEARCH_MISS_MASK |
			      PPE_SRAM_TB_NUM_ENTRY_MASK |
			      PPE_DRAM_TB_NUM_ENTRY_MASK |
			      PPE_TB_CFG_KEEPALIVE_MASK |
			      PPE_TB_ENTRY_SIZE_MASK,
			      FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
			      FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0) |
			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
					 sram_ppe_num_data_entries) |
			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
					 dram_num_entries));

		airoha_fe_rmw(eth, REG_PPE_BIND_RATE(i),
			      PPE_BIND_RATE_L2B_BIND_MASK |
			      PPE_BIND_RATE_BIND_MASK,
			      FIELD_PREP(PPE_BIND_RATE_L2B_BIND_MASK, 0x1e) |
			      FIELD_PREP(PPE_BIND_RATE_BIND_MASK, 0x1e));

		airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);
		/* IPv6 6RD flows are not offloaded. */
		airoha_fe_clear(eth, REG_PPE_PPE_FLOW_CFG(i),
				PPE_FLOW_CFG_IP6_6RD_MASK);

		for (p = 0; p < ARRAY_SIZE(eth->ports); p++) {
			struct airoha_gdm_port *port = eth->ports[p];

			/* MTU is programmed for every slot; the CPU port
			 * only for registered ports.
			 */
			airoha_fe_rmw(eth, REG_PPE_MTU(i, p),
				      FP0_EGRESS_MTU_MASK |
				      FP1_EGRESS_MTU_MASK,
				      FIELD_PREP(FP0_EGRESS_MTU_MASK,
						 AIROHA_MAX_MTU) |
				      FIELD_PREP(FP1_EGRESS_MTU_MASK,
						 AIROHA_MAX_MTU));
			if (!port)
				continue;

			airoha_ppe_set_cpu_port(port, i,
						airoha_get_fe_port(port));
		}
	}
}
189
airoha_ppe_flow_mangle_eth(const struct flow_action_entry * act,void * eth)190 static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act, void *eth)
191 {
192 void *dest = eth + act->mangle.offset;
193 const void *src = &act->mangle.val;
194
195 if (act->mangle.offset > 8)
196 return;
197
198 if (act->mangle.mask == 0xffff) {
199 src += 2;
200 dest += 2;
201 }
202
203 memcpy(dest, src, act->mangle.mask ? 2 : 4);
204 }
205
airoha_ppe_flow_mangle_ports(const struct flow_action_entry * act,struct airoha_flow_data * data)206 static int airoha_ppe_flow_mangle_ports(const struct flow_action_entry *act,
207 struct airoha_flow_data *data)
208 {
209 u32 val = be32_to_cpu((__force __be32)act->mangle.val);
210
211 switch (act->mangle.offset) {
212 case 0:
213 if ((__force __be32)act->mangle.mask == ~cpu_to_be32(0xffff))
214 data->dst_port = cpu_to_be16(val);
215 else
216 data->src_port = cpu_to_be16(val >> 16);
217 break;
218 case 2:
219 data->dst_port = cpu_to_be16(val);
220 break;
221 default:
222 return -EINVAL;
223 }
224
225 return 0;
226 }
227
airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry * act,struct airoha_flow_data * data)228 static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
229 struct airoha_flow_data *data)
230 {
231 __be32 *dest;
232
233 switch (act->mangle.offset) {
234 case offsetof(struct iphdr, saddr):
235 dest = &data->v4.src_addr;
236 break;
237 case offsetof(struct iphdr, daddr):
238 dest = &data->v4.dst_addr;
239 break;
240 default:
241 return -EINVAL;
242 }
243
244 memcpy(dest, &act->mangle.val, sizeof(u32));
245
246 return 0;
247 }
248
airoha_ppe_get_wdma_info(struct net_device * dev,const u8 * addr,struct airoha_wdma_info * info)249 static int airoha_ppe_get_wdma_info(struct net_device *dev, const u8 *addr,
250 struct airoha_wdma_info *info)
251 {
252 struct net_device_path_stack stack;
253 struct net_device_path *path;
254 int err;
255
256 if (!dev)
257 return -ENODEV;
258
259 rcu_read_lock();
260 err = dev_fill_forward_path(dev, addr, &stack);
261 rcu_read_unlock();
262 if (err)
263 return err;
264
265 path = &stack.path[stack.num_paths - 1];
266 if (path->type != DEV_PATH_MTK_WDMA)
267 return -1;
268
269 info->idx = path->mtk_wdma.wdma_idx;
270 info->bss = path->mtk_wdma.bss;
271 info->wcid = path->mtk_wdma.wcid;
272
273 return 0;
274 }
275
/* If *dev is a DSA user port, replace *dev with its conduit device and
 * return the DSA port index; return -ENODEV otherwise (or when DSA
 * support is compiled out).
 */
static int airoha_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dp = dsa_port_from_netdev(*dev);

	if (IS_ERR(dp))
		return -ENODEV;

	*dev = dsa_port_to_conduit(dp);
	return dp->index;
#else
	return -ENODEV;
#endif
}
290
/* Pack the ethernet destination/source addresses into the FOE bridge
 * entry layout. Note the asymmetric split: destination is 32bit-hi /
 * 16bit-lo, source is 16bit-hi / 32bit-lo.
 */
static void airoha_ppe_foe_set_bridge_addrs(struct airoha_foe_bridge *br,
					    struct ethhdr *eh)
{
	br->dest_mac_hi = get_unaligned_be32(eh->h_dest);
	br->dest_mac_lo = get_unaligned_be16(eh->h_dest + 4);
	br->src_mac_hi = get_unaligned_be16(eh->h_source);
	br->src_mac_lo = get_unaligned_be32(eh->h_source + 2);
}
299
/* Build a FOE hw entry (@hwe) from the parsed flow description (@data).
 * @dev is the egress device (possibly a DSA user port, in which case it
 * is swapped for the conduit), @type the PPE_PKT_TYPE_* packet class and
 * @l4proto the IP protocol. Returns 0 on success or a negative errno.
 */
static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
					struct airoha_foe_entry *hwe,
					struct net_device *dev, int type,
					struct airoha_flow_data *data,
					int l4proto)
{
	u32 qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f), ports_pad, val;
	int wlan_etype = -EINVAL, dsa_port = airoha_get_dsa_port(&dev);
	struct airoha_foe_mac_info_common *l2;
	/* 0xf instructs the hw to keep the original source mac address;
	 * overwritten with the GDM port id below when applicable.
	 */
	u8 smac_id = 0xf;

	memset(hwe, 0, sizeof(*hwe));

	val = FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, AIROHA_FOE_STATE_BIND) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_PACKET_TYPE, type) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_PPPOE, data->pppoe.num) |
	      AIROHA_FOE_IB1_BIND_TTL;
	hwe->ib1 = val;

	val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f);
	if (dev) {
		struct airoha_wdma_info info = {};

		/* Egress towards an MTK WDMA (wireless) device: steer to
		 * CDM4 and encode band/wcid in the mac-info etype slot.
		 */
		if (!airoha_ppe_get_wdma_info(dev, data->eth.h_dest, &info)) {
			val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, info.idx) |
			       FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT,
					  FE_PSE_PORT_CDM4);
			qdata |= FIELD_PREP(AIROHA_FOE_ACTDP, info.bss);
			wlan_etype = FIELD_PREP(AIROHA_FOE_MAC_WDMA_BAND,
						info.idx) |
				     FIELD_PREP(AIROHA_FOE_MAC_WDMA_WCID,
						info.wcid);
		} else {
			struct airoha_gdm_port *port = netdev_priv(dev);
			u8 pse_port;

			if (!airoha_is_valid_gdm_port(eth, port))
				return -EINVAL;

			if (dsa_port >= 0 || eth->ports[1])
				pse_port = port->id == 4 ? FE_PSE_PORT_GDM4
							 : port->id;
			else
				pse_port = 2; /* uplink relies on GDM2
					       * loopback
					       */

			val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port) |
			       AIROHA_FOE_IB2_PSE_QOS;
			/* For downlink traffic consume SRAM memory for hw
			 * forwarding descriptors queue.
			 */
			if (airoha_is_lan_gdm_port(port))
				val |= AIROHA_FOE_IB2_FAST_PATH;
			if (dsa_port >= 0)
				val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ,
						  dsa_port);

			smac_id = port->id;
		}
	}

	if (is_multicast_ether_addr(data->eth.h_dest))
		val |= AIROHA_FOE_IB2_MULTICAST;

	/* Routed (port-less) entries carry a fixed padding pattern plus the
	 * l4 protocol in the ports slot.
	 */
	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == PPE_PKT_TYPE_IPV4_ROUTE)
		hwe->ipv4.orig_tuple.ports = ports_pad;
	if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
		hwe->ipv6.ports = ports_pad;

	/* data/ib2/l2 live at different offsets depending on the packet
	 * class; pick the right view of the union.
	 */
	if (type == PPE_PKT_TYPE_BRIDGE) {
		airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth);
		hwe->bridge.data = qdata;
		hwe->bridge.ib2 = val;
		l2 = &hwe->bridge.l2.common;
	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		hwe->ipv6.data = qdata;
		hwe->ipv6.ib2 = val;
		l2 = &hwe->ipv6.l2;
		l2->etype = ETH_P_IPV6;
	} else {
		hwe->ipv4.data = qdata;
		hwe->ipv4.ib2 = val;
		l2 = &hwe->ipv4.l2.common;
		l2->etype = ETH_P_IP;
	}

	l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
	l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
	if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
		struct airoha_foe_mac_info *mac_info;

		/* IPv4 layouts store the full source mac; the extended
		 * mac_info additionally carries the PPPoE session id.
		 */
		l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
		hwe->ipv4.l2.src_mac_lo =
			get_unaligned_be16(data->eth.h_source + 4);

		mac_info = (struct airoha_foe_mac_info *)l2;
		mac_info->pppoe_id = data->pppoe.sid;
	} else {
		l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id) |
				 FIELD_PREP(AIROHA_FOE_MAC_PPPOE_ID,
					    data->pppoe.sid);
	}

	if (data->vlan.num) {
		l2->vlan1 = data->vlan.hdr[0].id;
		if (data->vlan.num == 2)
			l2->vlan2 = data->vlan.hdr[1].id;
	}

	/* etype is overloaded: WDMA band/wcid for wireless egress, DSA tag
	 * port bitmap for DSA egress, PPPoE ethertype otherwise.
	 */
	if (wlan_etype >= 0) {
		l2->etype = wlan_etype;
	} else if (dsa_port >= 0) {
		l2->etype = BIT(dsa_port);
		l2->etype |= !data->vlan.num ? BIT(15) : 0;
	} else if (data->pppoe.num) {
		l2->etype = ETH_P_PPP_SES;
	}

	return 0;
}
425
/* Fill the IPv4 tuple of @hwe from @data. For HNAPT entries the egress
 * direction targets the translated (new) tuple, everything else the
 * original one. Port fields are skipped for port-less ROUTE entries.
 */
static int airoha_ppe_foe_entry_set_ipv4_tuple(struct airoha_foe_entry *hwe,
					       struct airoha_flow_data *data,
					       bool egress)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	struct airoha_foe_ipv4_tuple *t;

	switch (type) {
	case PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &hwe->ipv4.new_tuple;
			break;
		}
		fallthrough;
	case PPE_PKT_TYPE_IPV4_DSLITE:
	case PPE_PKT_TYPE_IPV4_ROUTE:
		t = &hwe->ipv4.orig_tuple;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(data->v4.src_addr);
	t->dest_ip = be32_to_cpu(data->v4.dst_addr);

	if (type != PPE_PKT_TYPE_IPV4_ROUTE) {
		t->src_port = be16_to_cpu(data->src_port);
		t->dest_port = be16_to_cpu(data->dst_port);
	}

	return 0;
}
459
/* Fill the IPv6 tuple of @hwe from @data. 5-tuple and 6RD entries also
 * carry L4 ports; 3-tuple entries only the addresses.
 */
static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
					       struct airoha_flow_data *data)

{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 *src, *dest;

	switch (type) {
	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case PPE_PKT_TYPE_IPV6_6RD:
		hwe->ipv6.src_port = be16_to_cpu(data->src_port);
		hwe->ipv6.dest_port = be16_to_cpu(data->dst_port);
		fallthrough;
	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = hwe->ipv6.src_ip;
		dest = hwe->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	/* hw stores the addresses as host-endian u32 words. */
	ipv6_addr_be32_to_cpu(src, data->v6.src_addr.s6_addr32);
	ipv6_addr_be32_to_cpu(dest, data->v6.dst_addr.s6_addr32);

	return 0;
}
487
/* Compute the FOE table hash for @hwe, mirroring the hw hashing scheme:
 * three 32bit values (hv1..hv3) are derived from the type-specific tuple
 * and then mixed and masked to the table size. Unsupported types return
 * the mask itself (the last table index).
 */
static u32 airoha_ppe_foe_get_entry_hash(struct airoha_ppe *ppe,
					 struct airoha_foe_entry *hwe)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 ppe_hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1;
	u32 hash, hv1, hv2, hv3;

	switch (type) {
	case PPE_PKT_TYPE_IPV4_ROUTE:
	case PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = hwe->ipv4.orig_tuple.ports;
		hv2 = hwe->ipv4.orig_tuple.dest_ip;
		hv3 = hwe->ipv4.orig_tuple.src_ip;
		break;
	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = hwe->ipv6.src_ip[3] ^ hwe->ipv6.dest_ip[3];
		hv1 ^= hwe->ipv6.ports;

		hv2 = hwe->ipv6.src_ip[2] ^ hwe->ipv6.dest_ip[2];
		hv2 ^= hwe->ipv6.dest_ip[0];

		hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
		hv3 ^= hwe->ipv6.src_ip[0];
		break;
	case PPE_PKT_TYPE_BRIDGE: {
		struct airoha_foe_mac_info *l2 = &hwe->bridge.l2;

		/* L2 entries hash the src/dest mac halves re-packed into
		 * three words.
		 */
		hv1 = l2->common.src_mac_hi & 0xffff;
		hv1 = hv1 << 16 | l2->src_mac_lo;

		hv2 = l2->common.dest_mac_lo;
		hv2 = hv2 << 16;
		hv2 = hv2 | ((l2->common.src_mac_hi & 0xffff0000) >> 16);

		hv3 = l2->common.dest_mac_hi;
		break;
	}
	case PPE_PKT_TYPE_IPV4_DSLITE:
	case PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return ppe_hash_mask;
	}

	/* Final mix: bit-select, byte-rotate, xor-fold, then mask. */
	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash &= ppe_hash_mask;

	return hash;
}
541
/* Map a FOE table hash to its flow-stats slot in @index. Hashes past the
 * stats region are shifted back by PPE_STATS_NUM_ENTRIES. Returns a
 * negative errno when hw flow accounting is not supported.
 */
static int airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe,
					       u32 hash, u32 *index)
{
	int num_stats = airoha_ppe_get_total_num_stats_entries(ppe);

	if (num_stats < 0)
		return num_stats;

	*index = hash;
	if (hash >= num_stats)
		*index = hash - PPE_STATS_NUM_ENTRIES;

	return 0;
}
556
/* Clear stats slot @index on both sides: the NPU counters (io memory,
 * hence memset_io) and the CPU-side cached copy.
 */
static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe,
						 struct airoha_npu *npu,
						 int index)
{
	memset_io(&npu->stats[index], 0, sizeof(*npu->stats));
	memset(&ppe->foe_stats[index], 0, sizeof(*ppe->foe_stats));
}
564
/* Reset every hw flow-stats slot. A negative entry count (flow accounting
 * unsupported) makes the loop a no-op.
 */
static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe,
					    struct airoha_npu *npu)
{
	int num_stats = airoha_ppe_get_total_num_stats_entries(ppe);
	int i;

	for (i = 0; i < num_stats; i++)
		airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i);
}
577
/* Rewire @hwe so its traffic is counted: reset the stats slot for @hash,
 * stash the original channel/qid in ACTDP and the original forwarding
 * bits in the meter field, then redirect the entry through the
 * accounting queue. Entries towards CDM4 (wireless) are left untouched.
 */
static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
					     struct airoha_npu *npu,
					     struct airoha_foe_entry *hwe,
					     u32 hash)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 index, pse_port, val, *data, *ib2, *meter;
	int ppe_num_stats_entries;
	u8 nbq;

	ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
	if (ppe_num_stats_entries < 0)
		return;

	if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index))
		return;

	if (index >= ppe_num_stats_entries)
		return;

	/* data/ib2/meter live at different offsets per packet class. */
	if (type == PPE_PKT_TYPE_BRIDGE) {
		data = &hwe->bridge.data;
		ib2 = &hwe->bridge.ib2;
		meter = &hwe->bridge.l2.meter;
	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		data = &hwe->ipv6.data;
		ib2 = &hwe->ipv6.ib2;
		meter = &hwe->ipv6.meter;
	} else {
		data = &hwe->ipv4.data;
		ib2 = &hwe->ipv4.ib2;
		meter = &hwe->ipv4.l2.meter;
	}

	pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
	if (pse_port == FE_PSE_PORT_CDM4)
		return;

	airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index);

	/* Preserve the original channel/qid in the ACTDP field. */
	val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data);
	*data = (*data & ~AIROHA_FOE_ACTDP) |
		FIELD_PREP(AIROHA_FOE_ACTDP, val);

	/* Preserve the original forwarding bits in the meter field. */
	val = *ib2 & (AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
		      AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH);
	*meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val);

	/* Redirect to PSE port 6; NBQ depends on the original port. */
	nbq = pse_port == 1 ? 6 : 5;
	*ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
		  AIROHA_FOE_IB2_PSE_QOS);
	*ib2 |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, 6) |
		FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
}
632
/* Return a pointer to the FOE entry for @hash inside the CPU-visible
 * table. For SRAM-backed hashes the entry is first read back from hw into
 * the table (request/ack handshake word by word); DRAM-backed hashes are
 * already coherent. Returns NULL if the hw read-back times out. Caller
 * must hold ppe_lock.
 */
static struct airoha_foe_entry *
airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash)
{
	u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);

	lockdep_assert_held(&ppe_lock);

	if (hash < sram_num_entries) {
		u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
		/* Hashes past the first slice belong to the second PPE. */
		bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES;
		struct airoha_eth *eth = ppe->eth;
		u32 val;
		int i;

		airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
			     FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
			     PPE_SRAM_CTRL_REQ_MASK);
		if (read_poll_timeout_atomic(airoha_fe_rr, val,
					     val & PPE_SRAM_CTRL_ACK_MASK,
					     10, 100, false, eth,
					     REG_PPE_RAM_CTRL(ppe2)))
			return NULL;

		/* Copy the entry out of the hw read window word by word. */
		for (i = 0; i < sizeof(struct airoha_foe_entry) / sizeof(*hwe);
		     i++)
			hwe[i] = airoha_fe_rr(eth,
					      REG_PPE_RAM_ENTRY(ppe2, i));
	}

	return ppe->foe + hash * sizeof(struct airoha_foe_entry);
}
664
/* Locked wrapper around airoha_ppe_foe_get_entry_locked(). Returns the
 * FOE entry for @hash or NULL on hw read-back timeout.
 */
struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
						  u32 hash)
{
	struct airoha_foe_entry *entry;

	spin_lock_bh(&ppe_lock);
	entry = airoha_ppe_foe_get_entry_locked(ppe, hash);
	spin_unlock_bh(&ppe_lock);

	return entry;
}
676
/* Check whether sw flow @e matches hw entry @hwe: the UDP flag in ib1
 * must agree (other ib1 bits such as state/timestamp are ignored) and the
 * type-specific payload after ib1 must be byte-identical up to the
 * data/ib2 field for the entry's packet class.
 */
static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
					 struct airoha_foe_entry *hwe)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
	int len;

	if ((hwe->ib1 ^ e->data.ib1) & AIROHA_FOE_IB1_BIND_UDP)
		return false;

	if (type > PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct airoha_foe_entry, ipv6.data);
	else
		len = offsetof(struct airoha_foe_entry, ipv4.ib2);

	return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
}
693
/* Push the CPU-side copy of FOE entry @hash into hw SRAM: stage the entry
 * words in the write window, then trigger a write request and wait for
 * the ack. Returns 0 or the poll-timeout errno.
 */
static int airoha_ppe_foe_commit_sram_entry(struct airoha_ppe *ppe, u32 hash)
{
	struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
	/* Hashes past the first slice belong to the second PPE. */
	bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES;
	u32 *ptr = (u32 *)hwe, val;
	int i;

	for (i = 0; i < sizeof(*hwe) / sizeof(*ptr); i++)
		airoha_fe_wr(ppe->eth, REG_PPE_RAM_ENTRY(ppe2, i), ptr[i]);

	/* Ensure the staged words are visible before issuing the request. */
	wmb();
	airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
		     FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
		     PPE_SRAM_CTRL_WR_MASK | PPE_SRAM_CTRL_REQ_MASK);

	return read_poll_timeout_atomic(airoha_fe_rr, val,
					val & PPE_SRAM_CTRL_ACK_MASK,
					10, 100, false, ppe->eth,
					REG_PPE_RAM_CTRL(ppe2));
}
714
/* Commit flow @e into FOE table slot @hash: copy the body first, then
 * publish ib1 (with a fresh bind timestamp) so the hw never sees a bound
 * entry with a stale body. Flow-stats rewiring is skipped for wlan rx
 * (@rx_wlan). SRAM-backed slots are additionally flushed to hw.
 */
static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
				       struct airoha_foe_entry *e,
				       u32 hash, bool rx_wlan)
{
	u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
	struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
	u32 ts = airoha_ppe_get_timestamp(ppe);
	struct airoha_eth *eth = ppe->eth;
	struct airoha_npu *npu;
	int err = 0;

	/* Body before ib1; the barrier orders the two writes. */
	memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
	wmb();

	e->ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
	e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
	hwe->ib1 = e->ib1;

	rcu_read_lock();

	npu = rcu_dereference(eth->npu);
	if (!npu) {
		err = -ENODEV;
		goto unlock;
	}

	if (!rx_wlan)
		airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);

	if (hash < sram_num_entries)
		err = airoha_ppe_foe_commit_sram_entry(ppe, hash);
unlock:
	rcu_read_unlock();

	return err;
}
751
/* Unhook flow @e: invalidate its hw FOE slot (if it is bound to one) and
 * drop it from the per-hash list. L2 subflows are owned by their parent
 * L2 entry and are freed here. Caller must hold ppe_lock.
 */
static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe,
				       struct airoha_flow_table_entry *e)
{
	lockdep_assert_held(&ppe_lock);

	hlist_del_init(&e->list);
	/* 0xffff means "not bound to a hw slot". */
	if (e->hash != 0xffff) {
		e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
		e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
					  AIROHA_FOE_STATE_INVALID);
		airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash, false);
		e->hash = 0xffff;
	}
	if (e->type == FLOW_TYPE_L2_SUBFLOW) {
		hlist_del_init(&e->l2_subflow_node);
		kfree(e);
	}
}
770
/* Remove an L2 flow: drop it from the l2_flows rhashtable and tear down
 * all of its hw subflows. Note @e is reused as the iteration cursor after
 * the head pointer has been saved. Caller must hold ppe_lock.
 */
static void airoha_ppe_foe_remove_l2_flow(struct airoha_ppe *ppe,
					  struct airoha_flow_table_entry *e)
{
	struct hlist_head *head = &e->l2_flows;
	struct hlist_node *n;

	lockdep_assert_held(&ppe_lock);

	rhashtable_remove_fast(&ppe->l2_flows, &e->l2_node,
			       airoha_l2_flow_table_params);
	hlist_for_each_entry_safe(e, n, head, l2_subflow_node)
		airoha_ppe_foe_remove_flow(ppe, e);
}
784
airoha_ppe_foe_flow_remove_entry(struct airoha_ppe * ppe,struct airoha_flow_table_entry * e)785 static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
786 struct airoha_flow_table_entry *e)
787 {
788 spin_lock_bh(&ppe_lock);
789
790 if (e->type == FLOW_TYPE_L2)
791 airoha_ppe_foe_remove_l2_flow(ppe, e);
792 else
793 airoha_ppe_foe_remove_flow(ppe, e);
794
795 spin_unlock_bh(&ppe_lock);
796 }
797
/* Materialize an L2 flow @e as a hw subflow in FOE slot @hash: merge the
 * slot's current packet-type/UDP ib1 bits with the parent's, overlay the
 * parent's L2 rewrite info, and commit. A tracking entry is allocated and
 * linked to the parent so teardown can find it. Caller must hold
 * ppe_lock.
 */
static int
airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
				    struct airoha_flow_table_entry *e,
				    u32 hash, bool rx_wlan)
{
	u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
	struct airoha_foe_entry *hwe_p, hwe;
	struct airoha_flow_table_entry *f;
	int type;

	hwe_p = airoha_ppe_foe_get_entry_locked(ppe, hash);
	if (!hwe_p)
		return -EINVAL;

	f = kzalloc_obj(*f, GFP_ATOMIC);
	if (!f)
		return -ENOMEM;

	hlist_add_head(&f->l2_subflow_node, &e->l2_flows);
	f->type = FLOW_TYPE_L2_SUBFLOW;
	f->hash = hash;

	/* Work on a stack copy; keep only type/UDP from the hw slot. */
	memcpy(&hwe, hwe_p, sizeof(*hwe_p));
	hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);

	type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
	if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		memcpy(&hwe.ipv6.l2, &e->data.bridge.l2, sizeof(hwe.ipv6.l2));
		hwe.ipv6.ib2 = e->data.bridge.ib2;
		/* setting smac_id to 0xf instruct the hw to keep original
		 * source mac address
		 */
		hwe.ipv6.l2.src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID,
						    0xf);
	} else {
		memcpy(&hwe.bridge.l2, &e->data.bridge.l2,
		       sizeof(hwe.bridge.l2));
		hwe.bridge.ib2 = e->data.bridge.ib2;
		if (type == PPE_PKT_TYPE_IPV4_HNAPT)
			memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
			       sizeof(hwe.ipv4.new_tuple));
	}

	hwe.bridge.data = e->data.bridge.data;
	airoha_ppe_foe_commit_entry(ppe, &hwe, hash, rx_wlan);

	return 0;
}
846
/* Slow-path hook called for a packet that missed binding in hw slot
 * @hash: bind a matching sw flow into the slot, garbage-collect stale L2
 * subflows found on the way, or — if no L4 flow matches — try to spawn an
 * L2 subflow from the packet's MAC addresses.
 */
static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
					struct sk_buff *skb,
					u32 hash, bool rx_wlan)
{
	struct airoha_flow_table_entry *e;
	struct airoha_foe_bridge br = {};
	struct airoha_foe_entry *hwe;
	bool commit_done = false;
	struct hlist_node *n;
	u32 index, state;

	spin_lock_bh(&ppe_lock);

	hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
	if (!hwe)
		goto unlock;

	/* Already bound by a concurrent path - nothing to do. */
	state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
	if (state == AIROHA_FOE_STATE_BIND)
		goto unlock;

	index = airoha_ppe_foe_get_entry_hash(ppe, hwe);
	hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
		if (e->type == FLOW_TYPE_L2_SUBFLOW) {
			/* Reap subflows whose hw slot is no longer bound. */
			state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
			if (state != AIROHA_FOE_STATE_BIND) {
				e->hash = 0xffff;
				airoha_ppe_foe_remove_flow(ppe, e);
			}
			continue;
		}

		if (!airoha_ppe_foe_compare_entry(e, hwe))
			continue;

		airoha_ppe_foe_commit_entry(ppe, &e->data, hash, rx_wlan);
		commit_done = true;
		e->hash = hash;
	}

	if (commit_done)
		goto unlock;

	/* No L4 match: look up an L2 flow by the packet's MAC pair. */
	airoha_ppe_foe_set_bridge_addrs(&br, eth_hdr(skb));
	e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
				   airoha_l2_flow_table_params);
	if (e)
		airoha_ppe_foe_commit_subflow_entry(ppe, e, hash, rx_wlan);
unlock:
	spin_unlock_bh(&ppe_lock);
}
898
899 static int
airoha_ppe_foe_l2_flow_commit_entry(struct airoha_ppe * ppe,struct airoha_flow_table_entry * e)900 airoha_ppe_foe_l2_flow_commit_entry(struct airoha_ppe *ppe,
901 struct airoha_flow_table_entry *e)
902 {
903 struct airoha_flow_table_entry *prev;
904
905 e->type = FLOW_TYPE_L2;
906 prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &e->l2_node,
907 airoha_l2_flow_table_params);
908 if (!prev)
909 return 0;
910
911 if (IS_ERR(prev))
912 return PTR_ERR(prev);
913
914 return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
915 &e->l2_node,
916 airoha_l2_flow_table_params);
917 }
918
airoha_ppe_foe_flow_commit_entry(struct airoha_ppe * ppe,struct airoha_flow_table_entry * e)919 static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
920 struct airoha_flow_table_entry *e)
921 {
922 int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
923 u32 hash;
924
925 if (type == PPE_PKT_TYPE_BRIDGE)
926 return airoha_ppe_foe_l2_flow_commit_entry(ppe, e);
927
928 hash = airoha_ppe_foe_get_entry_hash(ppe, &e->data);
929 e->type = FLOW_TYPE_L4;
930 e->hash = 0xffff;
931
932 spin_lock_bh(&ppe_lock);
933 hlist_add_head(&e->list, &ppe->foe_flow[hash]);
934 spin_unlock_bh(&ppe_lock);
935
936 return 0;
937 }
938
/* Compute how long the entry described by @ib1 has been idle, as the
 * difference between the current hw timestamp and the entry's timestamp.
 * Bound and unbound entries use different (differently sized) timestamp
 * fields; the final expression corrects for counter wraparound.
 */
static int airoha_ppe_get_entry_idle_time(struct airoha_ppe *ppe, u32 ib1)
{
	u32 state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
	u32 ts, ts_mask, now = airoha_ppe_get_timestamp(ppe);
	int idle;

	if (state == AIROHA_FOE_STATE_BIND) {
		ts = FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, ib1);
		ts_mask = AIROHA_FOE_IB1_BIND_TIMESTAMP;
	} else {
		ts = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, ib1);
		now = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, now);
		ts_mask = AIROHA_FOE_IB1_UNBIND_TIMESTAMP;
	}
	idle = now - ts;

	/* A negative delta means the hw counter wrapped. */
	return idle < 0 ? idle + ts_mask + 1 : idle;
}
957
/* Refresh an L2 flow's timestamp from its most-recently-active hw
 * subflow (the one with the smallest idle time), reaping subflows whose
 * hw slot is no longer bound. Caller must hold ppe_lock.
 */
static void
airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
				    struct airoha_flow_table_entry *e)
{
	int min_idle = airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
	struct airoha_flow_table_entry *iter;
	struct hlist_node *n;

	lockdep_assert_held(&ppe_lock);

	hlist_for_each_entry_safe(iter, n, &e->l2_flows, l2_subflow_node) {
		struct airoha_foe_entry *hwe;
		u32 ib1, state;
		int idle;

		hwe = airoha_ppe_foe_get_entry_locked(ppe, iter->hash);
		if (!hwe)
			continue;

		ib1 = READ_ONCE(hwe->ib1);
		state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
		if (state != AIROHA_FOE_STATE_BIND) {
			iter->hash = 0xffff;
			airoha_ppe_foe_remove_flow(ppe, iter);
			continue;
		}

		idle = airoha_ppe_get_entry_idle_time(ppe, ib1);
		if (idle >= min_idle)
			continue;

		/* Adopt the fresher subflow timestamp for the parent. */
		min_idle = idle;
		e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
		e->data.ib1 |= ib1 & AIROHA_FOE_IB1_BIND_TIMESTAMP;
	}
}
994
/* Sync sw flow @e with hw state: L2 flows aggregate their subflows; L4
 * flows re-read their hw slot, unbind (hash = 0xffff) if the slot now
 * holds a different flow, or adopt the slot's current ib1 (timestamp).
 */
static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe,
					     struct airoha_flow_table_entry *e)
{
	struct airoha_foe_entry *hwe_p, hwe = {};

	spin_lock_bh(&ppe_lock);

	if (e->type == FLOW_TYPE_L2) {
		airoha_ppe_foe_flow_l2_entry_update(ppe, e);
		goto unlock;
	}

	/* Not bound to a hw slot yet - nothing to sync. */
	if (e->hash == 0xffff)
		goto unlock;

	hwe_p = airoha_ppe_foe_get_entry_locked(ppe, e->hash);
	if (!hwe_p)
		goto unlock;

	/* Work on a snapshot of the hw entry. */
	memcpy(&hwe, hwe_p, sizeof(*hwe_p));
	if (!airoha_ppe_foe_compare_entry(e, &hwe)) {
		e->hash = 0xffff;
		goto unlock;
	}

	e->data.ib1 = hwe.ib1;
unlock:
	spin_unlock_bh(&ppe_lock);
}
1024
airoha_ppe_entry_idle_time(struct airoha_ppe * ppe,struct airoha_flow_table_entry * e)1025 static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe,
1026 struct airoha_flow_table_entry *e)
1027 {
1028 airoha_ppe_foe_flow_entry_update(ppe, e);
1029
1030 return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
1031 }
1032
/* FLOW_CLS_REPLACE handler: translate a flower rule into a FOE entry.
 *
 * Parses the dissector keys (control, basic, eth addrs / ipv4 / ipv6,
 * ports) and actions (mangle, redirect, vlan, pppoe) into an
 * airoha_foe_entry, commits it to the PPE and tracks it in the sw
 * flow table keyed by f->cookie.
 *
 * Returns 0 on success, -EEXIST for duplicate cookies, -EOPNOTSUPP for
 * unsupported matches/actions, or a negative errno on commit failure.
 */
static int airoha_ppe_flow_offload_replace(struct airoha_eth *eth,
					   struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct airoha_flow_table_entry *e;
	struct airoha_flow_data data = {};
	struct net_device *odev = NULL;
	struct flow_action_entry *act;
	struct airoha_foe_entry hwe;
	int err, i, offload_type;
	u16 addr_type = 0;
	u8 l4proto = 0;

	/* Refuse to install the same cookie twice */
	if (rhashtable_lookup(&eth->flow_table, &f->cookie,
			      airoha_flow_table_params))
		return -EEXIST;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return -EOPNOTSUPP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
		/* Control flags (frag, ...) are not supported in hw */
		if (flow_rule_has_control_flags(match.mask->flags,
						f->common.extack))
			return -EOPNOTSUPP;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		l4proto = match.key->ip_proto;
	} else {
		return -EOPNOTSUPP;
	}

	/* Pick the FOE packet type from the address family. addr_type == 0
	 * means no L3 match: treat it as a bridged (L2) flow.
	 */
	switch (addr_type) {
	case 0:
		offload_type = PPE_PKT_TYPE_BRIDGE;
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
			struct flow_match_eth_addrs match;

			flow_rule_match_eth_addrs(rule, &match);
			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
		} else {
			return -EOPNOTSUPP;
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		offload_type = PPE_PKT_TYPE_IPV4_HNAPT;
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		offload_type = PPE_PKT_TYPE_IPV6_ROUTE_5T;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* First action pass: collect redirect target, eth mangling and
	 * vlan/pppoe encap state. L3/L4 mangles are applied in a second
	 * pass once the tuple is known.
	 */
	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE:
			if (offload_type == PPE_PKT_TYPE_BRIDGE)
				return -EOPNOTSUPP;

			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
				airoha_ppe_flow_mangle_eth(act, &data.eth);
			break;
		case FLOW_ACTION_REDIRECT:
			odev = act->dev;
			break;
		case FLOW_ACTION_CSUM:
			break;
		case FLOW_ACTION_VLAN_PUSH:
			/* hw supports at most two 802.1Q tags */
			if (data.vlan.num == 2 ||
			    act->vlan.proto != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan.hdr[data.vlan.num].id = act->vlan.vid;
			data.vlan.hdr[data.vlan.num].proto = act->vlan.proto;
			data.vlan.num++;
			break;
		case FLOW_ACTION_VLAN_POP:
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			/* single PPPoE session, incompatible with QinQ */
			if (data.pppoe.num == 1 || data.vlan.num == 2)
				return -EOPNOTSUPP;

			data.pppoe.sid = act->pppoe.sid;
			data.pppoe.num++;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (!is_valid_ether_addr(data.eth.h_source) ||
	    !is_valid_ether_addr(data.eth.h_dest))
		return -EINVAL;

	err = airoha_ppe_foe_entry_prepare(eth, &hwe, odev, offload_type,
					   &data, l4proto);
	if (err)
		return err;

	/* L4 ports are mandatory for routed flows, meaningless for bridge */
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports ports;

		if (offload_type == PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		flow_rule_match_ports(rule, &ports);
		data.src_port = ports.key->src;
		data.dst_port = ports.key->dst;
	} else if (offload_type != PPE_PKT_TYPE_BRIDGE) {
		return -EOPNOTSUPP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs addrs;

		flow_rule_match_ipv4_addrs(rule, &addrs);
		data.v4.src_addr = addrs.key->src;
		data.v4.dst_addr = addrs.key->dst;
		airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, false);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs addrs;

		flow_rule_match_ipv6_addrs(rule, &addrs);

		data.v6.src_addr = addrs.key->src;
		data.v6.dst_addr = addrs.key->dst;
		airoha_ppe_foe_entry_set_ipv6_tuple(&hwe, &data);
	}

	/* Second action pass: apply NAT-style L3/L4 rewrites on top of the
	 * original tuple set above. ETH mangles were handled earlier.
	 */
	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		if (offload_type == PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			err = airoha_ppe_flow_mangle_ports(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			err = airoha_ppe_flow_mangle_ipv4(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
			/* handled earlier */
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (err)
			return err;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		/* egress = true: store the post-NAT tuple */
		err = airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, true);
		if (err)
			return err;
	}

	/* NOTE(review): assumes kzalloc_obj() zero-allocates with
	 * GFP_KERNEL semantics — confirm against the slab helper.
	 */
	e = kzalloc_obj(*e);
	if (!e)
		return -ENOMEM;

	e->cookie = f->cookie;
	memcpy(&e->data, &hwe, sizeof(e->data));

	err = airoha_ppe_foe_flow_commit_entry(eth->ppe, e);
	if (err)
		goto free_entry;

	err = rhashtable_insert_fast(&eth->flow_table, &e->node,
				     airoha_flow_table_params);
	if (err < 0)
		goto remove_foe_entry;

	return 0;

remove_foe_entry:
	airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
free_entry:
	kfree(e);

	return err;
}
1232
airoha_ppe_flow_offload_destroy(struct airoha_eth * eth,struct flow_cls_offload * f)1233 static int airoha_ppe_flow_offload_destroy(struct airoha_eth *eth,
1234 struct flow_cls_offload *f)
1235 {
1236 struct airoha_flow_table_entry *e;
1237
1238 e = rhashtable_lookup(ð->flow_table, &f->cookie,
1239 airoha_flow_table_params);
1240 if (!e)
1241 return -ENOENT;
1242
1243 airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
1244 rhashtable_remove_fast(ð->flow_table, &e->node,
1245 airoha_flow_table_params);
1246 kfree(e);
1247
1248 return 0;
1249 }
1250
/* Read the 64-bit packet/byte counters for a FOE entry.
 *
 * The low 32 bits live in NPU memory (read via memcpy_fromio), the high
 * 32 bits in the driver's coherent foe_stats array; the two halves are
 * combined here. Silently returns without touching @stats when flow
 * stats are unsupported, the hash has no stats slot, or the index is
 * out of range.
 */
void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
				    struct airoha_foe_stats64 *stats)
{
	struct airoha_eth *eth = ppe->eth;
	int ppe_num_stats_entries;
	struct airoha_npu *npu;
	u32 index;

	ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
	if (ppe_num_stats_entries < 0)
		return;

	if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index))
		return;

	if (index >= ppe_num_stats_entries)
		return;

	/* eth->npu is RCU-protected; it may be cleared on teardown */
	rcu_read_lock();

	npu = rcu_dereference(eth->npu);
	if (npu) {
		u64 packets = ppe->foe_stats[index].packets;
		u64 bytes = ppe->foe_stats[index].bytes;
		struct airoha_foe_stats npu_stats;

		memcpy_fromio(&npu_stats, &npu->stats[index],
			      sizeof(*npu->stats));
		/* high word from the driver, low word from the NPU */
		stats->packets = packets << 32 | npu_stats.packets;
		stats->bytes = bytes << 32 | npu_stats.bytes;
	}

	rcu_read_unlock();
}
1285
airoha_ppe_flow_offload_stats(struct airoha_eth * eth,struct flow_cls_offload * f)1286 static int airoha_ppe_flow_offload_stats(struct airoha_eth *eth,
1287 struct flow_cls_offload *f)
1288 {
1289 struct airoha_flow_table_entry *e;
1290 u32 idle;
1291
1292 e = rhashtable_lookup(ð->flow_table, &f->cookie,
1293 airoha_flow_table_params);
1294 if (!e)
1295 return -ENOENT;
1296
1297 idle = airoha_ppe_entry_idle_time(eth->ppe, e);
1298 f->stats.lastused = jiffies - idle * HZ;
1299
1300 if (e->hash != 0xffff) {
1301 struct airoha_foe_stats64 stats = {};
1302
1303 airoha_ppe_foe_entry_get_stats(eth->ppe, e->hash, &stats);
1304 f->stats.pkts += (stats.packets - e->stats.packets);
1305 f->stats.bytes += (stats.bytes - e->stats.bytes);
1306 e->stats = stats;
1307 }
1308
1309 return 0;
1310 }
1311
airoha_ppe_flow_offload_cmd(struct airoha_eth * eth,struct flow_cls_offload * f)1312 static int airoha_ppe_flow_offload_cmd(struct airoha_eth *eth,
1313 struct flow_cls_offload *f)
1314 {
1315 switch (f->command) {
1316 case FLOW_CLS_REPLACE:
1317 return airoha_ppe_flow_offload_replace(eth, f);
1318 case FLOW_CLS_DESTROY:
1319 return airoha_ppe_flow_offload_destroy(eth, f);
1320 case FLOW_CLS_STATS:
1321 return airoha_ppe_flow_offload_stats(eth, f);
1322 default:
1323 break;
1324 }
1325
1326 return -EOPNOTSUPP;
1327 }
1328
airoha_ppe_flush_sram_entries(struct airoha_ppe * ppe)1329 static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe)
1330 {
1331 u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
1332 struct airoha_foe_entry *hwe = ppe->foe;
1333 int i, err = 0;
1334
1335 for (i = 0; i < sram_num_entries; i++) {
1336 int err;
1337
1338 memset(&hwe[i], 0, sizeof(*hwe));
1339 err = airoha_ppe_foe_commit_sram_entry(ppe, i);
1340 if (err)
1341 break;
1342 }
1343
1344 return err;
1345 }
1346
airoha_ppe_npu_get(struct airoha_eth * eth)1347 static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
1348 {
1349 struct airoha_npu *npu = airoha_npu_get(eth->dev);
1350
1351 if (IS_ERR(npu)) {
1352 request_module("airoha-npu");
1353 npu = airoha_npu_get(eth->dev);
1354 }
1355
1356 return npu;
1357 }
1358
/* Poll until the NPU firmware has programmed the PPE flow config.
 *
 * Polls REG_PPE_PPE_FLOW_CFG every 1 ms for up to 100 ms, succeeding
 * as soon as the register reads non-zero; PPE1 is checked too when it
 * is enabled. Returns 0 on success or -ETIMEDOUT from
 * read_poll_timeout().
 */
static int airoha_ppe_wait_for_npu_init(struct airoha_eth *eth)
{
	int err;
	u32 val;

	/* PPE_FLOW_CFG default register value is 0. Since we reset FE
	 * during the device probe we can just check the configured value
	 * is not 0 here.
	 */
	err = read_poll_timeout(airoha_fe_rr, val, val, USEC_PER_MSEC,
				100 * USEC_PER_MSEC, false, eth,
				REG_PPE_PPE_FLOW_CFG(0));
	if (err)
		return err;

	if (airoha_ppe_is_enabled(eth, 1))
		err = read_poll_timeout(airoha_fe_rr, val, val, USEC_PER_MSEC,
					100 * USEC_PER_MSEC, false, eth,
					REG_PPE_PPE_FLOW_CFG(1));

	return err;
}
1381
/* One-time PPE offload bring-up, performed lazily on the first flower
 * command.
 *
 * Acquires the NPU, runs its PPE init, waits for the firmware to
 * finish configuration, optionally wires up flow stats, then programs
 * the PPE registers and publishes the NPU pointer under RCU. On any
 * failure the NPU reference is dropped and the error is returned.
 *
 * Called with flow_offload_mutex held (see the rcu_replace_pointer()
 * in airoha_ppe_deinit() pairing with the assignment below).
 */
static int airoha_ppe_offload_setup(struct airoha_eth *eth)
{
	struct airoha_npu *npu = airoha_ppe_npu_get(eth);
	struct airoha_ppe *ppe = eth->ppe;
	int err, ppe_num_stats_entries;

	if (IS_ERR(npu))
		return PTR_ERR(npu);

	err = npu->ops.ppe_init(npu);
	if (err)
		goto error_npu_put;

	/* Wait for NPU PPE configuration to complete */
	err = airoha_ppe_wait_for_npu_init(eth);
	if (err)
		goto error_npu_put;

	/* Flow stats are optional: negative means unsupported on this SoC */
	ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
	if (ppe_num_stats_entries > 0) {
		err = npu->ops.ppe_init_stats(npu, ppe->foe_stats_dma,
					      ppe_num_stats_entries);
		if (err)
			goto error_npu_put;
	}

	airoha_ppe_hw_init(ppe);
	airoha_ppe_foe_flow_stats_reset(ppe, npu);

	/* Publish the NPU only after hw init so readers see a ready device */
	rcu_assign_pointer(eth->npu, npu);
	synchronize_rcu();

	return 0;

error_npu_put:
	airoha_npu_put(npu);

	return err;
}
1421
airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev * dev,void * type_data)1422 int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data)
1423 {
1424 struct airoha_ppe *ppe = dev->priv;
1425 struct airoha_eth *eth = ppe->eth;
1426 int err = 0;
1427
1428 /* Netfilter flowtable can try to offload flower rules while not all
1429 * the net_devices are registered or initialized. Delay offloading
1430 * until all net_devices are registered in the system.
1431 */
1432 if (!test_bit(DEV_STATE_REGISTERED, ð->state))
1433 return -EBUSY;
1434
1435 mutex_lock(&flow_offload_mutex);
1436
1437 if (!eth->npu)
1438 err = airoha_ppe_offload_setup(eth);
1439 if (!err)
1440 err = airoha_ppe_flow_offload_cmd(eth, type_data);
1441
1442 mutex_unlock(&flow_offload_mutex);
1443
1444 return err;
1445 }
1446
/* Consider an rx skb for FOE entry insertion.
 *
 * Per-hash checks are rate limited to once every HZ/10 jiffies using a
 * truncated 16-bit jiffies stamp, so wraparound is handled by the u16
 * subtraction.
 */
void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
			  u16 hash, bool rx_wlan)
{
	struct airoha_ppe *ppe = dev->priv;
	u32 hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1;
	u16 stamp = (u16)jiffies;

	if (hash > hash_mask)
		return;

	if ((u16)(stamp - ppe->foe_check_time[hash]) < HZ / 10)
		return;

	ppe->foe_check_time[hash] = stamp;
	airoha_ppe_foe_insert_entry(ppe, skb, hash, rx_wlan);
}
1465
airoha_ppe_init_upd_mem(struct airoha_gdm_port * port)1466 void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
1467 {
1468 struct airoha_eth *eth = port->qdma->eth;
1469 struct net_device *dev = port->dev;
1470 const u8 *addr = dev->dev_addr;
1471 u32 val;
1472
1473 val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
1474 airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
1475 airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
1476 FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
1477 PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
1478
1479 val = (addr[0] << 8) | addr[1];
1480 airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
1481 airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
1482 FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
1483 FIELD_PREP(PPE_UPDMEM_OFFSET_MASK, 1) |
1484 PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
1485 }
1486
/* Resolve the PPE device for a consumer driver via its "airoha,eth"
 * phandle.
 *
 * On success the caller holds: a module reference on this driver, a
 * device reference on the eth platform device (released by
 * airoha_ppe_put_dev()), and a managed device link that is auto-removed
 * when the supplier unbinds. Returns ERR_PTR(-ENODEV) on any failure,
 * with all intermediate references dropped.
 */
struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev)
{
	struct platform_device *pdev;
	struct device_node *np;
	struct airoha_eth *eth;

	np = of_parse_phandle(dev->of_node, "airoha,eth", 0);
	if (!np)
		return ERR_PTR(-ENODEV);

	/* of_find_device_by_node() takes a reference on pdev; it is kept
	 * on success and dropped on every error path below.
	 */
	pdev = of_find_device_by_node(np);
	if (!pdev) {
		dev_err(dev, "cannot find device node %s\n", np->name);
		of_node_put(np);
		return ERR_PTR(-ENODEV);
	}
	of_node_put(np);

	if (!try_module_get(THIS_MODULE)) {
		dev_err(dev, "failed to get the device driver module\n");
		goto error_pdev_put;
	}

	/* drvdata is NULL until airoha_eth probe has completed */
	eth = platform_get_drvdata(pdev);
	if (!eth)
		goto error_module_put;

	if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) {
		dev_err(&pdev->dev,
			"failed to create device link to consumer %s\n",
			dev_name(dev));
		goto error_module_put;
	}

	return &eth->ppe->dev;

error_module_put:
	module_put(THIS_MODULE);
	/* fall through: also drop the pdev reference */
error_pdev_put:
	platform_device_put(pdev);

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(airoha_ppe_get_dev);
1531
airoha_ppe_put_dev(struct airoha_ppe_dev * dev)1532 void airoha_ppe_put_dev(struct airoha_ppe_dev *dev)
1533 {
1534 struct airoha_ppe *ppe = dev->priv;
1535 struct airoha_eth *eth = ppe->eth;
1536
1537 module_put(THIS_MODULE);
1538 put_device(eth->dev);
1539 }
1540 EXPORT_SYMBOL_GPL(airoha_ppe_put_dev);
1541
airoha_ppe_init(struct airoha_eth * eth)1542 int airoha_ppe_init(struct airoha_eth *eth)
1543 {
1544 int foe_size, err, ppe_num_stats_entries;
1545 u32 ppe_num_entries;
1546 struct airoha_ppe *ppe;
1547
1548 ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
1549 if (!ppe)
1550 return -ENOMEM;
1551
1552 ppe->dev.ops.setup_tc_block_cb = airoha_ppe_setup_tc_block_cb;
1553 ppe->dev.ops.check_skb = airoha_ppe_check_skb;
1554 ppe->dev.priv = ppe;
1555 ppe->eth = eth;
1556 eth->ppe = ppe;
1557
1558 ppe_num_entries = airoha_ppe_get_total_num_entries(ppe);
1559 foe_size = ppe_num_entries * sizeof(struct airoha_foe_entry);
1560 ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
1561 GFP_KERNEL);
1562 if (!ppe->foe)
1563 return -ENOMEM;
1564
1565 ppe->foe_flow = devm_kzalloc(eth->dev,
1566 ppe_num_entries * sizeof(*ppe->foe_flow),
1567 GFP_KERNEL);
1568 if (!ppe->foe_flow)
1569 return -ENOMEM;
1570
1571 ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
1572 if (ppe_num_stats_entries > 0) {
1573 foe_size = ppe_num_stats_entries * sizeof(*ppe->foe_stats);
1574 ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size,
1575 &ppe->foe_stats_dma,
1576 GFP_KERNEL);
1577 if (!ppe->foe_stats)
1578 return -ENOMEM;
1579 }
1580
1581 ppe->foe_check_time = devm_kzalloc(eth->dev, ppe_num_entries,
1582 GFP_KERNEL);
1583 if (!ppe->foe_check_time)
1584 return -ENOMEM;
1585
1586 err = airoha_ppe_flush_sram_entries(ppe);
1587 if (err)
1588 return err;
1589
1590 err = rhashtable_init(ð->flow_table, &airoha_flow_table_params);
1591 if (err)
1592 return err;
1593
1594 err = rhashtable_init(&ppe->l2_flows, &airoha_l2_flow_table_params);
1595 if (err)
1596 goto error_flow_table_destroy;
1597
1598 err = airoha_ppe_debugfs_init(ppe);
1599 if (err)
1600 goto error_l2_flow_table_destroy;
1601
1602 return 0;
1603
1604 error_l2_flow_table_destroy:
1605 rhashtable_destroy(&ppe->l2_flows);
1606 error_flow_table_destroy:
1607 rhashtable_destroy(ð->flow_table);
1608
1609 return err;
1610 }
1611
airoha_ppe_deinit(struct airoha_eth * eth)1612 void airoha_ppe_deinit(struct airoha_eth *eth)
1613 {
1614 struct airoha_npu *npu;
1615
1616 mutex_lock(&flow_offload_mutex);
1617
1618 npu = rcu_replace_pointer(eth->npu, NULL,
1619 lockdep_is_held(&flow_offload_mutex));
1620 if (npu) {
1621 npu->ops.ppe_deinit(npu);
1622 airoha_npu_put(npu);
1623 }
1624
1625 mutex_unlock(&flow_offload_mutex);
1626
1627 rhashtable_destroy(ð->ppe->l2_flows);
1628 rhashtable_destroy(ð->flow_table);
1629 debugfs_remove(eth->ppe->debugfs_dir);
1630 }
1631