xref: /linux/drivers/net/ethernet/airoha/airoha_ppe.c (revision fb7399cf2d0b33825b8039f95c45395c7deba25c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2025 AIROHA Inc
4  * Author: Lorenzo Bianconi <lorenzo@kernel.org>
5  */
6 
7 #include <linux/ip.h>
8 #include <linux/ipv6.h>
9 #include <linux/rhashtable.h>
10 #include <net/ipv6.h>
11 #include <net/pkt_cls.h>
12 
13 #include "airoha_npu.h"
14 #include "airoha_regs.h"
15 #include "airoha_eth.h"
16 
17 static DEFINE_MUTEX(flow_offload_mutex);
18 static DEFINE_SPINLOCK(ppe_lock);
19 
20 static const struct rhashtable_params airoha_flow_table_params = {
21 	.head_offset = offsetof(struct airoha_flow_table_entry, node),
22 	.key_offset = offsetof(struct airoha_flow_table_entry, cookie),
23 	.key_len = sizeof(unsigned long),
24 	.automatic_shrinking = true,
25 };
26 
27 static const struct rhashtable_params airoha_l2_flow_table_params = {
28 	.head_offset = offsetof(struct airoha_flow_table_entry, l2_node),
29 	.key_offset = offsetof(struct airoha_flow_table_entry, data.bridge),
30 	.key_len = 2 * ETH_ALEN,
31 	.automatic_shrinking = true,
32 };
33 
34 static bool airoha_ppe2_is_enabled(struct airoha_eth *eth)
35 {
36 	return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK;
37 }
38 
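/* Read the FOE timestamp counter from the frame engine, truncated to the
 * width of the ib1 bind timestamp field; used for entry idle time
 * accounting and for refreshing the bind timestamp on commit.
 */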
39 static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
40 {
41 	u16 timestamp = airoha_fe_rr(ppe->eth, REG_FE_FOE_TS);
42 
43 	return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp);
44 }
45 
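/* One-shot PPE configuration: FOE table base (the DRAM entries follow the
 * SRAM-sized region of the coherent table), bind aging deltas, hash mode
 * and seed, search-miss policy, per-port egress MTU and the number of
 * SRAM/DRAM data entries, programmed for both PPEs when PPE2 is enabled.
 */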
46 static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
47 {
48 	u32 sram_tb_size, sram_num_entries, dram_num_entries;
49 	struct airoha_eth *eth = ppe->eth;
50 	int i;
51 
52 	sram_tb_size = PPE_SRAM_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
53 	dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);
54 
55 	for (i = 0; i < PPE_NUM; i++) {
56 		int p;
57 
58 		airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
59 			     ppe->foe_dma + sram_tb_size);
60 
61 		airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
62 			      PPE_BIND_AGE0_DELTA_NON_L4 |
63 			      PPE_BIND_AGE0_DELTA_UDP,
64 			      FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) |
65 			      FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12));
66 		airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
67 			      PPE_BIND_AGE1_DELTA_TCP_FIN |
68 			      PPE_BIND_AGE1_DELTA_TCP,
69 			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
70 			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7));
71 
72 		airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
73 			      PPE_SRAM_TABLE_EN_MASK |
74 			      PPE_SRAM_HASH1_EN_MASK |
75 			      PPE_DRAM_TABLE_EN_MASK |
76 			      PPE_SRAM_HASH0_MODE_MASK |
77 			      PPE_SRAM_HASH1_MODE_MASK |
78 			      PPE_DRAM_HASH0_MODE_MASK |
79 			      PPE_DRAM_HASH1_MODE_MASK,
80 			      FIELD_PREP(PPE_SRAM_TABLE_EN_MASK, 1) |
81 			      FIELD_PREP(PPE_SRAM_HASH1_EN_MASK, 1) |
82 			      FIELD_PREP(PPE_SRAM_HASH1_MODE_MASK, 1) |
83 			      FIELD_PREP(PPE_DRAM_HASH1_MODE_MASK, 3));
84 
85 		airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
86 			      PPE_TB_CFG_SEARCH_MISS_MASK |
87 			      PPE_TB_CFG_KEEPALIVE_MASK |
88 			      PPE_TB_ENTRY_SIZE_MASK,
89 			      FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
90 			      FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0));
91 
92 		airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);
93 
94 		for (p = 0; p < ARRAY_SIZE(eth->ports); p++)
95 			airoha_fe_rmw(eth, REG_PPE_MTU(i, p),
96 				      FP0_EGRESS_MTU_MASK |
97 				      FP1_EGRESS_MTU_MASK,
98 				      FIELD_PREP(FP0_EGRESS_MTU_MASK,
99 						 AIROHA_MAX_MTU) |
100 				      FIELD_PREP(FP1_EGRESS_MTU_MASK,
101 						 AIROHA_MAX_MTU));
102 	}
103 
104 	if (airoha_ppe2_is_enabled(eth)) {
105 		sram_num_entries =
106 			PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_DATA_ENTRIES);
107 		airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
108 			      PPE_SRAM_TB_NUM_ENTRY_MASK |
109 			      PPE_DRAM_TB_NUM_ENTRY_MASK,
110 			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
111 					 sram_num_entries) |
112 			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
113 					 dram_num_entries));
114 		airoha_fe_rmw(eth, REG_PPE_TB_CFG(1),
115 			      PPE_SRAM_TB_NUM_ENTRY_MASK |
116 			      PPE_DRAM_TB_NUM_ENTRY_MASK,
117 			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
118 					 sram_num_entries) |
119 			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
120 					 dram_num_entries));
121 	} else {
122 		sram_num_entries =
123 			PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_DATA_ENTRIES);
124 		airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
125 			      PPE_SRAM_TB_NUM_ENTRY_MASK |
126 			      PPE_DRAM_TB_NUM_ENTRY_MASK,
127 			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
128 					 sram_num_entries) |
129 			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
130 					 dram_num_entries));
131 	}
132 }
133 
134 static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act, void *eth)
135 {
136 	void *dest = eth + act->mangle.offset;
137 	const void *src = &act->mangle.val;
138 
139 	if (act->mangle.offset > 8)
140 		return;
141 
142 	if (act->mangle.mask == 0xffff) {
143 		src += 2;
144 		dest += 2;
145 	}
146 
147 	memcpy(dest, src, act->mangle.mask ? 2 : 4);
148 }
149 
150 static int airoha_ppe_flow_mangle_ports(const struct flow_action_entry *act,
151 					struct airoha_flow_data *data)
152 {
153 	u32 val = be32_to_cpu((__force __be32)act->mangle.val);
154 
155 	switch (act->mangle.offset) {
156 	case 0:
157 		if ((__force __be32)act->mangle.mask == ~cpu_to_be32(0xffff))
158 			data->dst_port = cpu_to_be16(val);
159 		else
160 			data->src_port = cpu_to_be16(val >> 16);
161 		break;
162 	case 2:
163 		data->dst_port = cpu_to_be16(val);
164 		break;
165 	default:
166 		return -EINVAL;
167 	}
168 
169 	return 0;
170 }
171 
172 static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
173 				       struct airoha_flow_data *data)
174 {
175 	__be32 *dest;
176 
177 	switch (act->mangle.offset) {
178 	case offsetof(struct iphdr, saddr):
179 		dest = &data->v4.src_addr;
180 		break;
181 	case offsetof(struct iphdr, daddr):
182 		dest = &data->v4.dst_addr;
183 		break;
184 	default:
185 		return -EINVAL;
186 	}
187 
188 	memcpy(dest, &act->mangle.val, sizeof(u32));
189 
190 	return 0;
191 }
192 
193 static int airoha_get_dsa_port(struct net_device **dev)
194 {
195 #if IS_ENABLED(CONFIG_NET_DSA)
196 	struct dsa_port *dp = dsa_port_from_netdev(*dev);
197 
198 	if (IS_ERR(dp))
199 		return -ENODEV;
200 
201 	*dev = dsa_port_to_conduit(dp);
202 	return dp->index;
203 #else
204 	return -ENODEV;
205 #endif
206 }
207 
208 static void airoha_ppe_foe_set_bridge_addrs(struct airoha_foe_bridge *br,
209 					    struct ethhdr *eh)
210 {
211 	br->dest_mac_hi = get_unaligned_be32(eh->h_dest);
212 	br->dest_mac_lo = get_unaligned_be16(eh->h_dest + 4);
213 	br->src_mac_hi = get_unaligned_be16(eh->h_source);
214 	br->src_mac_lo = get_unaligned_be32(eh->h_source + 2);
215 }
216 
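/* Translate the parsed flow into a hw FOE entry: bind state, packet type
 * and VLAN layers in ib1; destination PSE port, QoS and fast-path flags
 * in ib2 (non-DSA devices are steered to GDM2 for the loopback based
 * uplink path); L2 addresses, VLAN tags and, for DSA ports, a port bitmap
 * in the etype field.
 */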
217 static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
218 					struct airoha_foe_entry *hwe,
219 					struct net_device *dev, int type,
220 					struct airoha_flow_data *data,
221 					int l4proto)
222 {
223 	int dsa_port = airoha_get_dsa_port(&dev);
224 	struct airoha_foe_mac_info_common *l2;
225 	u32 qdata, ports_pad, val;
226 	u8 smac_id = 0xf;
227 
228 	memset(hwe, 0, sizeof(*hwe));
229 
230 	val = FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, AIROHA_FOE_STATE_BIND) |
231 	      FIELD_PREP(AIROHA_FOE_IB1_BIND_PACKET_TYPE, type) |
232 	      FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
233 	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
234 	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
235 	      AIROHA_FOE_IB1_BIND_TTL;
236 	hwe->ib1 = val;
237 
238 	val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f) |
239 	      AIROHA_FOE_IB2_PSE_QOS;
240 	if (dsa_port >= 0)
241 		val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, dsa_port);
242 
243 	if (dev) {
244 		struct airoha_gdm_port *port = netdev_priv(dev);
245 		u8 pse_port;
246 
247 		if (!airoha_is_valid_gdm_port(eth, port))
248 			return -EINVAL;
249 
250 		if (dsa_port >= 0)
251 			pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
252 		else
253 			pse_port = 2; /* uplink relies on GDM2 loopback */
254 		val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);
255 
256 		/* For downlink traffic, consume SRAM memory for the hw
257 		 * forwarding descriptor queue.
258 		 */
259 		if (airhoa_is_lan_gdm_port(port))
260 			val |= AIROHA_FOE_IB2_FAST_PATH;
261 
262 		smac_id = port->id;
263 	}
264 
265 	if (is_multicast_ether_addr(data->eth.h_dest))
266 		val |= AIROHA_FOE_IB2_MULTICAST;
267 
268 	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
269 	if (type == PPE_PKT_TYPE_IPV4_ROUTE)
270 		hwe->ipv4.orig_tuple.ports = ports_pad;
271 	if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
272 		hwe->ipv6.ports = ports_pad;
273 
274 	qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f);
275 	if (type == PPE_PKT_TYPE_BRIDGE) {
276 		airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth);
277 		hwe->bridge.data = qdata;
278 		hwe->bridge.ib2 = val;
279 		l2 = &hwe->bridge.l2.common;
280 	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
281 		hwe->ipv6.data = qdata;
282 		hwe->ipv6.ib2 = val;
283 		l2 = &hwe->ipv6.l2;
284 	} else {
285 		hwe->ipv4.data = qdata;
286 		hwe->ipv4.ib2 = val;
287 		l2 = &hwe->ipv4.l2.common;
288 	}
289 
290 	l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
291 	l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
292 	if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
293 		l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
294 		hwe->ipv4.l2.src_mac_lo =
295 			get_unaligned_be16(data->eth.h_source + 4);
296 	} else {
297 		l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id);
298 	}
299 
300 	if (data->vlan.num) {
301 		l2->etype = dsa_port >= 0 ? BIT(dsa_port) : 0;
302 		l2->vlan1 = data->vlan.hdr[0].id;
303 		if (data->vlan.num == 2)
304 			l2->vlan2 = data->vlan.hdr[1].id;
305 	} else if (dsa_port >= 0) {
306 		l2->etype = BIT(15) | BIT(dsa_port);
307 	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
308 		l2->etype = ETH_P_IPV6;
309 	} else {
310 		l2->etype = ETH_P_IP;
311 	}
312 
313 	return 0;
314 }
315 
316 static int airoha_ppe_foe_entry_set_ipv4_tuple(struct airoha_foe_entry *hwe,
317 					       struct airoha_flow_data *data,
318 					       bool egress)
319 {
320 	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
321 	struct airoha_foe_ipv4_tuple *t;
322 
323 	switch (type) {
324 	case PPE_PKT_TYPE_IPV4_HNAPT:
325 		if (egress) {
326 			t = &hwe->ipv4.new_tuple;
327 			break;
328 		}
329 		fallthrough;
330 	case PPE_PKT_TYPE_IPV4_DSLITE:
331 	case PPE_PKT_TYPE_IPV4_ROUTE:
332 		t = &hwe->ipv4.orig_tuple;
333 		break;
334 	default:
335 		WARN_ON_ONCE(1);
336 		return -EINVAL;
337 	}
338 
339 	t->src_ip = be32_to_cpu(data->v4.src_addr);
340 	t->dest_ip = be32_to_cpu(data->v4.dst_addr);
341 
342 	if (type != PPE_PKT_TYPE_IPV4_ROUTE) {
343 		t->src_port = be16_to_cpu(data->src_port);
344 		t->dest_port = be16_to_cpu(data->dst_port);
345 	}
346 
347 	return 0;
348 }
349 
350 static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
351 					       struct airoha_flow_data *data)
352 
353 {
354 	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
355 	u32 *src, *dest;
356 
357 	switch (type) {
358 	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
359 	case PPE_PKT_TYPE_IPV6_6RD:
360 		hwe->ipv6.src_port = be16_to_cpu(data->src_port);
361 		hwe->ipv6.dest_port = be16_to_cpu(data->dst_port);
362 		fallthrough;
363 	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
364 		src = hwe->ipv6.src_ip;
365 		dest = hwe->ipv6.dest_ip;
366 		break;
367 	default:
368 		WARN_ON_ONCE(1);
369 		return -EINVAL;
370 	}
371 
372 	ipv6_addr_be32_to_cpu(src, data->v6.src_addr.s6_addr32);
373 	ipv6_addr_be32_to_cpu(dest, data->v6.dst_addr.s6_addr32);
374 
375 	return 0;
376 }
377 
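/* Software copy of the FOE hash: fold three 32-bit words, selected
 * according to the packet type, into a table index. This presumably
 * mirrors the hw hashing scheme so sw flows can be bucketed against
 * hw-reported entries; DSLITE and 6RD entries are not hashed here.
 */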
378 static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
379 {
380 	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
381 	u32 hash, hv1, hv2, hv3;
382 
383 	switch (type) {
384 	case PPE_PKT_TYPE_IPV4_ROUTE:
385 	case PPE_PKT_TYPE_IPV4_HNAPT:
386 		hv1 = hwe->ipv4.orig_tuple.ports;
387 		hv2 = hwe->ipv4.orig_tuple.dest_ip;
388 		hv3 = hwe->ipv4.orig_tuple.src_ip;
389 		break;
390 	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
391 	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
392 		hv1 = hwe->ipv6.src_ip[3] ^ hwe->ipv6.dest_ip[3];
393 		hv1 ^= hwe->ipv6.ports;
394 
395 		hv2 = hwe->ipv6.src_ip[2] ^ hwe->ipv6.dest_ip[2];
396 		hv2 ^= hwe->ipv6.dest_ip[0];
397 
398 		hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
399 		hv3 ^= hwe->ipv6.src_ip[0];
400 		break;
401 	case PPE_PKT_TYPE_BRIDGE: {
402 		struct airoha_foe_mac_info *l2 = &hwe->bridge.l2;
403 
404 		hv1 = l2->common.src_mac_hi & 0xffff;
405 		hv1 = hv1 << 16 | l2->src_mac_lo;
406 
407 		hv2 = l2->common.dest_mac_lo;
408 		hv2 = hv2 << 16;
409 		hv2 = hv2 | ((l2->common.src_mac_hi & 0xffff0000) >> 16);
410 
411 		hv3 = l2->common.dest_mac_hi;
412 		break;
413 	}
414 	case PPE_PKT_TYPE_IPV4_DSLITE:
415 	case PPE_PKT_TYPE_IPV6_6RD:
416 	default:
417 		WARN_ON_ONCE(1);
418 		return PPE_HASH_MASK;
419 	}
420 
421 	hash = (hv1 & hv2) | ((~hv1) & hv3);
422 	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
423 	hash ^= hv1 ^ hv2 ^ hv3;
424 	hash ^= hash >> 16;
425 	hash &= PPE_NUM_ENTRIES - 1;
426 
427 	return hash;
428 }
429 
430 static u32 airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, u32 hash)
431 {
432 	if (!airoha_ppe2_is_enabled(ppe->eth))
433 		return hash;
434 
435 	return hash >= PPE_STATS_NUM_ENTRIES ? hash - PPE1_STATS_NUM_ENTRIES
436 					     : hash;
437 }
438 
439 static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe,
440 						 struct airoha_npu *npu,
441 						 int index)
442 {
443 	memset_io(&npu->stats[index], 0, sizeof(*npu->stats));
444 	memset(&ppe->foe_stats[index], 0, sizeof(*ppe->foe_stats));
445 }
446 
447 static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe,
448 					    struct airoha_npu *npu)
449 {
450 	int i;
451 
452 	for (i = 0; i < PPE_STATS_NUM_ENTRIES; i++)
453 		airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i);
454 }
455 
456 static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
457 					     struct airoha_npu *npu,
458 					     struct airoha_foe_entry *hwe,
459 					     u32 hash)
460 {
461 	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
462 	u32 index, pse_port, val, *data, *ib2, *meter;
463 	u8 nbq;
464 
465 	index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
466 	if (index >= PPE_STATS_NUM_ENTRIES)
467 		return;
468 
469 	if (type == PPE_PKT_TYPE_BRIDGE) {
470 		data = &hwe->bridge.data;
471 		ib2 = &hwe->bridge.ib2;
472 		meter = &hwe->bridge.l2.meter;
473 	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
474 		data = &hwe->ipv6.data;
475 		ib2 = &hwe->ipv6.ib2;
476 		meter = &hwe->ipv6.meter;
477 	} else {
478 		data = &hwe->ipv4.data;
479 		ib2 = &hwe->ipv4.ib2;
480 		meter = &hwe->ipv4.l2.meter;
481 	}
482 
483 	airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index);
484 
485 	val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data);
486 	*data = (*data & ~AIROHA_FOE_ACTDP) |
487 		FIELD_PREP(AIROHA_FOE_ACTDP, val);
488 
489 	val = *ib2 & (AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
490 		      AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH);
491 	*meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val);
492 
493 	pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
494 	nbq = pse_port == 1 ? 6 : 5;
495 	*ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
496 		  AIROHA_FOE_IB2_PSE_QOS);
497 	*ib2 |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, 6) |
498 		FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
499 }
500 
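/* Return a pointer to the FOE entry inside the coherent shadow table.
 * Entries backed by PPE SRAM are first read back through REG_PPE_RAM_CTRL
 * into the shadow copy; NULL is returned if the read-back request is not
 * acked in time.
 */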
501 struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
502 						  u32 hash)
503 {
504 	if (hash < PPE_SRAM_NUM_ENTRIES) {
505 		u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
506 		struct airoha_eth *eth = ppe->eth;
507 		bool ppe2;
508 		u32 val;
509 		int i;
510 
511 		ppe2 = airoha_ppe2_is_enabled(ppe->eth) &&
512 		       hash >= PPE1_SRAM_NUM_ENTRIES;
513 		airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
514 			     FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
515 			     PPE_SRAM_CTRL_REQ_MASK);
516 		if (read_poll_timeout_atomic(airoha_fe_rr, val,
517 					     val & PPE_SRAM_CTRL_ACK_MASK,
518 					     10, 100, false, eth,
519 					     REG_PPE_RAM_CTRL(ppe2)))
520 			return NULL;
521 
522 		for (i = 0; i < sizeof(struct airoha_foe_entry) / 4; i++)
523 			hwe[i] = airoha_fe_rr(eth,
524 					      REG_PPE_RAM_ENTRY(ppe2, i));
525 	}
526 
527 	return ppe->foe + hash * sizeof(struct airoha_foe_entry);
528 }
529 
530 static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
531 					 struct airoha_foe_entry *hwe)
532 {
533 	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
534 	int len;
535 
536 	if ((hwe->ib1 ^ e->data.ib1) & AIROHA_FOE_IB1_BIND_UDP)
537 		return false;
538 
539 	if (type > PPE_PKT_TYPE_IPV4_DSLITE)
540 		len = offsetof(struct airoha_foe_entry, ipv6.data);
541 	else
542 		len = offsetof(struct airoha_foe_entry, ipv4.ib2);
543 
544 	return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
545 }
546 
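/* Copy the entry body before updating ib1 (hence the wmb()) so the hw
 * never sees a bound ib1 paired with stale data, refresh the bind
 * timestamp and, for SRAM-resident entries, ask the NPU to commit the
 * entry to PPE SRAM.
 */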
547 static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
548 				       struct airoha_foe_entry *e,
549 				       u32 hash)
550 {
551 	struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
552 	u32 ts = airoha_ppe_get_timestamp(ppe);
553 	struct airoha_eth *eth = ppe->eth;
554 	struct airoha_npu *npu;
555 	int err = 0;
556 
557 	memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
558 	wmb();
559 
560 	e->ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
561 	e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
562 	hwe->ib1 = e->ib1;
563 
564 	rcu_read_lock();
565 
566 	npu = rcu_dereference(eth->npu);
567 	if (!npu) {
568 		err = -ENODEV;
569 		goto unlock;
570 	}
571 
572 	airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);
573 
574 	if (hash < PPE_SRAM_NUM_ENTRIES) {
575 		dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
576 		bool ppe2 = airoha_ppe2_is_enabled(eth) &&
577 			    hash >= PPE1_SRAM_NUM_ENTRIES;
578 
579 		err = npu->ops.ppe_foe_commit_entry(npu, addr, sizeof(*hwe),
580 						    hash, ppe2);
581 	}
582 unlock:
583 	rcu_read_unlock();
584 
585 	return err;
586 }
587 
588 static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe,
589 				       struct airoha_flow_table_entry *e)
590 {
591 	lockdep_assert_held(&ppe_lock);
592 
593 	hlist_del_init(&e->list);
594 	if (e->hash != 0xffff) {
595 		e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
596 		e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
597 					  AIROHA_FOE_STATE_INVALID);
598 		airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
599 		e->hash = 0xffff;
600 	}
601 	if (e->type == FLOW_TYPE_L2_SUBFLOW) {
602 		hlist_del_init(&e->l2_subflow_node);
603 		kfree(e);
604 	}
605 }
606 
607 static void airoha_ppe_foe_remove_l2_flow(struct airoha_ppe *ppe,
608 					  struct airoha_flow_table_entry *e)
609 {
610 	struct hlist_head *head = &e->l2_flows;
611 	struct hlist_node *n;
612 
613 	lockdep_assert_held(&ppe_lock);
614 
615 	rhashtable_remove_fast(&ppe->l2_flows, &e->l2_node,
616 			       airoha_l2_flow_table_params);
617 	hlist_for_each_entry_safe(e, n, head, l2_subflow_node)
618 		airoha_ppe_foe_remove_flow(ppe, e);
619 }
620 
621 static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
622 					     struct airoha_flow_table_entry *e)
623 {
624 	spin_lock_bh(&ppe_lock);
625 
626 	if (e->type == FLOW_TYPE_L2)
627 		airoha_ppe_foe_remove_l2_flow(ppe, e);
628 	else
629 		airoha_ppe_foe_remove_flow(ppe, e);
630 
631 	spin_unlock_bh(&ppe_lock);
632 }
633 
634 static int
635 airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
636 				    struct airoha_flow_table_entry *e,
637 				    u32 hash)
638 {
639 	u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
640 	struct airoha_foe_entry *hwe_p, hwe;
641 	struct airoha_flow_table_entry *f;
642 	int type;
643 
644 	hwe_p = airoha_ppe_foe_get_entry(ppe, hash);
645 	if (!hwe_p)
646 		return -EINVAL;
647 
648 	f = kzalloc(sizeof(*f), GFP_ATOMIC);
649 	if (!f)
650 		return -ENOMEM;
651 
652 	hlist_add_head(&f->l2_subflow_node, &e->l2_flows);
653 	f->type = FLOW_TYPE_L2_SUBFLOW;
654 	f->hash = hash;
655 
656 	memcpy(&hwe, hwe_p, sizeof(*hwe_p));
657 	hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);
658 
659 	type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
660 	if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
661 		memcpy(&hwe.ipv6.l2, &e->data.bridge.l2, sizeof(hwe.ipv6.l2));
662 		hwe.ipv6.ib2 = e->data.bridge.ib2;
663 		/* Setting smac_id to 0xf instructs the hw to keep the
664 		 * original source mac address.
665 		 */
666 		hwe.ipv6.l2.src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID,
667 						    0xf);
668 	} else {
669 		memcpy(&hwe.bridge.l2, &e->data.bridge.l2,
670 		       sizeof(hwe.bridge.l2));
671 		hwe.bridge.ib2 = e->data.bridge.ib2;
672 		if (type == PPE_PKT_TYPE_IPV4_HNAPT)
673 			memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
674 			       sizeof(hwe.ipv4.new_tuple));
675 	}
676 
677 	hwe.bridge.data = e->data.bridge.data;
678 	airoha_ppe_foe_commit_entry(ppe, &hwe, hash);
679 
680 	return 0;
681 }
682 
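/* Rx slow path for an unbound hw entry at @hash: drop stale L2 subflows
 * hashed to the same bucket, bind the first matching sw flow, and fall
 * back to the L2 bridge table to spawn a new subflow if nothing matches.
 */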
683 static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
684 					struct sk_buff *skb,
685 					u32 hash)
686 {
687 	struct airoha_flow_table_entry *e;
688 	struct airoha_foe_bridge br = {};
689 	struct airoha_foe_entry *hwe;
690 	bool commit_done = false;
691 	struct hlist_node *n;
692 	u32 index, state;
693 
694 	spin_lock_bh(&ppe_lock);
695 
696 	hwe = airoha_ppe_foe_get_entry(ppe, hash);
697 	if (!hwe)
698 		goto unlock;
699 
700 	state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
701 	if (state == AIROHA_FOE_STATE_BIND)
702 		goto unlock;
703 
704 	index = airoha_ppe_foe_get_entry_hash(hwe);
705 	hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
706 		if (e->type == FLOW_TYPE_L2_SUBFLOW) {
707 			state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
708 			if (state != AIROHA_FOE_STATE_BIND) {
709 				e->hash = 0xffff;
710 				airoha_ppe_foe_remove_flow(ppe, e);
711 			}
712 			continue;
713 		}
714 
715 		if (commit_done || !airoha_ppe_foe_compare_entry(e, hwe)) {
716 			e->hash = 0xffff;
717 			continue;
718 		}
719 
720 		airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
721 		commit_done = true;
722 		e->hash = hash;
723 	}
724 
725 	if (commit_done)
726 		goto unlock;
727 
728 	airoha_ppe_foe_set_bridge_addrs(&br, eth_hdr(skb));
729 	e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
730 				   airoha_l2_flow_table_params);
731 	if (e)
732 		airoha_ppe_foe_commit_subflow_entry(ppe, e, hash);
733 unlock:
734 	spin_unlock_bh(&ppe_lock);
735 }
736 
737 static int
738 airoha_ppe_foe_l2_flow_commit_entry(struct airoha_ppe *ppe,
739 				    struct airoha_flow_table_entry *e)
740 {
741 	struct airoha_flow_table_entry *prev;
742 
743 	e->type = FLOW_TYPE_L2;
744 	prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &e->l2_node,
745 						 airoha_l2_flow_table_params);
746 	if (!prev)
747 		return 0;
748 
749 	if (IS_ERR(prev))
750 		return PTR_ERR(prev);
751 
752 	return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
753 				       &e->l2_node,
754 				       airoha_l2_flow_table_params);
755 }
756 
757 static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
758 					    struct airoha_flow_table_entry *e)
759 {
760 	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
761 	u32 hash;
762 
763 	if (type == PPE_PKT_TYPE_BRIDGE)
764 		return airoha_ppe_foe_l2_flow_commit_entry(ppe, e);
765 
766 	hash = airoha_ppe_foe_get_entry_hash(&e->data);
767 	e->type = FLOW_TYPE_L4;
768 	e->hash = 0xffff;
769 
770 	spin_lock_bh(&ppe_lock);
771 	hlist_add_head(&e->list, &ppe->foe_flow[hash]);
772 	spin_unlock_bh(&ppe_lock);
773 
774 	return 0;
775 }
776 
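/* Time elapsed since the entry (un)bind timestamp was last refreshed,
 * with wraparound of the hw counter taken into account.
 */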
777 static int airoha_ppe_get_entry_idle_time(struct airoha_ppe *ppe, u32 ib1)
778 {
779 	u32 state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
780 	u32 ts, ts_mask, now = airoha_ppe_get_timestamp(ppe);
781 	int idle;
782 
783 	if (state == AIROHA_FOE_STATE_BIND) {
784 		ts = FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, ib1);
785 		ts_mask = AIROHA_FOE_IB1_BIND_TIMESTAMP;
786 	} else {
787 		ts = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, ib1);
788 		now = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, now);
789 		ts_mask = AIROHA_FOE_IB1_UNBIND_TIMESTAMP;
790 	}
791 	idle = now - ts;
792 
793 	return idle < 0 ? idle + ts_mask + 1 : idle;
794 }
795 
796 static void
797 airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
798 				    struct airoha_flow_table_entry *e)
799 {
800 	int min_idle = airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
801 	struct airoha_flow_table_entry *iter;
802 	struct hlist_node *n;
803 
804 	lockdep_assert_held(&ppe_lock);
805 
806 	hlist_for_each_entry_safe(iter, n, &e->l2_flows, l2_subflow_node) {
807 		struct airoha_foe_entry *hwe;
808 		u32 ib1, state;
809 		int idle;
810 
811 		hwe = airoha_ppe_foe_get_entry(ppe, iter->hash);
		if (!hwe)
			continue;
812 		ib1 = READ_ONCE(hwe->ib1);
813 
814 		state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
815 		if (state != AIROHA_FOE_STATE_BIND) {
816 			iter->hash = 0xffff;
817 			airoha_ppe_foe_remove_flow(ppe, iter);
818 			continue;
819 		}
820 
821 		idle = airoha_ppe_get_entry_idle_time(ppe, ib1);
822 		if (idle >= min_idle)
823 			continue;
824 
825 		min_idle = idle;
826 		e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
827 		e->data.ib1 |= ib1 & AIROHA_FOE_IB1_BIND_TIMESTAMP;
828 	}
829 }
830 
831 static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe,
832 					     struct airoha_flow_table_entry *e)
833 {
834 	struct airoha_foe_entry *hwe_p, hwe = {};
835 
836 	spin_lock_bh(&ppe_lock);
837 
838 	if (e->type == FLOW_TYPE_L2) {
839 		airoha_ppe_foe_flow_l2_entry_update(ppe, e);
840 		goto unlock;
841 	}
842 
843 	if (e->hash == 0xffff)
844 		goto unlock;
845 
846 	hwe_p = airoha_ppe_foe_get_entry(ppe, e->hash);
847 	if (!hwe_p)
848 		goto unlock;
849 
850 	memcpy(&hwe, hwe_p, sizeof(*hwe_p));
851 	if (!airoha_ppe_foe_compare_entry(e, &hwe)) {
852 		e->hash = 0xffff;
853 		goto unlock;
854 	}
855 
856 	e->data.ib1 = hwe.ib1;
857 unlock:
858 	spin_unlock_bh(&ppe_lock);
859 }
860 
861 static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe,
862 				      struct airoha_flow_table_entry *e)
863 {
864 	airoha_ppe_foe_flow_entry_update(ppe, e);
865 
866 	return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
867 }
868 
869 static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
870 					   struct flow_cls_offload *f)
871 {
872 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
873 	struct airoha_eth *eth = port->qdma->eth;
874 	struct airoha_flow_table_entry *e;
875 	struct airoha_flow_data data = {};
876 	struct net_device *odev = NULL;
877 	struct flow_action_entry *act;
878 	struct airoha_foe_entry hwe;
879 	int err, i, offload_type;
880 	u16 addr_type = 0;
881 	u8 l4proto = 0;
882 
883 	if (rhashtable_lookup(&eth->flow_table, &f->cookie,
884 			      airoha_flow_table_params))
885 		return -EEXIST;
886 
887 	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
888 		return -EOPNOTSUPP;
889 
890 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
891 		struct flow_match_control match;
892 
893 		flow_rule_match_control(rule, &match);
894 		addr_type = match.key->addr_type;
895 		if (flow_rule_has_control_flags(match.mask->flags,
896 						f->common.extack))
897 			return -EOPNOTSUPP;
898 	} else {
899 		return -EOPNOTSUPP;
900 	}
901 
902 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
903 		struct flow_match_basic match;
904 
905 		flow_rule_match_basic(rule, &match);
906 		l4proto = match.key->ip_proto;
907 	} else {
908 		return -EOPNOTSUPP;
909 	}
910 
911 	switch (addr_type) {
912 	case 0:
913 		offload_type = PPE_PKT_TYPE_BRIDGE;
914 		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
915 			struct flow_match_eth_addrs match;
916 
917 			flow_rule_match_eth_addrs(rule, &match);
918 			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
919 			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
920 		} else {
921 			return -EOPNOTSUPP;
922 		}
923 		break;
924 	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
925 		offload_type = PPE_PKT_TYPE_IPV4_HNAPT;
926 		break;
927 	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
928 		offload_type = PPE_PKT_TYPE_IPV6_ROUTE_5T;
929 		break;
930 	default:
931 		return -EOPNOTSUPP;
932 	}
933 
934 	flow_action_for_each(i, act, &rule->action) {
935 		switch (act->id) {
936 		case FLOW_ACTION_MANGLE:
937 			if (offload_type == PPE_PKT_TYPE_BRIDGE)
938 				return -EOPNOTSUPP;
939 
940 			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
941 				airoha_ppe_flow_mangle_eth(act, &data.eth);
942 			break;
943 		case FLOW_ACTION_REDIRECT:
944 			odev = act->dev;
945 			break;
946 		case FLOW_ACTION_CSUM:
947 			break;
948 		case FLOW_ACTION_VLAN_PUSH:
949 			if (data.vlan.num == 2 ||
950 			    act->vlan.proto != htons(ETH_P_8021Q))
951 				return -EOPNOTSUPP;
952 
953 			data.vlan.hdr[data.vlan.num].id = act->vlan.vid;
954 			data.vlan.hdr[data.vlan.num].proto = act->vlan.proto;
955 			data.vlan.num++;
956 			break;
957 		case FLOW_ACTION_VLAN_POP:
958 			break;
959 		case FLOW_ACTION_PPPOE_PUSH:
960 			break;
961 		default:
962 			return -EOPNOTSUPP;
963 		}
964 	}
965 
966 	if (!is_valid_ether_addr(data.eth.h_source) ||
967 	    !is_valid_ether_addr(data.eth.h_dest))
968 		return -EINVAL;
969 
970 	err = airoha_ppe_foe_entry_prepare(eth, &hwe, odev, offload_type,
971 					   &data, l4proto);
972 	if (err)
973 		return err;
974 
975 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
976 		struct flow_match_ports ports;
977 
978 		if (offload_type == PPE_PKT_TYPE_BRIDGE)
979 			return -EOPNOTSUPP;
980 
981 		flow_rule_match_ports(rule, &ports);
982 		data.src_port = ports.key->src;
983 		data.dst_port = ports.key->dst;
984 	} else if (offload_type != PPE_PKT_TYPE_BRIDGE) {
985 		return -EOPNOTSUPP;
986 	}
987 
988 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
989 		struct flow_match_ipv4_addrs addrs;
990 
991 		flow_rule_match_ipv4_addrs(rule, &addrs);
992 		data.v4.src_addr = addrs.key->src;
993 		data.v4.dst_addr = addrs.key->dst;
994 		airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, false);
995 	}
996 
997 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
998 		struct flow_match_ipv6_addrs addrs;
999 
1000 		flow_rule_match_ipv6_addrs(rule, &addrs);
1001 
1002 		data.v6.src_addr = addrs.key->src;
1003 		data.v6.dst_addr = addrs.key->dst;
1004 		airoha_ppe_foe_entry_set_ipv6_tuple(&hwe, &data);
1005 	}
1006 
1007 	flow_action_for_each(i, act, &rule->action) {
1008 		if (act->id != FLOW_ACTION_MANGLE)
1009 			continue;
1010 
1011 		if (offload_type == PPE_PKT_TYPE_BRIDGE)
1012 			return -EOPNOTSUPP;
1013 
1014 		switch (act->mangle.htype) {
1015 		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
1016 		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
1017 			err = airoha_ppe_flow_mangle_ports(act, &data);
1018 			break;
1019 		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
1020 			err = airoha_ppe_flow_mangle_ipv4(act, &data);
1021 			break;
1022 		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
1023 			/* handled earlier */
1024 			break;
1025 		default:
1026 			return -EOPNOTSUPP;
1027 		}
1028 
1029 		if (err)
1030 			return err;
1031 	}
1032 
1033 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1034 		err = airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, true);
1035 		if (err)
1036 			return err;
1037 	}
1038 
1039 	e = kzalloc(sizeof(*e), GFP_KERNEL);
1040 	if (!e)
1041 		return -ENOMEM;
1042 
1043 	e->cookie = f->cookie;
1044 	memcpy(&e->data, &hwe, sizeof(e->data));
1045 
1046 	err = airoha_ppe_foe_flow_commit_entry(eth->ppe, e);
1047 	if (err)
1048 		goto free_entry;
1049 
1050 	err = rhashtable_insert_fast(&eth->flow_table, &e->node,
1051 				     airoha_flow_table_params);
1052 	if (err < 0)
1053 		goto remove_foe_entry;
1054 
1055 	return 0;
1056 
1057 remove_foe_entry:
1058 	airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
1059 free_entry:
1060 	kfree(e);
1061 
1062 	return err;
1063 }
1064 
1065 static int airoha_ppe_flow_offload_destroy(struct airoha_gdm_port *port,
1066 					   struct flow_cls_offload *f)
1067 {
1068 	struct airoha_eth *eth = port->qdma->eth;
1069 	struct airoha_flow_table_entry *e;
1070 
1071 	e = rhashtable_lookup(&eth->flow_table, &f->cookie,
1072 			      airoha_flow_table_params);
1073 	if (!e)
1074 		return -ENOENT;
1075 
1076 	airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
1077 	rhashtable_remove_fast(&eth->flow_table, &e->node,
1078 			       airoha_flow_table_params);
1079 	kfree(e);
1080 
1081 	return 0;
1082 }
1083 
1084 void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
1085 				    struct airoha_foe_stats64 *stats)
1086 {
1087 	u32 index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
1088 	struct airoha_eth *eth = ppe->eth;
1089 	struct airoha_npu *npu;
1090 
1091 	if (index >= PPE_STATS_NUM_ENTRIES)
1092 		return;
1093 
1094 	rcu_read_lock();
1095 
1096 	npu = rcu_dereference(eth->npu);
1097 	if (npu) {
1098 		u64 packets = ppe->foe_stats[index].packets;
1099 		u64 bytes = ppe->foe_stats[index].bytes;
1100 		struct airoha_foe_stats npu_stats;
1101 
1102 		memcpy_fromio(&npu_stats, &npu->stats[index],
1103 			      sizeof(*npu->stats));
1104 		stats->packets = packets << 32 | npu_stats.packets;
1105 		stats->bytes = bytes << 32 | npu_stats.bytes;
1106 	}
1107 
1108 	rcu_read_unlock();
1109 }
1110 
1111 static int airoha_ppe_flow_offload_stats(struct airoha_gdm_port *port,
1112 					 struct flow_cls_offload *f)
1113 {
1114 	struct airoha_eth *eth = port->qdma->eth;
1115 	struct airoha_flow_table_entry *e;
1116 	u32 idle;
1117 
1118 	e = rhashtable_lookup(&eth->flow_table, &f->cookie,
1119 			      airoha_flow_table_params);
1120 	if (!e)
1121 		return -ENOENT;
1122 
1123 	idle = airoha_ppe_entry_idle_time(eth->ppe, e);
1124 	f->stats.lastused = jiffies - idle * HZ;
1125 
1126 	if (e->hash != 0xffff) {
1127 		struct airoha_foe_stats64 stats = {};
1128 
1129 		airoha_ppe_foe_entry_get_stats(eth->ppe, e->hash, &stats);
1130 		f->stats.pkts += (stats.packets - e->stats.packets);
1131 		f->stats.bytes += (stats.bytes - e->stats.bytes);
1132 		e->stats = stats;
1133 	}
1134 
1135 	return 0;
1136 }
1137 
1138 static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
1139 				       struct flow_cls_offload *f)
1140 {
1141 	switch (f->command) {
1142 	case FLOW_CLS_REPLACE:
1143 		return airoha_ppe_flow_offload_replace(port, f);
1144 	case FLOW_CLS_DESTROY:
1145 		return airoha_ppe_flow_offload_destroy(port, f);
1146 	case FLOW_CLS_STATS:
1147 		return airoha_ppe_flow_offload_stats(port, f);
1148 	default:
1149 		break;
1150 	}
1151 
1152 	return -EOPNOTSUPP;
1153 }
1154 
1155 static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe,
1156 					 struct airoha_npu *npu)
1157 {
1158 	int i, sram_num_entries = PPE_SRAM_NUM_ENTRIES;
1159 	struct airoha_foe_entry *hwe = ppe->foe;
1160 
1161 	if (airoha_ppe2_is_enabled(ppe->eth))
1162 		sram_num_entries = sram_num_entries / 2;
1163 
1164 	for (i = 0; i < sram_num_entries; i++)
1165 		memset(&hwe[i], 0, sizeof(*hwe));
1166 
1167 	return npu->ops.ppe_flush_sram_entries(npu, ppe->foe_dma,
1168 					       PPE_SRAM_NUM_ENTRIES);
1169 }
1170 
1171 static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
1172 {
1173 	struct airoha_npu *npu = airoha_npu_get(eth->dev,
1174 						&eth->ppe->foe_stats_dma);
1175 
1176 	if (IS_ERR(npu)) {
1177 		request_module("airoha-npu");
1178 		npu = airoha_npu_get(eth->dev, &eth->ppe->foe_stats_dma);
1179 	}
1180 
1181 	return npu;
1182 }
1183 
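/* Lazy hw offload bring-up, run on the first tc flower request: grab the
 * NPU (loading the airoha-npu module if needed), initialize the PPE
 * blocks, flush stale SRAM entries, reset flow stats and publish the NPU
 * pointer for the datapath.
 */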
1184 static int airoha_ppe_offload_setup(struct airoha_eth *eth)
1185 {
1186 	struct airoha_npu *npu = airoha_ppe_npu_get(eth);
1187 	int err;
1188 
1189 	if (IS_ERR(npu))
1190 		return PTR_ERR(npu);
1191 
1192 	err = npu->ops.ppe_init(npu);
1193 	if (err)
1194 		goto error_npu_put;
1195 
1196 	airoha_ppe_hw_init(eth->ppe);
1197 	err = airoha_ppe_flush_sram_entries(eth->ppe, npu);
1198 	if (err)
1199 		goto error_npu_put;
1200 
1201 	airoha_ppe_foe_flow_stats_reset(eth->ppe, npu);
1202 
1203 	rcu_assign_pointer(eth->npu, npu);
1204 	synchronize_rcu();
1205 
1206 	return 0;
1207 
1208 error_npu_put:
1209 	airoha_npu_put(npu);
1210 
1211 	return err;
1212 }
1213 
1214 int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data)
1215 {
1216 	struct airoha_gdm_port *port = netdev_priv(dev);
1217 	struct flow_cls_offload *cls = type_data;
1218 	struct airoha_eth *eth = port->qdma->eth;
1219 	int err = 0;
1220 
1221 	mutex_lock(&flow_offload_mutex);
1222 
1223 	if (!eth->npu)
1224 		err = airoha_ppe_offload_setup(eth);
1225 	if (!err)
1226 		err = airoha_ppe_flow_offload_cmd(port, cls);
1227 
1228 	mutex_unlock(&flow_offload_mutex);
1229 
1230 	return err;
1231 }
1232 
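/* Rx hook: try to bind a sw flow to the unbound hw entry reported in the
 * rx descriptor, rate-limited to one attempt per hash every HZ / 10
 * jiffies.
 */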
1233 void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
1234 			  u16 hash)
1235 {
1236 	u16 now, diff;
1237 
1238 	if (hash > PPE_HASH_MASK)
1239 		return;
1240 
1241 	now = (u16)jiffies;
1242 	diff = now - ppe->foe_check_time[hash];
1243 	if (diff < HZ / 10)
1244 		return;
1245 
1246 	ppe->foe_check_time[hash] = now;
1247 	airoha_ppe_foe_insert_entry(ppe, skb, hash);
1248 }
1249 
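/* Program the port MAC address into the PPE update memory, indexed by
 * port id: the four low bytes at offset 0 and the two high bytes at
 * offset 1.
 */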
1250 void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
1251 {
1252 	struct airoha_eth *eth = port->qdma->eth;
1253 	struct net_device *dev = port->dev;
1254 	const u8 *addr = dev->dev_addr;
1255 	u32 val;
1256 
1257 	val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
1258 	airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
1259 	airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
1260 		     FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
1261 		     PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
1262 
1263 	val = (addr[0] << 8) | addr[1];
1264 	airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
1265 	airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
1266 		     FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
1267 		     FIELD_PREP(PPE_UPDMEM_OFFSET_MASK, 1) |
1268 		     PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
1269 }
1270 
1271 int airoha_ppe_init(struct airoha_eth *eth)
1272 {
1273 	struct airoha_ppe *ppe;
1274 	int foe_size, err;
1275 
1276 	ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
1277 	if (!ppe)
1278 		return -ENOMEM;
1279 
1280 	foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
1281 	ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
1282 				       GFP_KERNEL);
1283 	if (!ppe->foe)
1284 		return -ENOMEM;
1285 
1286 	ppe->eth = eth;
1287 	eth->ppe = ppe;
1288 
1289 	ppe->foe_flow = devm_kzalloc(eth->dev,
1290 				     PPE_NUM_ENTRIES * sizeof(*ppe->foe_flow),
1291 				     GFP_KERNEL);
1292 	if (!ppe->foe_flow)
1293 		return -ENOMEM;
1294 
1295 	foe_size = PPE_STATS_NUM_ENTRIES * sizeof(*ppe->foe_stats);
1296 	if (foe_size) {
1297 		ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size,
1298 						     &ppe->foe_stats_dma,
1299 						     GFP_KERNEL);
1300 		if (!ppe->foe_stats)
1301 			return -ENOMEM;
1302 	}
1303 
1304 	err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
1305 	if (err)
1306 		return err;
1307 
1308 	err = rhashtable_init(&ppe->l2_flows, &airoha_l2_flow_table_params);
1309 	if (err)
1310 		goto error_flow_table_destroy;
1311 
1312 	err = airoha_ppe_debugfs_init(ppe);
1313 	if (err)
1314 		goto error_l2_flow_table_destroy;
1315 
1316 	return 0;
1317 
1318 error_l2_flow_table_destroy:
1319 	rhashtable_destroy(&ppe->l2_flows);
1320 error_flow_table_destroy:
1321 	rhashtable_destroy(&eth->flow_table);
1322 
1323 	return err;
1324 }
1325 
1326 void airoha_ppe_deinit(struct airoha_eth *eth)
1327 {
1328 	struct airoha_npu *npu;
1329 
1330 	rcu_read_lock();
1331 	npu = rcu_dereference(eth->npu);
1332 	if (npu) {
1333 		npu->ops.ppe_deinit(npu);
1334 		airoha_npu_put(npu);
1335 	}
1336 	rcu_read_unlock();
1337 
1338 	rhashtable_destroy(&eth->ppe->l2_flows);
1339 	rhashtable_destroy(&eth->flow_table);
1340 	debugfs_remove(eth->ppe->debugfs_dir);
1341 }
1342