// Source: drivers/net/ethernet/airoha/airoha_ppe.c (revision 6439a0e64c355d2e375bd094f365d56ce81faba3)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2025 AIROHA Inc
4  * Author: Lorenzo Bianconi <lorenzo@kernel.org>
5  */
6 
7 #include <linux/ip.h>
8 #include <linux/ipv6.h>
9 #include <linux/rhashtable.h>
10 #include <net/ipv6.h>
11 #include <net/pkt_cls.h>
12 
13 #include "airoha_npu.h"
14 #include "airoha_regs.h"
15 #include "airoha_eth.h"
16 
17 static DEFINE_MUTEX(flow_offload_mutex);
18 static DEFINE_SPINLOCK(ppe_lock);
19 
20 static const struct rhashtable_params airoha_flow_table_params = {
21 	.head_offset = offsetof(struct airoha_flow_table_entry, node),
22 	.key_offset = offsetof(struct airoha_flow_table_entry, cookie),
23 	.key_len = sizeof(unsigned long),
24 	.automatic_shrinking = true,
25 };
26 
27 static const struct rhashtable_params airoha_l2_flow_table_params = {
28 	.head_offset = offsetof(struct airoha_flow_table_entry, l2_node),
29 	.key_offset = offsetof(struct airoha_flow_table_entry, data.bridge),
30 	.key_len = 2 * ETH_ALEN,
31 	.automatic_shrinking = true,
32 };
33 
34 static bool airoha_ppe2_is_enabled(struct airoha_eth *eth)
35 {
36 	return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK;
37 }
38 
39 static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
40 {
41 	u16 timestamp = airoha_fe_rr(ppe->eth, REG_FE_FOE_TS);
42 
43 	return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp);
44 }
45 
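/* Program the static PPE configuration: per-protocol bind ageing deltas,
 * hash table setup, search-miss behaviour and per-port egress MTUs, plus the
 * split of FOE entries between SRAM and DRAM depending on whether the second
 * PPE is enabled.
 */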
46 static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
47 {
48 	u32 sram_tb_size, sram_num_entries, dram_num_entries;
49 	struct airoha_eth *eth = ppe->eth;
50 	int i;
51 
52 	sram_tb_size = PPE_SRAM_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
53 	dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);
54 
55 	for (i = 0; i < PPE_NUM; i++) {
56 		int p;
57 
58 		airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
59 			     ppe->foe_dma + sram_tb_size);
60 
61 		airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
62 			      PPE_BIND_AGE0_DELTA_NON_L4 |
63 			      PPE_BIND_AGE0_DELTA_UDP,
64 			      FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) |
65 			      FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12));
66 		airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
67 			      PPE_BIND_AGE1_DELTA_TCP_FIN |
68 			      PPE_BIND_AGE1_DELTA_TCP,
69 			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
70 			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7));
71 
72 		airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
73 			      PPE_SRAM_TABLE_EN_MASK |
74 			      PPE_SRAM_HASH1_EN_MASK |
75 			      PPE_DRAM_TABLE_EN_MASK |
76 			      PPE_SRAM_HASH0_MODE_MASK |
77 			      PPE_SRAM_HASH1_MODE_MASK |
78 			      PPE_DRAM_HASH0_MODE_MASK |
79 			      PPE_DRAM_HASH1_MODE_MASK,
80 			      FIELD_PREP(PPE_SRAM_TABLE_EN_MASK, 1) |
81 			      FIELD_PREP(PPE_SRAM_HASH1_EN_MASK, 1) |
82 			      FIELD_PREP(PPE_SRAM_HASH1_MODE_MASK, 1) |
83 			      FIELD_PREP(PPE_DRAM_HASH1_MODE_MASK, 3));
84 
85 		airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
86 			      PPE_TB_CFG_SEARCH_MISS_MASK |
87 			      PPE_TB_CFG_KEEPALIVE_MASK |
88 			      PPE_TB_ENTRY_SIZE_MASK,
89 			      FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
90 			      FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0));
91 
92 		airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);
93 
94 		for (p = 0; p < ARRAY_SIZE(eth->ports); p++)
95 			airoha_fe_rmw(eth, REG_PPE_MTU(i, p),
96 				      FP0_EGRESS_MTU_MASK |
97 				      FP1_EGRESS_MTU_MASK,
98 				      FIELD_PREP(FP0_EGRESS_MTU_MASK,
99 						 AIROHA_MAX_MTU) |
100 				      FIELD_PREP(FP1_EGRESS_MTU_MASK,
101 						 AIROHA_MAX_MTU));
102 	}
103 
104 	if (airoha_ppe2_is_enabled(eth)) {
105 		sram_num_entries =
106 			PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_DATA_ENTRIES);
107 		airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
108 			      PPE_SRAM_TB_NUM_ENTRY_MASK |
109 			      PPE_DRAM_TB_NUM_ENTRY_MASK,
110 			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
111 					 sram_num_entries) |
112 			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
113 					 dram_num_entries));
114 		airoha_fe_rmw(eth, REG_PPE_TB_CFG(1),
115 			      PPE_SRAM_TB_NUM_ENTRY_MASK |
116 			      PPE_DRAM_TB_NUM_ENTRY_MASK,
117 			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
118 					 sram_num_entries) |
119 			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
120 					 dram_num_entries));
121 	} else {
122 		sram_num_entries =
123 			PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_DATA_ENTRIES);
124 		airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
125 			      PPE_SRAM_TB_NUM_ENTRY_MASK |
126 			      PPE_DRAM_TB_NUM_ENTRY_MASK,
127 			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
128 					 sram_num_entries) |
129 			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
130 					 dram_num_entries));
131 	}
132 }
133 
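/* Apply an Ethernet header mangle action to the locally cached ethhdr copy.
 * Only offsets within the destination/source MAC addresses (offset <= 8) are
 * handled; depending on the mask either a 2-byte or a 4-byte chunk of the
 * mangle value is copied.
 */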
134 static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act, void *eth)
135 {
136 	void *dest = eth + act->mangle.offset;
137 	const void *src = &act->mangle.val;
138 
139 	if (act->mangle.offset > 8)
140 		return;
141 
142 	if (act->mangle.mask == 0xffff) {
143 		src += 2;
144 		dest += 2;
145 	}
146 
147 	memcpy(dest, src, act->mangle.mask ? 2 : 4);
148 }
149 
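/* Translate a L4 port mangle action into the flow's src/dst ports: at
 * offset 0 the mask selects whether the low half of the value updates the
 * destination port or the high half updates the source port, while offset 2
 * always carries the destination port.
 */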
150 static int airoha_ppe_flow_mangle_ports(const struct flow_action_entry *act,
151 					struct airoha_flow_data *data)
152 {
153 	u32 val = be32_to_cpu((__force __be32)act->mangle.val);
154 
155 	switch (act->mangle.offset) {
156 	case 0:
157 		if ((__force __be32)act->mangle.mask == ~cpu_to_be32(0xffff))
158 			data->dst_port = cpu_to_be16(val);
159 		else
160 			data->src_port = cpu_to_be16(val >> 16);
161 		break;
162 	case 2:
163 		data->dst_port = cpu_to_be16(val);
164 		break;
165 	default:
166 		return -EINVAL;
167 	}
168 
169 	return 0;
170 }
171 
172 static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
173 				       struct airoha_flow_data *data)
174 {
175 	__be32 *dest;
176 
177 	switch (act->mangle.offset) {
178 	case offsetof(struct iphdr, saddr):
179 		dest = &data->v4.src_addr;
180 		break;
181 	case offsetof(struct iphdr, daddr):
182 		dest = &data->v4.dst_addr;
183 		break;
184 	default:
185 		return -EINVAL;
186 	}
187 
188 	memcpy(dest, &act->mangle.val, sizeof(u32));
189 
190 	return 0;
191 }
192 
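/* If @dev is a DSA user port, replace it with its conduit device and return
 * the DSA port index; return -ENODEV when DSA is disabled or the device is
 * not a DSA port.
 */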
193 static int airoha_get_dsa_port(struct net_device **dev)
194 {
195 #if IS_ENABLED(CONFIG_NET_DSA)
196 	struct dsa_port *dp = dsa_port_from_netdev(*dev);
197 
198 	if (IS_ERR(dp))
199 		return -ENODEV;
200 
201 	*dev = dsa_port_to_conduit(dp);
202 	return dp->index;
203 #else
204 	return -ENODEV;
205 #endif
206 }
207 
208 static void airoha_ppe_foe_set_bridge_addrs(struct airoha_foe_bridge *br,
209 					    struct ethhdr *eh)
210 {
211 	br->dest_mac_hi = get_unaligned_be32(eh->h_dest);
212 	br->dest_mac_lo = get_unaligned_be16(eh->h_dest + 4);
213 	br->src_mac_hi = get_unaligned_be16(eh->h_source);
214 	br->src_mac_lo = get_unaligned_be32(eh->h_source + 2);
215 }
216 
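/* Build a FOE hw entry from the parsed flow data: IB1 carries bind state,
 * packet type and VLAN/PPPoE layering, IB2 the egress PSE port and queue
 * flags, and the l2 info holds MAC addresses, VLAN tags and the PPPoE
 * session id. Returns -EINVAL if the egress device is not a valid GDM port.
 */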
217 static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
218 					struct airoha_foe_entry *hwe,
219 					struct net_device *dev, int type,
220 					struct airoha_flow_data *data,
221 					int l4proto)
222 {
223 	int dsa_port = airoha_get_dsa_port(&dev);
224 	struct airoha_foe_mac_info_common *l2;
225 	u32 qdata, ports_pad, val;
226 	u8 smac_id = 0xf;
227 
228 	memset(hwe, 0, sizeof(*hwe));
229 
230 	val = FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, AIROHA_FOE_STATE_BIND) |
231 	      FIELD_PREP(AIROHA_FOE_IB1_BIND_PACKET_TYPE, type) |
232 	      FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
233 	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
234 	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
235 	      FIELD_PREP(AIROHA_FOE_IB1_BIND_PPPOE, data->pppoe.num) |
236 	      AIROHA_FOE_IB1_BIND_TTL;
237 	hwe->ib1 = val;
238 
239 	val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f) |
240 	      AIROHA_FOE_IB2_PSE_QOS;
241 	if (dsa_port >= 0)
242 		val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, dsa_port);
243 
244 	if (dev) {
245 		struct airoha_gdm_port *port = netdev_priv(dev);
246 		u8 pse_port;
247 
248 		if (!airoha_is_valid_gdm_port(eth, port))
249 			return -EINVAL;
250 
251 		if (dsa_port >= 0)
252 			pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
253 		else
254 			pse_port = 2; /* uplink relies on GDM2 loopback */
255 		val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);
256 
257 		/* For downlink traffic, consume SRAM memory for the hw
258 		 * forwarding descriptor queue.
259 		 */
260 		if (airhoa_is_lan_gdm_port(port))
261 			val |= AIROHA_FOE_IB2_FAST_PATH;
262 
263 		smac_id = port->id;
264 	}
265 
266 	if (is_multicast_ether_addr(data->eth.h_dest))
267 		val |= AIROHA_FOE_IB2_MULTICAST;
268 
269 	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
270 	if (type == PPE_PKT_TYPE_IPV4_ROUTE)
271 		hwe->ipv4.orig_tuple.ports = ports_pad;
272 	if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
273 		hwe->ipv6.ports = ports_pad;
274 
275 	qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f);
276 	if (type == PPE_PKT_TYPE_BRIDGE) {
277 		airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth);
278 		hwe->bridge.data = qdata;
279 		hwe->bridge.ib2 = val;
280 		l2 = &hwe->bridge.l2.common;
281 	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
282 		hwe->ipv6.data = qdata;
283 		hwe->ipv6.ib2 = val;
284 		l2 = &hwe->ipv6.l2;
285 		l2->etype = ETH_P_IPV6;
286 	} else {
287 		hwe->ipv4.data = qdata;
288 		hwe->ipv4.ib2 = val;
289 		l2 = &hwe->ipv4.l2.common;
290 		l2->etype = ETH_P_IP;
291 	}
292 
293 	l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
294 	l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
295 	if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
296 		struct airoha_foe_mac_info *mac_info;
297 
298 		l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
299 		hwe->ipv4.l2.src_mac_lo =
300 			get_unaligned_be16(data->eth.h_source + 4);
301 
302 		mac_info = (struct airoha_foe_mac_info *)l2;
303 		mac_info->pppoe_id = data->pppoe.sid;
304 	} else {
305 		l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id) |
306 				 FIELD_PREP(AIROHA_FOE_MAC_PPPOE_ID,
307 					    data->pppoe.sid);
308 	}
309 
310 	if (data->vlan.num) {
311 		l2->vlan1 = data->vlan.hdr[0].id;
312 		if (data->vlan.num == 2)
313 			l2->vlan2 = data->vlan.hdr[1].id;
314 	}
315 
316 	if (dsa_port >= 0) {
317 		l2->etype = BIT(dsa_port);
318 		l2->etype |= !data->vlan.num ? BIT(15) : 0;
319 	} else if (data->pppoe.num) {
320 		l2->etype = ETH_P_PPP_SES;
321 	}
322 
323 	return 0;
324 }
325 
326 static int airoha_ppe_foe_entry_set_ipv4_tuple(struct airoha_foe_entry *hwe,
327 					       struct airoha_flow_data *data,
328 					       bool egress)
329 {
330 	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
331 	struct airoha_foe_ipv4_tuple *t;
332 
333 	switch (type) {
334 	case PPE_PKT_TYPE_IPV4_HNAPT:
335 		if (egress) {
336 			t = &hwe->ipv4.new_tuple;
337 			break;
338 		}
339 		fallthrough;
340 	case PPE_PKT_TYPE_IPV4_DSLITE:
341 	case PPE_PKT_TYPE_IPV4_ROUTE:
342 		t = &hwe->ipv4.orig_tuple;
343 		break;
344 	default:
345 		WARN_ON_ONCE(1);
346 		return -EINVAL;
347 	}
348 
349 	t->src_ip = be32_to_cpu(data->v4.src_addr);
350 	t->dest_ip = be32_to_cpu(data->v4.dst_addr);
351 
352 	if (type != PPE_PKT_TYPE_IPV4_ROUTE) {
353 		t->src_port = be16_to_cpu(data->src_port);
354 		t->dest_port = be16_to_cpu(data->dst_port);
355 	}
356 
357 	return 0;
358 }
359 
360 static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
361 					       struct airoha_flow_data *data)
362 
363 {
364 	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
365 	u32 *src, *dest;
366 
367 	switch (type) {
368 	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
369 	case PPE_PKT_TYPE_IPV6_6RD:
370 		hwe->ipv6.src_port = be16_to_cpu(data->src_port);
371 		hwe->ipv6.dest_port = be16_to_cpu(data->dst_port);
372 		fallthrough;
373 	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
374 		src = hwe->ipv6.src_ip;
375 		dest = hwe->ipv6.dest_ip;
376 		break;
377 	default:
378 		WARN_ON_ONCE(1);
379 		return -EINVAL;
380 	}
381 
382 	ipv6_addr_be32_to_cpu(src, data->v6.src_addr.s6_addr32);
383 	ipv6_addr_be32_to_cpu(dest, data->v6.dst_addr.s6_addr32);
384 
385 	return 0;
386 }
387 
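/* Fold the entry tuple (IP addresses and ports, or the MAC addresses for
 * bridged flows) into three 32-bit words and mix them into a FOE bucket
 * index; DS-Lite and 6RD entries are not hashed and return PPE_HASH_MASK.
 */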
388 static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
389 {
390 	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
391 	u32 hash, hv1, hv2, hv3;
392 
393 	switch (type) {
394 	case PPE_PKT_TYPE_IPV4_ROUTE:
395 	case PPE_PKT_TYPE_IPV4_HNAPT:
396 		hv1 = hwe->ipv4.orig_tuple.ports;
397 		hv2 = hwe->ipv4.orig_tuple.dest_ip;
398 		hv3 = hwe->ipv4.orig_tuple.src_ip;
399 		break;
400 	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
401 	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
402 		hv1 = hwe->ipv6.src_ip[3] ^ hwe->ipv6.dest_ip[3];
403 		hv1 ^= hwe->ipv6.ports;
404 
405 		hv2 = hwe->ipv6.src_ip[2] ^ hwe->ipv6.dest_ip[2];
406 		hv2 ^= hwe->ipv6.dest_ip[0];
407 
408 		hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
409 		hv3 ^= hwe->ipv6.src_ip[0];
410 		break;
411 	case PPE_PKT_TYPE_BRIDGE: {
412 		struct airoha_foe_mac_info *l2 = &hwe->bridge.l2;
413 
414 		hv1 = l2->common.src_mac_hi & 0xffff;
415 		hv1 = hv1 << 16 | l2->src_mac_lo;
416 
417 		hv2 = l2->common.dest_mac_lo;
418 		hv2 = hv2 << 16;
419 		hv2 = hv2 | ((l2->common.src_mac_hi & 0xffff0000) >> 16);
420 
421 		hv3 = l2->common.dest_mac_hi;
422 		break;
423 	}
424 	case PPE_PKT_TYPE_IPV4_DSLITE:
425 	case PPE_PKT_TYPE_IPV6_6RD:
426 	default:
427 		WARN_ON_ONCE(1);
428 		return PPE_HASH_MASK;
429 	}
430 
431 	hash = (hv1 & hv2) | ((~hv1) & hv3);
432 	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
433 	hash ^= hv1 ^ hv2 ^ hv3;
434 	hash ^= hash >> 16;
435 	hash &= PPE_NUM_ENTRIES - 1;
436 
437 	return hash;
438 }
439 
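/* Map a FOE hash to its flow-stats slot: when the second PPE is enabled,
 * hashes at or beyond PPE_STATS_NUM_ENTRIES are shifted down by
 * PPE1_STATS_NUM_ENTRIES.
 */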
440 static u32 airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, u32 hash)
441 {
442 	if (!airoha_ppe2_is_enabled(ppe->eth))
443 		return hash;
444 
445 	return hash >= PPE_STATS_NUM_ENTRIES ? hash - PPE1_STATS_NUM_ENTRIES
446 					     : hash;
447 }
448 
449 static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe,
450 						 struct airoha_npu *npu,
451 						 int index)
452 {
453 	memset_io(&npu->stats[index], 0, sizeof(*npu->stats));
454 	memset(&ppe->foe_stats[index], 0, sizeof(*ppe->foe_stats));
455 }
456 
457 static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe,
458 					    struct airoha_npu *npu)
459 {
460 	int i;
461 
462 	for (i = 0; i < PPE_STATS_NUM_ENTRIES; i++)
463 		airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i);
464 }
465 
466 static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
467 					     struct airoha_npu *npu,
468 					     struct airoha_foe_entry *hwe,
469 					     u32 hash)
470 {
471 	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
472 	u32 index, pse_port, val, *data, *ib2, *meter;
473 	u8 nbq;
474 
475 	index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
476 	if (index >= PPE_STATS_NUM_ENTRIES)
477 		return;
478 
479 	if (type == PPE_PKT_TYPE_BRIDGE) {
480 		data = &hwe->bridge.data;
481 		ib2 = &hwe->bridge.ib2;
482 		meter = &hwe->bridge.l2.meter;
483 	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
484 		data = &hwe->ipv6.data;
485 		ib2 = &hwe->ipv6.ib2;
486 		meter = &hwe->ipv6.meter;
487 	} else {
488 		data = &hwe->ipv4.data;
489 		ib2 = &hwe->ipv4.ib2;
490 		meter = &hwe->ipv4.l2.meter;
491 	}
492 
493 	airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index);
494 
495 	val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data);
496 	*data = (*data & ~AIROHA_FOE_ACTDP) |
497 		FIELD_PREP(AIROHA_FOE_ACTDP, val);
498 
499 	val = *ib2 & (AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
500 		      AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH);
501 	*meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val);
502 
503 	pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
504 	nbq = pse_port == 1 ? 6 : 5;
505 	*ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
506 		  AIROHA_FOE_IB2_PSE_QOS);
507 	*ib2 |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, 6) |
508 		FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
509 }
510 
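/* Return a pointer to the FOE entry for @hash in the DRAM shadow table.
 * Entries that live in SRAM are first read back through REG_PPE_RAM_CTRL so
 * the caller always sees the current hw state. Must be called with ppe_lock
 * held.
 */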
511 static struct airoha_foe_entry *
512 airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash)
513 {
514 	lockdep_assert_held(&ppe_lock);
515 
516 	if (hash < PPE_SRAM_NUM_ENTRIES) {
517 		u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
518 		struct airoha_eth *eth = ppe->eth;
519 		bool ppe2;
520 		u32 val;
521 		int i;
522 
523 		ppe2 = airoha_ppe2_is_enabled(ppe->eth) &&
524 		       hash >= PPE1_SRAM_NUM_ENTRIES;
525 		airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
526 			     FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
527 			     PPE_SRAM_CTRL_REQ_MASK);
528 		if (read_poll_timeout_atomic(airoha_fe_rr, val,
529 					     val & PPE_SRAM_CTRL_ACK_MASK,
530 					     10, 100, false, eth,
531 					     REG_PPE_RAM_CTRL(ppe2)))
532 			return NULL;
533 
534 		for (i = 0; i < sizeof(struct airoha_foe_entry) / 4; i++)
535 			hwe[i] = airoha_fe_rr(eth,
536 					      REG_PPE_RAM_ENTRY(ppe2, i));
537 	}
538 
539 	return ppe->foe + hash * sizeof(struct airoha_foe_entry);
540 }
541 
542 struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
543 						  u32 hash)
544 {
545 	struct airoha_foe_entry *hwe;
546 
547 	spin_lock_bh(&ppe_lock);
548 	hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
549 	spin_unlock_bh(&ppe_lock);
550 
551 	return hwe;
552 }
553 
554 static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
555 					 struct airoha_foe_entry *hwe)
556 {
557 	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
558 	int len;
559 
560 	if ((hwe->ib1 ^ e->data.ib1) & AIROHA_FOE_IB1_BIND_UDP)
561 		return false;
562 
563 	if (type > PPE_PKT_TYPE_IPV4_DSLITE)
564 		len = offsetof(struct airoha_foe_entry, ipv6.data);
565 	else
566 		len = offsetof(struct airoha_foe_entry, ipv4.ib2);
567 
568 	return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
569 }
570 
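/* Write a software FOE entry into the hw table: the entry body is copied
 * first and IB1 (with a refreshed bind timestamp) last, then the per-flow
 * stats slot is reset and the stats-related entry fields updated. Entries
 * stored in SRAM are committed through the NPU firmware.
 */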
571 static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
572 				       struct airoha_foe_entry *e,
573 				       u32 hash)
574 {
575 	struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
576 	u32 ts = airoha_ppe_get_timestamp(ppe);
577 	struct airoha_eth *eth = ppe->eth;
578 	struct airoha_npu *npu;
579 	int err = 0;
580 
581 	memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
582 	wmb();
583 
584 	e->ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
585 	e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
586 	hwe->ib1 = e->ib1;
587 
588 	rcu_read_lock();
589 
590 	npu = rcu_dereference(eth->npu);
591 	if (!npu) {
592 		err = -ENODEV;
593 		goto unlock;
594 	}
595 
596 	airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);
597 
598 	if (hash < PPE_SRAM_NUM_ENTRIES) {
599 		dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
600 		bool ppe2 = airoha_ppe2_is_enabled(eth) &&
601 			    hash >= PPE1_SRAM_NUM_ENTRIES;
602 
603 		err = npu->ops.ppe_foe_commit_entry(npu, addr, sizeof(*hwe),
604 						    hash, ppe2);
605 	}
606 unlock:
607 	rcu_read_unlock();
608 
609 	return err;
610 }
611 
612 static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe,
613 				       struct airoha_flow_table_entry *e)
614 {
615 	lockdep_assert_held(&ppe_lock);
616 
617 	hlist_del_init(&e->list);
618 	if (e->hash != 0xffff) {
619 		e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
620 		e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
621 					  AIROHA_FOE_STATE_INVALID);
622 		airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
623 		e->hash = 0xffff;
624 	}
625 	if (e->type == FLOW_TYPE_L2_SUBFLOW) {
626 		hlist_del_init(&e->l2_subflow_node);
627 		kfree(e);
628 	}
629 }
630 
631 static void airoha_ppe_foe_remove_l2_flow(struct airoha_ppe *ppe,
632 					  struct airoha_flow_table_entry *e)
633 {
634 	struct hlist_head *head = &e->l2_flows;
635 	struct hlist_node *n;
636 
637 	lockdep_assert_held(&ppe_lock);
638 
639 	rhashtable_remove_fast(&ppe->l2_flows, &e->l2_node,
640 			       airoha_l2_flow_table_params);
641 	hlist_for_each_entry_safe(e, n, head, l2_subflow_node)
642 		airoha_ppe_foe_remove_flow(ppe, e);
643 }
644 
645 static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
646 					     struct airoha_flow_table_entry *e)
647 {
648 	spin_lock_bh(&ppe_lock);
649 
650 	if (e->type == FLOW_TYPE_L2)
651 		airoha_ppe_foe_remove_l2_flow(ppe, e);
652 	else
653 		airoha_ppe_foe_remove_flow(ppe, e);
654 
655 	spin_unlock_bh(&ppe_lock);
656 }
657 
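/* Bind the hw entry at @hash using the L2 (bridge) flow @e: the tuple
 * already present in the hw entry is preserved while the L2/egress info
 * (MAC addresses, ib2, queue data) is taken from the bridge flow. The new
 * entry is tracked as a subflow of @e.
 */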
658 static int
659 airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
660 				    struct airoha_flow_table_entry *e,
661 				    u32 hash)
662 {
663 	u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
664 	struct airoha_foe_entry *hwe_p, hwe;
665 	struct airoha_flow_table_entry *f;
666 	int type;
667 
668 	hwe_p = airoha_ppe_foe_get_entry_locked(ppe, hash);
669 	if (!hwe_p)
670 		return -EINVAL;
671 
672 	f = kzalloc(sizeof(*f), GFP_ATOMIC);
673 	if (!f)
674 		return -ENOMEM;
675 
676 	hlist_add_head(&f->l2_subflow_node, &e->l2_flows);
677 	f->type = FLOW_TYPE_L2_SUBFLOW;
678 	f->hash = hash;
679 
680 	memcpy(&hwe, hwe_p, sizeof(*hwe_p));
681 	hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);
682 
683 	type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
684 	if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
685 		memcpy(&hwe.ipv6.l2, &e->data.bridge.l2, sizeof(hwe.ipv6.l2));
686 		hwe.ipv6.ib2 = e->data.bridge.ib2;
687 		/* Setting smac_id to 0xf instructs the hw to keep the
688 		 * original source mac address.
689 		 */
690 		hwe.ipv6.l2.src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID,
691 						    0xf);
692 	} else {
693 		memcpy(&hwe.bridge.l2, &e->data.bridge.l2,
694 		       sizeof(hwe.bridge.l2));
695 		hwe.bridge.ib2 = e->data.bridge.ib2;
696 		if (type == PPE_PKT_TYPE_IPV4_HNAPT)
697 			memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
698 			       sizeof(hwe.ipv4.new_tuple));
699 	}
700 
701 	hwe.bridge.data = e->data.bridge.data;
702 	airoha_ppe_foe_commit_entry(ppe, &hwe, hash);
703 
704 	return 0;
705 }
706 
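/* Called via airoha_ppe_check_skb() for packets hitting an unbound FOE
 * entry: look up the software flows hashed to the same bucket and, on a
 * match, bind the hw entry; otherwise try to attach the packet to an
 * existing L2 bridge flow as a new subflow.
 */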
707 static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
708 					struct sk_buff *skb,
709 					u32 hash)
710 {
711 	struct airoha_flow_table_entry *e;
712 	struct airoha_foe_bridge br = {};
713 	struct airoha_foe_entry *hwe;
714 	bool commit_done = false;
715 	struct hlist_node *n;
716 	u32 index, state;
717 
718 	spin_lock_bh(&ppe_lock);
719 
720 	hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
721 	if (!hwe)
722 		goto unlock;
723 
724 	state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
725 	if (state == AIROHA_FOE_STATE_BIND)
726 		goto unlock;
727 
728 	index = airoha_ppe_foe_get_entry_hash(hwe);
729 	hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
730 		if (e->type == FLOW_TYPE_L2_SUBFLOW) {
731 			state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
732 			if (state != AIROHA_FOE_STATE_BIND) {
733 				e->hash = 0xffff;
734 				airoha_ppe_foe_remove_flow(ppe, e);
735 			}
736 			continue;
737 		}
738 
739 		if (!airoha_ppe_foe_compare_entry(e, hwe))
740 			continue;
741 
742 		airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
743 		commit_done = true;
744 		e->hash = hash;
745 	}
746 
747 	if (commit_done)
748 		goto unlock;
749 
750 	airoha_ppe_foe_set_bridge_addrs(&br, eth_hdr(skb));
751 	e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
752 				   airoha_l2_flow_table_params);
753 	if (e)
754 		airoha_ppe_foe_commit_subflow_entry(ppe, e, hash);
755 unlock:
756 	spin_unlock_bh(&ppe_lock);
757 }
758 
759 static int
760 airoha_ppe_foe_l2_flow_commit_entry(struct airoha_ppe *ppe,
761 				    struct airoha_flow_table_entry *e)
762 {
763 	struct airoha_flow_table_entry *prev;
764 
765 	e->type = FLOW_TYPE_L2;
766 	prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &e->l2_node,
767 						 airoha_l2_flow_table_params);
768 	if (!prev)
769 		return 0;
770 
771 	if (IS_ERR(prev))
772 		return PTR_ERR(prev);
773 
774 	return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
775 				       &e->l2_node,
776 				       airoha_l2_flow_table_params);
777 }
778 
779 static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
780 					    struct airoha_flow_table_entry *e)
781 {
782 	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
783 	u32 hash;
784 
785 	if (type == PPE_PKT_TYPE_BRIDGE)
786 		return airoha_ppe_foe_l2_flow_commit_entry(ppe, e);
787 
788 	hash = airoha_ppe_foe_get_entry_hash(&e->data);
789 	e->type = FLOW_TYPE_L4;
790 	e->hash = 0xffff;
791 
792 	spin_lock_bh(&ppe_lock);
793 	hlist_add_head(&e->list, &ppe->foe_flow[hash]);
794 	spin_unlock_bh(&ppe_lock);
795 
796 	return 0;
797 }
798 
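/* Return how many timestamp ticks have elapsed since the entry was last
 * refreshed, using the bind or unbind timestamp field depending on the entry
 * state and accounting for counter wraparound.
 */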
799 static int airoha_ppe_get_entry_idle_time(struct airoha_ppe *ppe, u32 ib1)
800 {
801 	u32 state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
802 	u32 ts, ts_mask, now = airoha_ppe_get_timestamp(ppe);
803 	int idle;
804 
805 	if (state == AIROHA_FOE_STATE_BIND) {
806 		ts = FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, ib1);
807 		ts_mask = AIROHA_FOE_IB1_BIND_TIMESTAMP;
808 	} else {
809 		ts = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, ib1);
810 		now = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, now);
811 		ts_mask = AIROHA_FOE_IB1_UNBIND_TIMESTAMP;
812 	}
813 	idle = now - ts;
814 
815 	return idle < 0 ? idle + ts_mask + 1 : idle;
816 }
817 
818 static void
819 airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
820 				    struct airoha_flow_table_entry *e)
821 {
822 	int min_idle = airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
823 	struct airoha_flow_table_entry *iter;
824 	struct hlist_node *n;
825 
826 	lockdep_assert_held(&ppe_lock);
827 
828 	hlist_for_each_entry_safe(iter, n, &e->l2_flows, l2_subflow_node) {
829 		struct airoha_foe_entry *hwe;
830 		u32 ib1, state;
831 		int idle;
832 
833 		hwe = airoha_ppe_foe_get_entry_locked(ppe, iter->hash);
834 		if (!hwe)
835 			continue;
836 
837 		ib1 = READ_ONCE(hwe->ib1);
838 		state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
839 		if (state != AIROHA_FOE_STATE_BIND) {
840 			iter->hash = 0xffff;
841 			airoha_ppe_foe_remove_flow(ppe, iter);
842 			continue;
843 		}
844 
845 		idle = airoha_ppe_get_entry_idle_time(ppe, ib1);
846 		if (idle >= min_idle)
847 			continue;
848 
849 		min_idle = idle;
850 		e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
851 		e->data.ib1 |= ib1 & AIROHA_FOE_IB1_BIND_TIMESTAMP;
852 	}
853 }
854 
855 static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe,
856 					     struct airoha_flow_table_entry *e)
857 {
858 	struct airoha_foe_entry *hwe_p, hwe = {};
859 
860 	spin_lock_bh(&ppe_lock);
861 
862 	if (e->type == FLOW_TYPE_L2) {
863 		airoha_ppe_foe_flow_l2_entry_update(ppe, e);
864 		goto unlock;
865 	}
866 
867 	if (e->hash == 0xffff)
868 		goto unlock;
869 
870 	hwe_p = airoha_ppe_foe_get_entry_locked(ppe, e->hash);
871 	if (!hwe_p)
872 		goto unlock;
873 
874 	memcpy(&hwe, hwe_p, sizeof(*hwe_p));
875 	if (!airoha_ppe_foe_compare_entry(e, &hwe)) {
876 		e->hash = 0xffff;
877 		goto unlock;
878 	}
879 
880 	e->data.ib1 = hwe.ib1;
881 unlock:
882 	spin_unlock_bh(&ppe_lock);
883 }
884 
885 static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe,
886 				      struct airoha_flow_table_entry *e)
887 {
888 	airoha_ppe_foe_flow_entry_update(ppe, e);
889 
890 	return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
891 }
892 
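/* FLOW_CLS_REPLACE handler: parse the flower match (control, basic, eth,
 * ports, IPv4/IPv6 address keys) and actions (mangle, redirect, VLAN and
 * PPPoE push/pop) into struct airoha_flow_data, build the FOE entry and
 * commit it to the flow table.
 */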
893 static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
894 					   struct flow_cls_offload *f)
895 {
896 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
897 	struct airoha_eth *eth = port->qdma->eth;
898 	struct airoha_flow_table_entry *e;
899 	struct airoha_flow_data data = {};
900 	struct net_device *odev = NULL;
901 	struct flow_action_entry *act;
902 	struct airoha_foe_entry hwe;
903 	int err, i, offload_type;
904 	u16 addr_type = 0;
905 	u8 l4proto = 0;
906 
907 	if (rhashtable_lookup(&eth->flow_table, &f->cookie,
908 			      airoha_flow_table_params))
909 		return -EEXIST;
910 
911 	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
912 		return -EOPNOTSUPP;
913 
914 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
915 		struct flow_match_control match;
916 
917 		flow_rule_match_control(rule, &match);
918 		addr_type = match.key->addr_type;
919 		if (flow_rule_has_control_flags(match.mask->flags,
920 						f->common.extack))
921 			return -EOPNOTSUPP;
922 	} else {
923 		return -EOPNOTSUPP;
924 	}
925 
926 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
927 		struct flow_match_basic match;
928 
929 		flow_rule_match_basic(rule, &match);
930 		l4proto = match.key->ip_proto;
931 	} else {
932 		return -EOPNOTSUPP;
933 	}
934 
935 	switch (addr_type) {
936 	case 0:
937 		offload_type = PPE_PKT_TYPE_BRIDGE;
938 		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
939 			struct flow_match_eth_addrs match;
940 
941 			flow_rule_match_eth_addrs(rule, &match);
942 			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
943 			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
944 		} else {
945 			return -EOPNOTSUPP;
946 		}
947 		break;
948 	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
949 		offload_type = PPE_PKT_TYPE_IPV4_HNAPT;
950 		break;
951 	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
952 		offload_type = PPE_PKT_TYPE_IPV6_ROUTE_5T;
953 		break;
954 	default:
955 		return -EOPNOTSUPP;
956 	}
957 
958 	flow_action_for_each(i, act, &rule->action) {
959 		switch (act->id) {
960 		case FLOW_ACTION_MANGLE:
961 			if (offload_type == PPE_PKT_TYPE_BRIDGE)
962 				return -EOPNOTSUPP;
963 
964 			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
965 				airoha_ppe_flow_mangle_eth(act, &data.eth);
966 			break;
967 		case FLOW_ACTION_REDIRECT:
968 			odev = act->dev;
969 			break;
970 		case FLOW_ACTION_CSUM:
971 			break;
972 		case FLOW_ACTION_VLAN_PUSH:
973 			if (data.vlan.num == 2 ||
974 			    act->vlan.proto != htons(ETH_P_8021Q))
975 				return -EOPNOTSUPP;
976 
977 			data.vlan.hdr[data.vlan.num].id = act->vlan.vid;
978 			data.vlan.hdr[data.vlan.num].proto = act->vlan.proto;
979 			data.vlan.num++;
980 			break;
981 		case FLOW_ACTION_VLAN_POP:
982 			break;
983 		case FLOW_ACTION_PPPOE_PUSH:
984 			if (data.pppoe.num == 1 || data.vlan.num == 2)
985 				return -EOPNOTSUPP;
986 
987 			data.pppoe.sid = act->pppoe.sid;
988 			data.pppoe.num++;
989 			break;
990 		default:
991 			return -EOPNOTSUPP;
992 		}
993 	}
994 
995 	if (!is_valid_ether_addr(data.eth.h_source) ||
996 	    !is_valid_ether_addr(data.eth.h_dest))
997 		return -EINVAL;
998 
999 	err = airoha_ppe_foe_entry_prepare(eth, &hwe, odev, offload_type,
1000 					   &data, l4proto);
1001 	if (err)
1002 		return err;
1003 
1004 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
1005 		struct flow_match_ports ports;
1006 
1007 		if (offload_type == PPE_PKT_TYPE_BRIDGE)
1008 			return -EOPNOTSUPP;
1009 
1010 		flow_rule_match_ports(rule, &ports);
1011 		data.src_port = ports.key->src;
1012 		data.dst_port = ports.key->dst;
1013 	} else if (offload_type != PPE_PKT_TYPE_BRIDGE) {
1014 		return -EOPNOTSUPP;
1015 	}
1016 
1017 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1018 		struct flow_match_ipv4_addrs addrs;
1019 
1020 		flow_rule_match_ipv4_addrs(rule, &addrs);
1021 		data.v4.src_addr = addrs.key->src;
1022 		data.v4.dst_addr = addrs.key->dst;
1023 		airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, false);
1024 	}
1025 
1026 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1027 		struct flow_match_ipv6_addrs addrs;
1028 
1029 		flow_rule_match_ipv6_addrs(rule, &addrs);
1030 
1031 		data.v6.src_addr = addrs.key->src;
1032 		data.v6.dst_addr = addrs.key->dst;
1033 		airoha_ppe_foe_entry_set_ipv6_tuple(&hwe, &data);
1034 	}
1035 
1036 	flow_action_for_each(i, act, &rule->action) {
1037 		if (act->id != FLOW_ACTION_MANGLE)
1038 			continue;
1039 
1040 		if (offload_type == PPE_PKT_TYPE_BRIDGE)
1041 			return -EOPNOTSUPP;
1042 
1043 		switch (act->mangle.htype) {
1044 		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
1045 		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
1046 			err = airoha_ppe_flow_mangle_ports(act, &data);
1047 			break;
1048 		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
1049 			err = airoha_ppe_flow_mangle_ipv4(act, &data);
1050 			break;
1051 		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
1052 			/* handled earlier */
1053 			break;
1054 		default:
1055 			return -EOPNOTSUPP;
1056 		}
1057 
1058 		if (err)
1059 			return err;
1060 	}
1061 
1062 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1063 		err = airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, true);
1064 		if (err)
1065 			return err;
1066 	}
1067 
1068 	e = kzalloc(sizeof(*e), GFP_KERNEL);
1069 	if (!e)
1070 		return -ENOMEM;
1071 
1072 	e->cookie = f->cookie;
1073 	memcpy(&e->data, &hwe, sizeof(e->data));
1074 
1075 	err = airoha_ppe_foe_flow_commit_entry(eth->ppe, e);
1076 	if (err)
1077 		goto free_entry;
1078 
1079 	err = rhashtable_insert_fast(&eth->flow_table, &e->node,
1080 				     airoha_flow_table_params);
1081 	if (err < 0)
1082 		goto remove_foe_entry;
1083 
1084 	return 0;
1085 
1086 remove_foe_entry:
1087 	airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
1088 free_entry:
1089 	kfree(e);
1090 
1091 	return err;
1092 }
1093 
1094 static int airoha_ppe_flow_offload_destroy(struct airoha_gdm_port *port,
1095 					   struct flow_cls_offload *f)
1096 {
1097 	struct airoha_eth *eth = port->qdma->eth;
1098 	struct airoha_flow_table_entry *e;
1099 
1100 	e = rhashtable_lookup(&eth->flow_table, &f->cookie,
1101 			      airoha_flow_table_params);
1102 	if (!e)
1103 		return -ENOENT;
1104 
1105 	airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
1106 	rhashtable_remove_fast(&eth->flow_table, &e->node,
1107 			       airoha_flow_table_params);
1108 	kfree(e);
1109 
1110 	return 0;
1111 }
1112 
1113 void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
1114 				    struct airoha_foe_stats64 *stats)
1115 {
1116 	u32 index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
1117 	struct airoha_eth *eth = ppe->eth;
1118 	struct airoha_npu *npu;
1119 
1120 	if (index >= PPE_STATS_NUM_ENTRIES)
1121 		return;
1122 
1123 	rcu_read_lock();
1124 
1125 	npu = rcu_dereference(eth->npu);
1126 	if (npu) {
1127 		u64 packets = ppe->foe_stats[index].packets;
1128 		u64 bytes = ppe->foe_stats[index].bytes;
1129 		struct airoha_foe_stats npu_stats;
1130 
1131 		memcpy_fromio(&npu_stats, &npu->stats[index],
1132 			      sizeof(*npu->stats));
1133 		stats->packets = packets << 32 | npu_stats.packets;
1134 		stats->bytes = bytes << 32 | npu_stats.bytes;
1135 	}
1136 
1137 	rcu_read_unlock();
1138 }
1139 
1140 static int airoha_ppe_flow_offload_stats(struct airoha_gdm_port *port,
1141 					 struct flow_cls_offload *f)
1142 {
1143 	struct airoha_eth *eth = port->qdma->eth;
1144 	struct airoha_flow_table_entry *e;
1145 	u32 idle;
1146 
1147 	e = rhashtable_lookup(&eth->flow_table, &f->cookie,
1148 			      airoha_flow_table_params);
1149 	if (!e)
1150 		return -ENOENT;
1151 
1152 	idle = airoha_ppe_entry_idle_time(eth->ppe, e);
1153 	f->stats.lastused = jiffies - idle * HZ;
1154 
1155 	if (e->hash != 0xffff) {
1156 		struct airoha_foe_stats64 stats = {};
1157 
1158 		airoha_ppe_foe_entry_get_stats(eth->ppe, e->hash, &stats);
1159 		f->stats.pkts += (stats.packets - e->stats.packets);
1160 		f->stats.bytes += (stats.bytes - e->stats.bytes);
1161 		e->stats = stats;
1162 	}
1163 
1164 	return 0;
1165 }
1166 
1167 static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
1168 				       struct flow_cls_offload *f)
1169 {
1170 	switch (f->command) {
1171 	case FLOW_CLS_REPLACE:
1172 		return airoha_ppe_flow_offload_replace(port, f);
1173 	case FLOW_CLS_DESTROY:
1174 		return airoha_ppe_flow_offload_destroy(port, f);
1175 	case FLOW_CLS_STATS:
1176 		return airoha_ppe_flow_offload_stats(port, f);
1177 	default:
1178 		break;
1179 	}
1180 
1181 	return -EOPNOTSUPP;
1182 }
1183 
1184 static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe,
1185 					 struct airoha_npu *npu)
1186 {
1187 	int i, sram_num_entries = PPE_SRAM_NUM_ENTRIES;
1188 	struct airoha_foe_entry *hwe = ppe->foe;
1189 
1190 	if (airoha_ppe2_is_enabled(ppe->eth))
1191 		sram_num_entries = sram_num_entries / 2;
1192 
1193 	for (i = 0; i < sram_num_entries; i++)
1194 		memset(&hwe[i], 0, sizeof(*hwe));
1195 
1196 	return npu->ops.ppe_flush_sram_entries(npu, ppe->foe_dma,
1197 					       PPE_SRAM_NUM_ENTRIES);
1198 }
1199 
1200 static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
1201 {
1202 	struct airoha_npu *npu = airoha_npu_get(eth->dev,
1203 						&eth->ppe->foe_stats_dma);
1204 
1205 	if (IS_ERR(npu)) {
1206 		request_module("airoha-npu");
1207 		npu = airoha_npu_get(eth->dev, &eth->ppe->foe_stats_dma);
1208 	}
1209 
1210 	return npu;
1211 }
1212 
1213 static int airoha_ppe_offload_setup(struct airoha_eth *eth)
1214 {
1215 	struct airoha_npu *npu = airoha_ppe_npu_get(eth);
1216 	int err;
1217 
1218 	if (IS_ERR(npu))
1219 		return PTR_ERR(npu);
1220 
1221 	err = npu->ops.ppe_init(npu);
1222 	if (err)
1223 		goto error_npu_put;
1224 
1225 	airoha_ppe_hw_init(eth->ppe);
1226 	err = airoha_ppe_flush_sram_entries(eth->ppe, npu);
1227 	if (err)
1228 		goto error_npu_put;
1229 
1230 	airoha_ppe_foe_flow_stats_reset(eth->ppe, npu);
1231 
1232 	rcu_assign_pointer(eth->npu, npu);
1233 	synchronize_rcu();
1234 
1235 	return 0;
1236 
1237 error_npu_put:
1238 	airoha_npu_put(npu);
1239 
1240 	return err;
1241 }
1242 
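/* tc block callback entry point: lazily set up the NPU/PPE offload state on
 * first use, then dispatch the flower REPLACE/DESTROY/STATS command.
 *
 * A minimal sketch of the expected caller (assumed here for illustration;
 * the actual ndo_setup_tc wiring lives outside this file):
 *
 *	static int airoha_dev_tc_cb(enum tc_setup_type type, void *type_data,
 *				    void *cb_priv)
 *	{
 *		struct net_device *dev = cb_priv;
 *
 *		if (type != TC_SETUP_CLSFLOWER)
 *			return -EOPNOTSUPP;
 *
 *		return airoha_ppe_setup_tc_block_cb(dev, type_data);
 *	}
 */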
1243 int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data)
1244 {
1245 	struct airoha_gdm_port *port = netdev_priv(dev);
1246 	struct flow_cls_offload *cls = type_data;
1247 	struct airoha_eth *eth = port->qdma->eth;
1248 	int err = 0;
1249 
1250 	mutex_lock(&flow_offload_mutex);
1251 
1252 	if (!eth->npu)
1253 		err = airoha_ppe_offload_setup(eth);
1254 	if (!err)
1255 		err = airoha_ppe_flow_offload_cmd(port, cls);
1256 
1257 	mutex_unlock(&flow_offload_mutex);
1258 
1259 	return err;
1260 }
1261 
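/* Rate-limited hook for RX packets carrying a PPE hash: at most once every
 * HZ / 10 jiffies per hash, try to bind the corresponding FOE entry to a
 * known software flow.
 */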
1262 void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
1263 			  u16 hash)
1264 {
1265 	u16 now, diff;
1266 
1267 	if (hash > PPE_HASH_MASK)
1268 		return;
1269 
1270 	now = (u16)jiffies;
1271 	diff = now - ppe->foe_check_time[hash];
1272 	if (diff < HZ / 10)
1273 		return;
1274 
1275 	ppe->foe_check_time[hash] = now;
1276 	airoha_ppe_foe_insert_entry(ppe, skb, hash);
1277 }
1278 
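/* Program the port MAC address into the PPE update memory: the low four
 * address bytes are written first, then the top two bytes at offset 1.
 */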
1279 void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
1280 {
1281 	struct airoha_eth *eth = port->qdma->eth;
1282 	struct net_device *dev = port->dev;
1283 	const u8 *addr = dev->dev_addr;
1284 	u32 val;
1285 
1286 	val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
1287 	airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
1288 	airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
1289 		     FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
1290 		     PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
1291 
1292 	val = (addr[0] << 8) | addr[1];
1293 	airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
1294 	airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
1295 		     FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
1296 		     FIELD_PREP(PPE_UPDMEM_OFFSET_MASK, 1) |
1297 		     PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
1298 }
1299 
1300 int airoha_ppe_init(struct airoha_eth *eth)
1301 {
1302 	struct airoha_ppe *ppe;
1303 	int foe_size, err;
1304 
1305 	ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
1306 	if (!ppe)
1307 		return -ENOMEM;
1308 
1309 	foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
1310 	ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
1311 				       GFP_KERNEL);
1312 	if (!ppe->foe)
1313 		return -ENOMEM;
1314 
1315 	ppe->eth = eth;
1316 	eth->ppe = ppe;
1317 
1318 	ppe->foe_flow = devm_kzalloc(eth->dev,
1319 				     PPE_NUM_ENTRIES * sizeof(*ppe->foe_flow),
1320 				     GFP_KERNEL);
1321 	if (!ppe->foe_flow)
1322 		return -ENOMEM;
1323 
1324 	foe_size = PPE_STATS_NUM_ENTRIES * sizeof(*ppe->foe_stats);
1325 	if (foe_size) {
1326 		ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size,
1327 						     &ppe->foe_stats_dma,
1328 						     GFP_KERNEL);
1329 		if (!ppe->foe_stats)
1330 			return -ENOMEM;
1331 	}
1332 
1333 	err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
1334 	if (err)
1335 		return err;
1336 
1337 	err = rhashtable_init(&ppe->l2_flows, &airoha_l2_flow_table_params);
1338 	if (err)
1339 		goto error_flow_table_destroy;
1340 
1341 	err = airoha_ppe_debugfs_init(ppe);
1342 	if (err)
1343 		goto error_l2_flow_table_destroy;
1344 
1345 	return 0;
1346 
1347 error_l2_flow_table_destroy:
1348 	rhashtable_destroy(&ppe->l2_flows);
1349 error_flow_table_destroy:
1350 	rhashtable_destroy(&eth->flow_table);
1351 
1352 	return err;
1353 }
1354 
1355 void airoha_ppe_deinit(struct airoha_eth *eth)
1356 {
1357 	struct airoha_npu *npu;
1358 
1359 	rcu_read_lock();
1360 	npu = rcu_dereference(eth->npu);
1361 	if (npu) {
1362 		npu->ops.ppe_deinit(npu);
1363 		airoha_npu_put(npu);
1364 	}
1365 	rcu_read_unlock();
1366 
1367 	rhashtable_destroy(&eth->ppe->l2_flows);
1368 	rhashtable_destroy(&eth->flow_table);
1369 	debugfs_remove(eth->ppe->debugfs_dir);
1370 }
1371