// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2025 AIROHA Inc
 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/ipv6.h>
#include <net/pkt_cls.h>

#include "airoha_npu.h"
#include "airoha_regs.h"
#include "airoha_eth.h"

static DEFINE_MUTEX(flow_offload_mutex);
static DEFINE_SPINLOCK(ppe_lock);

static const struct rhashtable_params airoha_flow_table_params = {
	.head_offset = offsetof(struct airoha_flow_table_entry, node),
	.key_offset = offsetof(struct airoha_flow_table_entry, cookie),
	.key_len = sizeof(unsigned long),
	.automatic_shrinking = true,
};

static const struct rhashtable_params airoha_l2_flow_table_params = {
	.head_offset = offsetof(struct airoha_flow_table_entry, l2_node),
	.key_offset = offsetof(struct airoha_flow_table_entry, data.bridge),
	.key_len = 2 * ETH_ALEN,
	.automatic_shrinking = true,
};

static bool airoha_ppe2_is_enabled(struct airoha_eth *eth)
{
	return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK;
}

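/* Return the current FoE timestamp from the frame engine, masked to the
 * width of the IB1 bind-timestamp field.
 */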
static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
{
	u16 timestamp = airoha_fe_rr(ppe->eth, REG_FE_FOE_TS);

	return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp);
}

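/* Program ageing deltas, hash configuration, hash seed and per-port egress
 * MTU for each PPE instance, then partition the FoE table between SRAM and
 * DRAM depending on whether the second PPE is enabled.
 */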
static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
{
	u32 sram_tb_size, sram_num_entries, dram_num_entries;
	struct airoha_eth *eth = ppe->eth;
	int i;

	sram_tb_size = PPE_SRAM_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
	dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);

	for (i = 0; i < PPE_NUM; i++) {
		int p;

		airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
			     ppe->foe_dma + sram_tb_size);

		airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
			      PPE_BIND_AGE0_DELTA_NON_L4 |
			      PPE_BIND_AGE0_DELTA_UDP,
			      FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) |
			      FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12));
		airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
			      PPE_BIND_AGE1_DELTA_TCP_FIN |
			      PPE_BIND_AGE1_DELTA_TCP,
			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7));

		airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
			      PPE_SRAM_TABLE_EN_MASK |
			      PPE_SRAM_HASH1_EN_MASK |
			      PPE_DRAM_TABLE_EN_MASK |
			      PPE_SRAM_HASH0_MODE_MASK |
			      PPE_SRAM_HASH1_MODE_MASK |
			      PPE_DRAM_HASH0_MODE_MASK |
			      PPE_DRAM_HASH1_MODE_MASK,
			      FIELD_PREP(PPE_SRAM_TABLE_EN_MASK, 1) |
			      FIELD_PREP(PPE_SRAM_HASH1_EN_MASK, 1) |
			      FIELD_PREP(PPE_SRAM_HASH1_MODE_MASK, 1) |
			      FIELD_PREP(PPE_DRAM_HASH1_MODE_MASK, 3));

		airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
			      PPE_TB_CFG_SEARCH_MISS_MASK |
			      PPE_TB_CFG_KEEPALIVE_MASK |
			      PPE_TB_ENTRY_SIZE_MASK,
			      FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
			      FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0));

		airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);

		for (p = 0; p < ARRAY_SIZE(eth->ports); p++)
			airoha_fe_rmw(eth, REG_PPE_MTU(i, p),
				      FP0_EGRESS_MTU_MASK |
				      FP1_EGRESS_MTU_MASK,
				      FIELD_PREP(FP0_EGRESS_MTU_MASK,
						 AIROHA_MAX_MTU) |
				      FIELD_PREP(FP1_EGRESS_MTU_MASK,
						 AIROHA_MAX_MTU));
	}

	if (airoha_ppe2_is_enabled(eth)) {
		sram_num_entries =
			PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_DATA_ENTRIES);
		airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
			      PPE_SRAM_TB_NUM_ENTRY_MASK |
			      PPE_DRAM_TB_NUM_ENTRY_MASK,
			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
					 sram_num_entries) |
			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
					 dram_num_entries));
		airoha_fe_rmw(eth, REG_PPE_TB_CFG(1),
			      PPE_SRAM_TB_NUM_ENTRY_MASK |
			      PPE_DRAM_TB_NUM_ENTRY_MASK,
			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
					 sram_num_entries) |
			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
					 dram_num_entries));
	} else {
		sram_num_entries =
			PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_DATA_ENTRIES);
		airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
			      PPE_SRAM_TB_NUM_ENTRY_MASK |
			      PPE_DRAM_TB_NUM_ENTRY_MASK,
			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
					 sram_num_entries) |
			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
					 dram_num_entries));
	}
}

static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act,
				       void *eth)
{
	void *dest = eth + act->mangle.offset;
	const void *src = &act->mangle.val;

	if (act->mangle.offset > 8)
		return;

	if (act->mangle.mask == 0xffff) {
		src += 2;
		dest += 2;
	}

	memcpy(dest, src, act->mangle.mask ? 2 : 4);
}

static int airoha_ppe_flow_mangle_ports(const struct flow_action_entry *act,
					struct airoha_flow_data *data)
{
	u32 val = be32_to_cpu((__force __be32)act->mangle.val);

	switch (act->mangle.offset) {
	case 0:
		if ((__force __be32)act->mangle.mask == ~cpu_to_be32(0xffff))
			data->dst_port = cpu_to_be16(val);
		else
			data->src_port = cpu_to_be16(val >> 16);
		break;
	case 2:
		data->dst_port = cpu_to_be16(val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
				       struct airoha_flow_data *data)
{
	__be32 *dest;

	switch (act->mangle.offset) {
	case offsetof(struct iphdr, saddr):
		dest = &data->v4.src_addr;
		break;
	case offsetof(struct iphdr, daddr):
		dest = &data->v4.dst_addr;
		break;
	default:
		return -EINVAL;
	}

	memcpy(dest, &act->mangle.val, sizeof(u32));

	return 0;
}

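/* If @dev is a DSA user port, replace it with its conduit device and return
 * the DSA port index; return -ENODEV otherwise.
 */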
static int airoha_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dp = dsa_port_from_netdev(*dev);

	if (IS_ERR(dp))
		return -ENODEV;

	*dev = dsa_port_to_conduit(dp);
	return dp->index;
#else
	return -ENODEV;
#endif
}

static void airoha_ppe_foe_set_bridge_addrs(struct airoha_foe_bridge *br,
					    struct ethhdr *eh)
{
	br->dest_mac_hi = get_unaligned_be32(eh->h_dest);
	br->dest_mac_lo = get_unaligned_be16(eh->h_dest + 4);
	br->src_mac_hi = get_unaligned_be16(eh->h_source);
	br->src_mac_lo = get_unaligned_be32(eh->h_source + 2);
}

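/* Fill @hwe with the IB1/IB2 words, L2 MAC info and VLAN data derived from
 * the offloaded flow, according to the FoE packet @type.
 */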
static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
					struct airoha_foe_entry *hwe,
					struct net_device *dev, int type,
					struct airoha_flow_data *data,
					int l4proto)
{
	int dsa_port = airoha_get_dsa_port(&dev);
	struct airoha_foe_mac_info_common *l2;
	u32 qdata, ports_pad, val;
	u8 smac_id = 0xf;

	memset(hwe, 0, sizeof(*hwe));

	val = FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, AIROHA_FOE_STATE_BIND) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_PACKET_TYPE, type) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
	      AIROHA_FOE_IB1_BIND_TTL;
	hwe->ib1 = val;

	val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f) |
	      AIROHA_FOE_IB2_PSE_QOS;
	if (dsa_port >= 0)
		val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, dsa_port);

	if (dev) {
		struct airoha_gdm_port *port = netdev_priv(dev);
		u8 pse_port;

		if (!airoha_is_valid_gdm_port(eth, port))
			return -EINVAL;

		if (dsa_port >= 0)
			pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
		else
			pse_port = 2; /* uplink relies on GDM2 loopback */
		val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);

		/* For downlink traffic, consume SRAM memory for the hw
		 * forwarding descriptor queue.
		 */
		if (airhoa_is_lan_gdm_port(port))
			val |= AIROHA_FOE_IB2_FAST_PATH;

		smac_id = port->id;
	}

	if (is_multicast_ether_addr(data->eth.h_dest))
		val |= AIROHA_FOE_IB2_MULTICAST;

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == PPE_PKT_TYPE_IPV4_ROUTE)
		hwe->ipv4.orig_tuple.ports = ports_pad;
	if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
		hwe->ipv6.ports = ports_pad;

	qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f);
	if (type == PPE_PKT_TYPE_BRIDGE) {
		airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth);
		hwe->bridge.data = qdata;
		hwe->bridge.ib2 = val;
		l2 = &hwe->bridge.l2.common;
	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		hwe->ipv6.data = qdata;
		hwe->ipv6.ib2 = val;
		l2 = &hwe->ipv6.l2;
	} else {
		hwe->ipv4.data = qdata;
		hwe->ipv4.ib2 = val;
		l2 = &hwe->ipv4.l2.common;
	}

	l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
	l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
	if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
		l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
		hwe->ipv4.l2.src_mac_lo =
			get_unaligned_be16(data->eth.h_source + 4);
	} else {
		l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id);
	}

	if (data->vlan.num) {
		l2->etype = dsa_port >= 0 ? BIT(dsa_port) : 0;
		l2->vlan1 = data->vlan.hdr[0].id;
		if (data->vlan.num == 2)
			l2->vlan2 = data->vlan.hdr[1].id;
	} else if (dsa_port >= 0) {
		l2->etype = BIT(15) | BIT(dsa_port);
	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		l2->etype = ETH_P_IPV6;
	} else {
		l2->etype = ETH_P_IP;
	}

	return 0;
}

static int airoha_ppe_foe_entry_set_ipv4_tuple(struct airoha_foe_entry *hwe,
					       struct airoha_flow_data *data,
					       bool egress)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	struct airoha_foe_ipv4_tuple *t;

	switch (type) {
	case PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &hwe->ipv4.new_tuple;
			break;
		}
		fallthrough;
	case PPE_PKT_TYPE_IPV4_DSLITE:
	case PPE_PKT_TYPE_IPV4_ROUTE:
		t = &hwe->ipv4.orig_tuple;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(data->v4.src_addr);
	t->dest_ip = be32_to_cpu(data->v4.dst_addr);

	if (type != PPE_PKT_TYPE_IPV4_ROUTE) {
		t->src_port = be16_to_cpu(data->src_port);
		t->dest_port = be16_to_cpu(data->dst_port);
	}

	return 0;
}

static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
					       struct airoha_flow_data *data)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 *src, *dest;

	switch (type) {
	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case PPE_PKT_TYPE_IPV6_6RD:
		hwe->ipv6.src_port = be16_to_cpu(data->src_port);
		hwe->ipv6.dest_port = be16_to_cpu(data->dst_port);
		fallthrough;
	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = hwe->ipv6.src_ip;
		dest = hwe->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	ipv6_addr_be32_to_cpu(src, data->v6.src_addr.s6_addr32);
	ipv6_addr_be32_to_cpu(dest, data->v6.dst_addr.s6_addr32);

	return 0;
}

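/* Compute the FoE table bucket for @hwe from its IP tuple (or bridge MAC
 * addresses). This is assumed to mirror the hardware hashing scheme so that
 * software lookups land on the same index the PPE uses.
 */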
static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 hash, hv1, hv2, hv3;

	switch (type) {
	case PPE_PKT_TYPE_IPV4_ROUTE:
	case PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = hwe->ipv4.orig_tuple.ports;
		hv2 = hwe->ipv4.orig_tuple.dest_ip;
		hv3 = hwe->ipv4.orig_tuple.src_ip;
		break;
	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = hwe->ipv6.src_ip[3] ^ hwe->ipv6.dest_ip[3];
		hv1 ^= hwe->ipv6.ports;

		hv2 = hwe->ipv6.src_ip[2] ^ hwe->ipv6.dest_ip[2];
		hv2 ^= hwe->ipv6.dest_ip[0];

		hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
		hv3 ^= hwe->ipv6.src_ip[0];
		break;
	case PPE_PKT_TYPE_BRIDGE: {
		struct airoha_foe_mac_info *l2 = &hwe->bridge.l2;

		hv1 = l2->common.src_mac_hi & 0xffff;
		hv1 = hv1 << 16 | l2->src_mac_lo;

		hv2 = l2->common.dest_mac_lo;
		hv2 = hv2 << 16;
		hv2 = hv2 | ((l2->common.src_mac_hi & 0xffff0000) >> 16);

		hv3 = l2->common.dest_mac_hi;
		break;
	}
	case PPE_PKT_TYPE_IPV4_DSLITE:
	case PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash &= PPE_NUM_ENTRIES - 1;

	return hash;
}

static u32 airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, u32 hash)
{
	if (!airoha_ppe2_is_enabled(ppe->eth))
		return hash;

	return hash >= PPE_STATS_NUM_ENTRIES ? hash - PPE1_STATS_NUM_ENTRIES
					     : hash;
}

static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe,
						 struct airoha_npu *npu,
						 int index)
{
	memset_io(&npu->stats[index], 0, sizeof(*npu->stats));
	memset(&ppe->foe_stats[index], 0, sizeof(*ppe->foe_stats));
}

static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe,
					    struct airoha_npu *npu)
{
	int i;

	for (i = 0; i < PPE_STATS_NUM_ENTRIES; i++)
		airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i);
}

static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
					     struct airoha_npu *npu,
					     struct airoha_foe_entry *hwe,
					     u32 hash)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 index, pse_port, val, *data, *ib2, *meter;
	u8 nbq;

	index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
	if (index >= PPE_STATS_NUM_ENTRIES)
		return;

	if (type == PPE_PKT_TYPE_BRIDGE) {
		data = &hwe->bridge.data;
		ib2 = &hwe->bridge.ib2;
		meter = &hwe->bridge.l2.meter;
	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		data = &hwe->ipv6.data;
		ib2 = &hwe->ipv6.ib2;
		meter = &hwe->ipv6.meter;
	} else {
		data = &hwe->ipv4.data;
		ib2 = &hwe->ipv4.ib2;
		meter = &hwe->ipv4.l2.meter;
	}

	airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index);

	val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data);
	*data = (*data & ~AIROHA_FOE_ACTDP) |
		FIELD_PREP(AIROHA_FOE_ACTDP, val);

	val = *ib2 & (AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
		      AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH);
	*meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val);

	pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
	nbq = pse_port == 1 ? 6 : 5;
	*ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
		  AIROHA_FOE_IB2_PSE_QOS);
	*ib2 |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, 6) |
		FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
}

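/* Return a pointer to the FoE entry for @hash. Entries resident in PPE SRAM
 * are first read back into the DRAM shadow table via REG_PPE_RAM_CTRL.
 */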
struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
						  u32 hash)
{
	if (hash < PPE_SRAM_NUM_ENTRIES) {
		u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
		struct airoha_eth *eth = ppe->eth;
		bool ppe2;
		u32 val;
		int i;

		ppe2 = airoha_ppe2_is_enabled(ppe->eth) &&
		       hash >= PPE1_SRAM_NUM_ENTRIES;
		airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
			     FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
			     PPE_SRAM_CTRL_REQ_MASK);
		if (read_poll_timeout_atomic(airoha_fe_rr, val,
					     val & PPE_SRAM_CTRL_ACK_MASK,
					     10, 100, false, eth,
					     REG_PPE_RAM_CTRL(ppe2)))
			return NULL;

		for (i = 0; i < sizeof(struct airoha_foe_entry) / 4; i++)
			hwe[i] = airoha_fe_rr(eth,
					      REG_PPE_RAM_ENTRY(ppe2, i));
	}

	return ppe->foe + hash * sizeof(struct airoha_foe_entry);
}

static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
					 struct airoha_foe_entry *hwe)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
	int len;

	if ((hwe->ib1 ^ e->data.ib1) & AIROHA_FOE_IB1_BIND_UDP)
		return false;

	if (type > PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct airoha_foe_entry, ipv6.data);
	else
		len = offsetof(struct airoha_foe_entry, ipv4.ib2);

	return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
}

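/* Write @e into the DRAM FoE table at @hash, refresh its bind timestamp and,
 * for SRAM-resident hashes, ask the NPU to commit the entry to PPE SRAM.
 */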
static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
					struct airoha_foe_entry *e,
					u32 hash)
{
	struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
	u32 ts = airoha_ppe_get_timestamp(ppe);
	struct airoha_eth *eth = ppe->eth;
	struct airoha_npu *npu;
	int err = 0;

	memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
	wmb();

	e->ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
	e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
	hwe->ib1 = e->ib1;

	rcu_read_lock();

	npu = rcu_dereference(eth->npu);
	if (!npu) {
		err = -ENODEV;
		goto unlock;
	}

	airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);

	if (hash < PPE_SRAM_NUM_ENTRIES) {
		dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
		bool ppe2 = airoha_ppe2_is_enabled(eth) &&
			    hash >= PPE1_SRAM_NUM_ENTRIES;

		err = npu->ops.ppe_foe_commit_entry(npu, addr, sizeof(*hwe),
						    hash, ppe2);
	}
unlock:
	rcu_read_unlock();

	return err;
}

static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe,
				       struct airoha_flow_table_entry *e)
{
	lockdep_assert_held(&ppe_lock);

	hlist_del_init(&e->list);
	if (e->hash != 0xffff) {
		e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
		e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
					  AIROHA_FOE_STATE_INVALID);
		airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
		e->hash = 0xffff;
	}
	if (e->type == FLOW_TYPE_L2_SUBFLOW) {
		hlist_del_init(&e->l2_subflow_node);
		kfree(e);
	}
}

static void airoha_ppe_foe_remove_l2_flow(struct airoha_ppe *ppe,
					  struct airoha_flow_table_entry *e)
{
	struct hlist_head *head = &e->l2_flows;
	struct hlist_node *n;

	lockdep_assert_held(&ppe_lock);

	rhashtable_remove_fast(&ppe->l2_flows, &e->l2_node,
			       airoha_l2_flow_table_params);
	hlist_for_each_entry_safe(e, n, head, l2_subflow_node)
		airoha_ppe_foe_remove_flow(ppe, e);
}

static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
					     struct airoha_flow_table_entry *e)
{
	spin_lock_bh(&ppe_lock);

	if (e->type == FLOW_TYPE_L2)
		airoha_ppe_foe_remove_l2_flow(ppe, e);
	else
		airoha_ppe_foe_remove_flow(ppe, e);

	spin_unlock_bh(&ppe_lock);
}

static int
airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
				    struct airoha_flow_table_entry *e,
				    u32 hash)
{
	u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
	struct airoha_foe_entry *hwe_p, hwe;
	struct airoha_flow_table_entry *f;
	int type;

	hwe_p = airoha_ppe_foe_get_entry(ppe, hash);
	if (!hwe_p)
		return -EINVAL;

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -ENOMEM;

	hlist_add_head(&f->l2_subflow_node, &e->l2_flows);
	f->type = FLOW_TYPE_L2_SUBFLOW;
	f->hash = hash;

	memcpy(&hwe, hwe_p, sizeof(*hwe_p));
	hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);

	type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
	if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		memcpy(&hwe.ipv6.l2, &e->data.bridge.l2, sizeof(hwe.ipv6.l2));
		hwe.ipv6.ib2 = e->data.bridge.ib2;
		/* setting smac_id to 0xf instructs the hw to keep the
		 * original source mac address
		 */
		hwe.ipv6.l2.src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID,
						    0xf);
	} else {
		memcpy(&hwe.bridge.l2, &e->data.bridge.l2,
		       sizeof(hwe.bridge.l2));
		hwe.bridge.ib2 = e->data.bridge.ib2;
		if (type == PPE_PKT_TYPE_IPV4_HNAPT)
			memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
			       sizeof(hwe.ipv4.new_tuple));
	}

	hwe.bridge.data = e->data.bridge.data;
	airoha_ppe_foe_commit_entry(ppe, &hwe, hash);

	return 0;
}

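/* Called on an unbound FoE hit: look for a software flow matching the
 * hardware entry at @hash and bind it, falling back to the L2 flow table to
 * create a bridge subflow when no exact match is found.
 */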
static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
					struct sk_buff *skb,
					u32 hash)
{
	struct airoha_flow_table_entry *e;
	struct airoha_foe_bridge br = {};
	struct airoha_foe_entry *hwe;
	bool commit_done = false;
	struct hlist_node *n;
	u32 index, state;

	spin_lock_bh(&ppe_lock);

	hwe = airoha_ppe_foe_get_entry(ppe, hash);
	if (!hwe)
		goto unlock;

	state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
	if (state == AIROHA_FOE_STATE_BIND)
		goto unlock;

	index = airoha_ppe_foe_get_entry_hash(hwe);
	hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
		if (e->type == FLOW_TYPE_L2_SUBFLOW) {
			state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
			if (state != AIROHA_FOE_STATE_BIND) {
				e->hash = 0xffff;
				airoha_ppe_foe_remove_flow(ppe, e);
			}
			continue;
		}

		if (commit_done || !airoha_ppe_foe_compare_entry(e, hwe)) {
			e->hash = 0xffff;
			continue;
		}

		airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
		commit_done = true;
		e->hash = hash;
	}

	if (commit_done)
		goto unlock;

	airoha_ppe_foe_set_bridge_addrs(&br, eth_hdr(skb));
	e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
				   airoha_l2_flow_table_params);
	if (e)
		airoha_ppe_foe_commit_subflow_entry(ppe, e, hash);
unlock:
	spin_unlock_bh(&ppe_lock);
}

static int
airoha_ppe_foe_l2_flow_commit_entry(struct airoha_ppe *ppe,
				    struct airoha_flow_table_entry *e)
{
	struct airoha_flow_table_entry *prev;

	e->type = FLOW_TYPE_L2;
	prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &e->l2_node,
						 airoha_l2_flow_table_params);
	if (!prev)
		return 0;

	if (IS_ERR(prev))
		return PTR_ERR(prev);

	return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
				       &e->l2_node,
				       airoha_l2_flow_table_params);
}

static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
					    struct airoha_flow_table_entry *e)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
	u32 hash;

	if (type == PPE_PKT_TYPE_BRIDGE)
		return airoha_ppe_foe_l2_flow_commit_entry(ppe, e);

	hash = airoha_ppe_foe_get_entry_hash(&e->data);
	e->type = FLOW_TYPE_L4;
	e->hash = 0xffff;

	spin_lock_bh(&ppe_lock);
	hlist_add_head(&e->list, &ppe->foe_flow[hash]);
	spin_unlock_bh(&ppe_lock);

	return 0;
}

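/* Return the number of timestamp units elapsed since the entry was last
 * refreshed, accounting for wraparound of the (un)bind timestamp field.
 */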
static int airoha_ppe_get_entry_idle_time(struct airoha_ppe *ppe, u32 ib1)
{
	u32 state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
	u32 ts, ts_mask, now = airoha_ppe_get_timestamp(ppe);
	int idle;

	if (state == AIROHA_FOE_STATE_BIND) {
		ts = FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, ib1);
		ts_mask = AIROHA_FOE_IB1_BIND_TIMESTAMP;
	} else {
		ts = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, ib1);
		now = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, now);
		ts_mask = AIROHA_FOE_IB1_UNBIND_TIMESTAMP;
	}
	idle = now - ts;

	return idle < 0 ? idle + ts_mask + 1 : idle;
}

static void
airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
				    struct airoha_flow_table_entry *e)
{
	int min_idle = airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
	struct airoha_flow_table_entry *iter;
	struct hlist_node *n;

	lockdep_assert_held(&ppe_lock);

	hlist_for_each_entry_safe(iter, n, &e->l2_flows, l2_subflow_node) {
		struct airoha_foe_entry *hwe;
		u32 ib1, state;
		int idle;

		hwe = airoha_ppe_foe_get_entry(ppe, iter->hash);
		if (!hwe)
			continue;

		ib1 = READ_ONCE(hwe->ib1);
		state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
		if (state != AIROHA_FOE_STATE_BIND) {
			iter->hash = 0xffff;
			airoha_ppe_foe_remove_flow(ppe, iter);
			continue;
		}

		idle = airoha_ppe_get_entry_idle_time(ppe, ib1);
		if (idle >= min_idle)
			continue;

		min_idle = idle;
		e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
		e->data.ib1 |= ib1 & AIROHA_FOE_IB1_BIND_TIMESTAMP;
	}
}

static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe,
					     struct airoha_flow_table_entry *e)
{
	struct airoha_foe_entry *hwe_p, hwe = {};

	spin_lock_bh(&ppe_lock);

	if (e->type == FLOW_TYPE_L2) {
		airoha_ppe_foe_flow_l2_entry_update(ppe, e);
		goto unlock;
	}

	if (e->hash == 0xffff)
		goto unlock;

	hwe_p = airoha_ppe_foe_get_entry(ppe, e->hash);
	if (!hwe_p)
		goto unlock;

	memcpy(&hwe, hwe_p, sizeof(*hwe_p));
	if (!airoha_ppe_foe_compare_entry(e, &hwe)) {
		e->hash = 0xffff;
		goto unlock;
	}

	e->data.ib1 = hwe.ib1;
unlock:
	spin_unlock_bh(&ppe_lock);
}

static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe,
				      struct airoha_flow_table_entry *e)
{
	airoha_ppe_foe_flow_entry_update(ppe, e);

	return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
}

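/* FLOW_CLS_REPLACE handler: validate the flower match and actions, build a
 * FoE entry for the offloaded flow and track it in the flow table.
 */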
static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
					   struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct airoha_eth *eth = port->qdma->eth;
	struct airoha_flow_table_entry *e;
	struct airoha_flow_data data = {};
	struct net_device *odev = NULL;
	struct flow_action_entry *act;
	struct airoha_foe_entry hwe;
	int err, i, offload_type;
	u16 addr_type = 0;
	u8 l4proto = 0;

	if (rhashtable_lookup(&eth->flow_table, &f->cookie,
			      airoha_flow_table_params))
		return -EEXIST;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return -EOPNOTSUPP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
		if (flow_rule_has_control_flags(match.mask->flags,
						f->common.extack))
			return -EOPNOTSUPP;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		l4proto = match.key->ip_proto;
	} else {
		return -EOPNOTSUPP;
	}

	switch (addr_type) {
	case 0:
		offload_type = PPE_PKT_TYPE_BRIDGE;
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
			struct flow_match_eth_addrs match;

			flow_rule_match_eth_addrs(rule, &match);
			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
		} else {
			return -EOPNOTSUPP;
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		offload_type = PPE_PKT_TYPE_IPV4_HNAPT;
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		offload_type = PPE_PKT_TYPE_IPV6_ROUTE_5T;
		break;
	default:
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE:
			if (offload_type == PPE_PKT_TYPE_BRIDGE)
				return -EOPNOTSUPP;

			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
				airoha_ppe_flow_mangle_eth(act, &data.eth);
			break;
		case FLOW_ACTION_REDIRECT:
			odev = act->dev;
			break;
		case FLOW_ACTION_CSUM:
			break;
		case FLOW_ACTION_VLAN_PUSH:
			if (data.vlan.num == 2 ||
			    act->vlan.proto != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan.hdr[data.vlan.num].id = act->vlan.vid;
			data.vlan.hdr[data.vlan.num].proto = act->vlan.proto;
			data.vlan.num++;
			break;
		case FLOW_ACTION_VLAN_POP:
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (!is_valid_ether_addr(data.eth.h_source) ||
	    !is_valid_ether_addr(data.eth.h_dest))
		return -EINVAL;

	err = airoha_ppe_foe_entry_prepare(eth, &hwe, odev, offload_type,
					   &data, l4proto);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports ports;

		if (offload_type == PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		flow_rule_match_ports(rule, &ports);
		data.src_port = ports.key->src;
		data.dst_port = ports.key->dst;
	} else if (offload_type != PPE_PKT_TYPE_BRIDGE) {
		return -EOPNOTSUPP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs addrs;

		flow_rule_match_ipv4_addrs(rule, &addrs);
		data.v4.src_addr = addrs.key->src;
		data.v4.dst_addr = addrs.key->dst;
		airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, false);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs addrs;

		flow_rule_match_ipv6_addrs(rule, &addrs);

		data.v6.src_addr = addrs.key->src;
		data.v6.dst_addr = addrs.key->dst;
		airoha_ppe_foe_entry_set_ipv6_tuple(&hwe, &data);
	}

	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		if (offload_type == PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			err = airoha_ppe_flow_mangle_ports(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			err = airoha_ppe_flow_mangle_ipv4(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
			/* handled earlier */
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (err)
			return err;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		err = airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, true);
		if (err)
			return err;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->cookie = f->cookie;
	memcpy(&e->data, &hwe, sizeof(e->data));

	err = airoha_ppe_foe_flow_commit_entry(eth->ppe, e);
	if (err)
		goto free_entry;

	err = rhashtable_insert_fast(&eth->flow_table, &e->node,
				     airoha_flow_table_params);
	if (err < 0)
		goto remove_foe_entry;

	return 0;

remove_foe_entry:
	airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
free_entry:
	kfree(e);

	return err;
}

static int airoha_ppe_flow_offload_destroy(struct airoha_gdm_port *port,
					   struct flow_cls_offload *f)
{
	struct airoha_eth *eth = port->qdma->eth;
	struct airoha_flow_table_entry *e;

	e = rhashtable_lookup(&eth->flow_table, &f->cookie,
			      airoha_flow_table_params);
	if (!e)
		return -ENOENT;

	airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
	rhashtable_remove_fast(&eth->flow_table, &e->node,
			       airoha_flow_table_params);
	kfree(e);

	return 0;
}

void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
				    struct airoha_foe_stats64 *stats)
{
	u32 index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
	struct airoha_eth *eth = ppe->eth;
	struct airoha_npu *npu;

	if (index >= PPE_STATS_NUM_ENTRIES)
		return;

	rcu_read_lock();

	npu = rcu_dereference(eth->npu);
	if (npu) {
		u64 packets = ppe->foe_stats[index].packets;
		u64 bytes = ppe->foe_stats[index].bytes;
		struct airoha_foe_stats npu_stats;

		memcpy_fromio(&npu_stats, &npu->stats[index],
			      sizeof(*npu->stats));
		stats->packets = packets << 32 | npu_stats.packets;
		stats->bytes = bytes << 32 | npu_stats.bytes;
	}

	rcu_read_unlock();
}

static int airoha_ppe_flow_offload_stats(struct airoha_gdm_port *port,
					 struct flow_cls_offload *f)
{
	struct airoha_eth *eth = port->qdma->eth;
	struct airoha_flow_table_entry *e;
	u32 idle;

	e = rhashtable_lookup(&eth->flow_table, &f->cookie,
			      airoha_flow_table_params);
	if (!e)
		return -ENOENT;

	idle = airoha_ppe_entry_idle_time(eth->ppe, e);
	f->stats.lastused = jiffies - idle * HZ;

	if (e->hash != 0xffff) {
		struct airoha_foe_stats64 stats = {};

		airoha_ppe_foe_entry_get_stats(eth->ppe, e->hash, &stats);
		f->stats.pkts += (stats.packets - e->stats.packets);
		f->stats.bytes += (stats.bytes - e->stats.bytes);
		e->stats = stats;
	}

	return 0;
}

static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
				       struct flow_cls_offload *f)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return airoha_ppe_flow_offload_replace(port, f);
	case FLOW_CLS_DESTROY:
		return airoha_ppe_flow_offload_destroy(port, f);
	case FLOW_CLS_STATS:
		return airoha_ppe_flow_offload_stats(port, f);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe,
					 struct airoha_npu *npu)
{
	int i, sram_num_entries = PPE_SRAM_NUM_ENTRIES;
	struct airoha_foe_entry *hwe = ppe->foe;

	if (airoha_ppe2_is_enabled(ppe->eth))
		sram_num_entries = sram_num_entries / 2;

	for (i = 0; i < sram_num_entries; i++)
		memset(&hwe[i], 0, sizeof(*hwe));

	return npu->ops.ppe_flush_sram_entries(npu, ppe->foe_dma,
					       PPE_SRAM_NUM_ENTRIES);
}

static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
{
	struct airoha_npu *npu = airoha_npu_get(eth->dev,
						&eth->ppe->foe_stats_dma);

	if (IS_ERR(npu)) {
		request_module("airoha-npu");
		npu = airoha_npu_get(eth->dev, &eth->ppe->foe_stats_dma);
	}

	return npu;
}

static int airoha_ppe_offload_setup(struct airoha_eth *eth)
{
	struct airoha_npu *npu = airoha_ppe_npu_get(eth);
	int err;

	if (IS_ERR(npu))
		return PTR_ERR(npu);

	err = npu->ops.ppe_init(npu);
	if (err)
		goto error_npu_put;

	airoha_ppe_hw_init(eth->ppe);
	err = airoha_ppe_flush_sram_entries(eth->ppe, npu);
	if (err)
		goto error_npu_put;

	airoha_ppe_foe_flow_stats_reset(eth->ppe, npu);

	rcu_assign_pointer(eth->npu, npu);
	synchronize_rcu();

	return 0;

error_npu_put:
	airoha_npu_put(npu);

	return err;
}

int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct flow_cls_offload *cls = type_data;
	struct airoha_eth *eth = port->qdma->eth;
	int err = 0;

	mutex_lock(&flow_offload_mutex);

	if (!eth->npu)
		err = airoha_ppe_offload_setup(eth);
	if (!err)
		err = airoha_ppe_flow_offload_cmd(port, cls);

	mutex_unlock(&flow_offload_mutex);

	return err;
}

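/* RX path hook: rate-limit FoE lookups to once every HZ / 10 jiffies per
 * hash before trying to bind a pending flow for this packet.
 */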
void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
			  u16 hash)
{
	u16 now, diff;

	if (hash > PPE_HASH_MASK)
		return;

	now = (u16)jiffies;
	diff = now - ppe->foe_check_time[hash];
	if (diff < HZ / 10)
		return;

	ppe->foe_check_time[hash] = now;
	airoha_ppe_foe_insert_entry(ppe, skb, hash);
}

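/* Program the port MAC address into the PPE update memory slot indexed by
 * port->id; FoE entries presumably reference it via the smac_id field when
 * the hardware rewrites the source address of offloaded frames.
 */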
void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
{
	struct airoha_eth *eth = port->qdma->eth;
	struct net_device *dev = port->dev;
	const u8 *addr = dev->dev_addr;
	u32 val;

	val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
	airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
	airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
		     FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
		     PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);

	val = (addr[0] << 8) | addr[1];
	airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
	airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
		     FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
		     FIELD_PREP(PPE_UPDMEM_OFFSET_MASK, 1) |
		     PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
}

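/* Allocate the FoE DRAM table, flow tracking structures and stats area, and
 * register the flow rhashtables and debugfs entries.
 */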
int airoha_ppe_init(struct airoha_eth *eth)
{
	struct airoha_ppe *ppe;
	int foe_size, err;

	ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
	if (!ppe)
		return -ENOMEM;

	foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
	ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
				       GFP_KERNEL);
	if (!ppe->foe)
		return -ENOMEM;

	ppe->eth = eth;
	eth->ppe = ppe;

	ppe->foe_flow = devm_kzalloc(eth->dev,
				     PPE_NUM_ENTRIES * sizeof(*ppe->foe_flow),
				     GFP_KERNEL);
	if (!ppe->foe_flow)
		return -ENOMEM;

	foe_size = PPE_STATS_NUM_ENTRIES * sizeof(*ppe->foe_stats);
	if (foe_size) {
		ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size,
						     &ppe->foe_stats_dma,
						     GFP_KERNEL);
		if (!ppe->foe_stats)
			return -ENOMEM;
	}

	err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
	if (err)
		return err;

	err = rhashtable_init(&ppe->l2_flows, &airoha_l2_flow_table_params);
	if (err)
		goto error_flow_table_destroy;

	err = airoha_ppe_debugfs_init(ppe);
	if (err)
		goto error_l2_flow_table_destroy;

	return 0;

error_l2_flow_table_destroy:
	rhashtable_destroy(&ppe->l2_flows);
error_flow_table_destroy:
	rhashtable_destroy(&eth->flow_table);

	return err;
}

void airoha_ppe_deinit(struct airoha_eth *eth)
{
	struct airoha_npu *npu;

	rcu_read_lock();
	npu = rcu_dereference(eth->npu);
	if (npu) {
		npu->ops.ppe_deinit(npu);
		airoha_npu_put(npu);
	}
	rcu_read_unlock();

	rhashtable_destroy(&eth->ppe->l2_flows);
	rhashtable_destroy(&eth->flow_table);
	debugfs_remove(eth->ppe->debugfs_dir);
}