// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2025 AIROHA Inc
 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/ipv6.h>
#include <net/pkt_cls.h>

#include "airoha_npu.h"
#include "airoha_regs.h"
#include "airoha_eth.h"

static DEFINE_MUTEX(flow_offload_mutex);
static DEFINE_SPINLOCK(ppe_lock);

static const struct rhashtable_params airoha_flow_table_params = {
	.head_offset = offsetof(struct airoha_flow_table_entry, node),
	.key_offset = offsetof(struct airoha_flow_table_entry, cookie),
	.key_len = sizeof(unsigned long),
	.automatic_shrinking = true,
};

static const struct rhashtable_params airoha_l2_flow_table_params = {
	.head_offset = offsetof(struct airoha_flow_table_entry, l2_node),
	.key_offset = offsetof(struct airoha_flow_table_entry, data.bridge),
	.key_len = 2 * ETH_ALEN,
	.automatic_shrinking = true,
};

static bool airoha_ppe2_is_enabled(struct airoha_eth *eth)
{
	return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK;
}

static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
{
	u16 timestamp = airoha_fe_rr(ppe->eth, REG_FE_FOE_TS);

	return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp);
}

static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
{
	u32 sram_tb_size, sram_num_entries, dram_num_entries;
	struct airoha_eth *eth = ppe->eth;
	int i;

	sram_tb_size = PPE_SRAM_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
	dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);

	for (i = 0; i < PPE_NUM; i++) {
		int p;

		airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
			     ppe->foe_dma + sram_tb_size);

		airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
			      PPE_BIND_AGE0_DELTA_NON_L4 |
			      PPE_BIND_AGE0_DELTA_UDP,
			      FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) |
			      FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12));
		airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
			      PPE_BIND_AGE1_DELTA_TCP_FIN |
			      PPE_BIND_AGE1_DELTA_TCP,
			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7));

		airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
			      PPE_SRAM_TABLE_EN_MASK |
			      PPE_SRAM_HASH1_EN_MASK |
			      PPE_DRAM_TABLE_EN_MASK |
			      PPE_SRAM_HASH0_MODE_MASK |
			      PPE_SRAM_HASH1_MODE_MASK |
			      PPE_DRAM_HASH0_MODE_MASK |
			      PPE_DRAM_HASH1_MODE_MASK,
			      FIELD_PREP(PPE_SRAM_TABLE_EN_MASK, 1) |
			      FIELD_PREP(PPE_SRAM_HASH1_EN_MASK, 1) |
			      FIELD_PREP(PPE_SRAM_HASH1_MODE_MASK, 1) |
			      FIELD_PREP(PPE_DRAM_HASH1_MODE_MASK, 3));

		airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
			      PPE_TB_CFG_SEARCH_MISS_MASK |
			      PPE_TB_CFG_KEEPALIVE_MASK |
			      PPE_TB_ENTRY_SIZE_MASK,
			      FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
			      FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0));

		airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);

		for (p = 0; p < ARRAY_SIZE(eth->ports); p++)
			airoha_fe_rmw(eth, REG_PPE_MTU(i, p),
				      FP0_EGRESS_MTU_MASK |
				      FP1_EGRESS_MTU_MASK,
				      FIELD_PREP(FP0_EGRESS_MTU_MASK,
						 AIROHA_MAX_MTU) |
				      FIELD_PREP(FP1_EGRESS_MTU_MASK,
						 AIROHA_MAX_MTU));
	}

	if (airoha_ppe2_is_enabled(eth)) {
		sram_num_entries =
			PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_DATA_ENTRIES);
		airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
			      PPE_SRAM_TB_NUM_ENTRY_MASK |
			      PPE_DRAM_TB_NUM_ENTRY_MASK,
			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
					 sram_num_entries) |
			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
					 dram_num_entries));
		airoha_fe_rmw(eth, REG_PPE_TB_CFG(1),
			      PPE_SRAM_TB_NUM_ENTRY_MASK |
			      PPE_DRAM_TB_NUM_ENTRY_MASK,
			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
					 sram_num_entries) |
			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
					 dram_num_entries));
	} else {
		sram_num_entries =
			PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_DATA_ENTRIES);
		airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
			      PPE_SRAM_TB_NUM_ENTRY_MASK |
			      PPE_DRAM_TB_NUM_ENTRY_MASK,
			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
					 sram_num_entries) |
			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
					 dram_num_entries));
	}
}
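
/* pedit fields for the Ethernet header are delivered as 4-byte words.
 * A mask of 0xffff selects only half of the word: shift source and
 * destination by two bytes and copy 16 bits instead of 32. Offsets
 * above 8 would point past the destination/source MAC addresses and
 * are ignored.
 */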
static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act,
				       void *eth)
{
	void *dest = eth + act->mangle.offset;
	const void *src = &act->mangle.val;

	if (act->mangle.offset > 8)
		return;

	if (act->mangle.mask == 0xffff) {
		src += 2;
		dest += 2;
	}

	memcpy(dest, src, act->mangle.mask ? 2 : 4);
}

static int airoha_ppe_flow_mangle_ports(const struct flow_action_entry *act,
					struct airoha_flow_data *data)
{
	u32 val = be32_to_cpu((__force __be32)act->mangle.val);

	switch (act->mangle.offset) {
	case 0:
		if ((__force __be32)act->mangle.mask == ~cpu_to_be32(0xffff))
			data->dst_port = cpu_to_be16(val);
		else
			data->src_port = cpu_to_be16(val >> 16);
		break;
	case 2:
		data->dst_port = cpu_to_be16(val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
				       struct airoha_flow_data *data)
{
	__be32 *dest;

	switch (act->mangle.offset) {
	case offsetof(struct iphdr, saddr):
		dest = &data->v4.src_addr;
		break;
	case offsetof(struct iphdr, daddr):
		dest = &data->v4.dst_addr;
		break;
	default:
		return -EINVAL;
	}

	memcpy(dest, &act->mangle.val, sizeof(u32));

	return 0;
}

static int airoha_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dp = dsa_port_from_netdev(*dev);

	if (IS_ERR(dp))
		return -ENODEV;

	*dev = dsa_port_to_conduit(dp);
	return dp->index;
#else
	return -ENODEV;
#endif
}

static void airoha_ppe_foe_set_bridge_addrs(struct airoha_foe_bridge *br,
					    struct ethhdr *eh)
{
	br->dest_mac_hi = get_unaligned_be32(eh->h_dest);
	br->dest_mac_lo = get_unaligned_be16(eh->h_dest + 4);
	br->src_mac_hi = get_unaligned_be16(eh->h_source);
	br->src_mac_lo = get_unaligned_be32(eh->h_source + 2);
}
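
/* Build the hw FOE entry for an offloaded flow: ib1 describes the bind
 * state, packet type and VLAN/PPPoE layout, while ib2 carries the
 * forwarding decision. For DSA-tagged traffic the user port is encoded
 * in the NBQ field and the GDM port is used directly; otherwise the
 * flow goes out through the GDM2 loopback used for uplink.
 */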
static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
					struct airoha_foe_entry *hwe,
					struct net_device *dev, int type,
					struct airoha_flow_data *data,
					int l4proto)
{
	int dsa_port = airoha_get_dsa_port(&dev);
	struct airoha_foe_mac_info_common *l2;
	u32 qdata, ports_pad, val;
	u8 smac_id = 0xf;

	memset(hwe, 0, sizeof(*hwe));

	val = FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, AIROHA_FOE_STATE_BIND) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_PACKET_TYPE, type) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
	      FIELD_PREP(AIROHA_FOE_IB1_BIND_PPPOE, data->pppoe.num) |
	      AIROHA_FOE_IB1_BIND_TTL;
	hwe->ib1 = val;

	val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f) |
	      AIROHA_FOE_IB2_PSE_QOS;
	if (dsa_port >= 0)
		val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, dsa_port);

	if (dev) {
		struct airoha_gdm_port *port = netdev_priv(dev);
		u8 pse_port;

		if (!airoha_is_valid_gdm_port(eth, port))
			return -EINVAL;

		if (dsa_port >= 0)
			pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
		else
			pse_port = 2; /* uplink relies on GDM2 loopback */
		val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);

		/* For downlink traffic, consume SRAM memory for the hw
		 * forwarding descriptor queue.
		 */
		if (airhoa_is_lan_gdm_port(port))
			val |= AIROHA_FOE_IB2_FAST_PATH;

		smac_id = port->id;
	}

	if (is_multicast_ether_addr(data->eth.h_dest))
		val |= AIROHA_FOE_IB2_MULTICAST;

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == PPE_PKT_TYPE_IPV4_ROUTE)
		hwe->ipv4.orig_tuple.ports = ports_pad;
	if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
		hwe->ipv6.ports = ports_pad;

	qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f);
	if (type == PPE_PKT_TYPE_BRIDGE) {
		airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth);
		hwe->bridge.data = qdata;
		hwe->bridge.ib2 = val;
		l2 = &hwe->bridge.l2.common;
	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		hwe->ipv6.data = qdata;
		hwe->ipv6.ib2 = val;
		l2 = &hwe->ipv6.l2;
		l2->etype = ETH_P_IPV6;
	} else {
		hwe->ipv4.data = qdata;
		hwe->ipv4.ib2 = val;
		l2 = &hwe->ipv4.l2.common;
		l2->etype = ETH_P_IP;
	}

	l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
	l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
	if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
		struct airoha_foe_mac_info *mac_info;

		l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
		hwe->ipv4.l2.src_mac_lo =
			get_unaligned_be16(data->eth.h_source + 4);

		mac_info = (struct airoha_foe_mac_info *)l2;
		mac_info->pppoe_id = data->pppoe.sid;
	} else {
		l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id) |
				 FIELD_PREP(AIROHA_FOE_MAC_PPPOE_ID,
					    data->pppoe.sid);
	}

	if (data->vlan.num) {
		l2->vlan1 = data->vlan.hdr[0].id;
		if (data->vlan.num == 2)
			l2->vlan2 = data->vlan.hdr[1].id;
	}

	if (dsa_port >= 0) {
		l2->etype = BIT(dsa_port);
		l2->etype |= !data->vlan.num ? BIT(15) : 0;
	} else if (data->pppoe.num) {
		l2->etype = ETH_P_PPP_SES;
	}

	return 0;
}
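
/* IPv4 HNAPT entries carry two tuples: the original one, matched on
 * lookup, and the translated one applied on egress. Routed and DSLITE
 * entries only use the original tuple, and plain routed entries do not
 * carry L4 ports.
 */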
static int airoha_ppe_foe_entry_set_ipv4_tuple(struct airoha_foe_entry *hwe,
					       struct airoha_flow_data *data,
					       bool egress)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	struct airoha_foe_ipv4_tuple *t;

	switch (type) {
	case PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &hwe->ipv4.new_tuple;
			break;
		}
		fallthrough;
	case PPE_PKT_TYPE_IPV4_DSLITE:
	case PPE_PKT_TYPE_IPV4_ROUTE:
		t = &hwe->ipv4.orig_tuple;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(data->v4.src_addr);
	t->dest_ip = be32_to_cpu(data->v4.dst_addr);

	if (type != PPE_PKT_TYPE_IPV4_ROUTE) {
		t->src_port = be16_to_cpu(data->src_port);
		t->dest_port = be16_to_cpu(data->dst_port);
	}

	return 0;
}

static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
					       struct airoha_flow_data *data)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 *src, *dest;

	switch (type) {
	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case PPE_PKT_TYPE_IPV6_6RD:
		hwe->ipv6.src_port = be16_to_cpu(data->src_port);
		hwe->ipv6.dest_port = be16_to_cpu(data->dst_port);
		fallthrough;
	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = hwe->ipv6.src_ip;
		dest = hwe->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	ipv6_addr_be32_to_cpu(src, data->v6.src_addr.s6_addr32);
	ipv6_addr_be32_to_cpu(dest, data->v6.dst_addr.s6_addr32);

	return 0;
}
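
/* Fold the tuple (or the MAC addresses for bridged flows) into the FOE
 * bucket index used by the hw lookup. DSLITE and 6RD entries are not
 * hashed by the driver and return the PPE_HASH_MASK sentinel instead.
 */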
static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 hash, hv1, hv2, hv3;

	switch (type) {
	case PPE_PKT_TYPE_IPV4_ROUTE:
	case PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = hwe->ipv4.orig_tuple.ports;
		hv2 = hwe->ipv4.orig_tuple.dest_ip;
		hv3 = hwe->ipv4.orig_tuple.src_ip;
		break;
	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = hwe->ipv6.src_ip[3] ^ hwe->ipv6.dest_ip[3];
		hv1 ^= hwe->ipv6.ports;

		hv2 = hwe->ipv6.src_ip[2] ^ hwe->ipv6.dest_ip[2];
		hv2 ^= hwe->ipv6.dest_ip[0];

		hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
		hv3 ^= hwe->ipv6.src_ip[0];
		break;
	case PPE_PKT_TYPE_BRIDGE: {
		struct airoha_foe_mac_info *l2 = &hwe->bridge.l2;

		hv1 = l2->common.src_mac_hi & 0xffff;
		hv1 = hv1 << 16 | l2->src_mac_lo;

		hv2 = l2->common.dest_mac_lo;
		hv2 = hv2 << 16;
		hv2 = hv2 | ((l2->common.src_mac_hi & 0xffff0000) >> 16);

		hv3 = l2->common.dest_mac_hi;
		break;
	}
	case PPE_PKT_TYPE_IPV4_DSLITE:
	case PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash &= PPE_NUM_ENTRIES - 1;

	return hash;
}

static u32 airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, u32 hash)
{
	if (!airoha_ppe2_is_enabled(ppe->eth))
		return hash;

	return hash >= PPE_STATS_NUM_ENTRIES ? hash - PPE1_STATS_NUM_ENTRIES
					     : hash;
}

static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe,
						 struct airoha_npu *npu,
						 int index)
{
	memset_io(&npu->stats[index], 0, sizeof(*npu->stats));
	memset(&ppe->foe_stats[index], 0, sizeof(*ppe->foe_stats));
}

static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe,
					    struct airoha_npu *npu)
{
	int i;

	for (i = 0; i < PPE_STATS_NUM_ENTRIES; i++)
		airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i);
}
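
/* Hook a FOE entry up to a flow counter slot: the original
 * channel/queue pair is parked in the ACTDP field and the relevant ib2
 * forwarding bits are stashed in the meter field, while ib2 is
 * rewritten to steer the flow through PSE port 6 so the (NPU-managed)
 * per-flow counters get updated.
 */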
static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
					     struct airoha_npu *npu,
					     struct airoha_foe_entry *hwe,
					     u32 hash)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
	u32 index, pse_port, val, *data, *ib2, *meter;
	u8 nbq;

	index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
	if (index >= PPE_STATS_NUM_ENTRIES)
		return;

	if (type == PPE_PKT_TYPE_BRIDGE) {
		data = &hwe->bridge.data;
		ib2 = &hwe->bridge.ib2;
		meter = &hwe->bridge.l2.meter;
	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		data = &hwe->ipv6.data;
		ib2 = &hwe->ipv6.ib2;
		meter = &hwe->ipv6.meter;
	} else {
		data = &hwe->ipv4.data;
		ib2 = &hwe->ipv4.ib2;
		meter = &hwe->ipv4.l2.meter;
	}

	airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index);

	val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data);
	*data = (*data & ~AIROHA_FOE_ACTDP) |
		FIELD_PREP(AIROHA_FOE_ACTDP, val);

	val = *ib2 & (AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
		      AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH);
	*meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val);

	pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
	nbq = pse_port == 1 ? 6 : 5;
	*ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
		  AIROHA_FOE_IB2_PSE_QOS);
	*ib2 |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, 6) |
		FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
}

struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
						  u32 hash)
{
	if (hash < PPE_SRAM_NUM_ENTRIES) {
		u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
		struct airoha_eth *eth = ppe->eth;
		bool ppe2;
		u32 val;
		int i;

		ppe2 = airoha_ppe2_is_enabled(ppe->eth) &&
		       hash >= PPE1_SRAM_NUM_ENTRIES;
		airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
			     FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
			     PPE_SRAM_CTRL_REQ_MASK);
		if (read_poll_timeout_atomic(airoha_fe_rr, val,
					     val & PPE_SRAM_CTRL_ACK_MASK,
					     10, 100, false, eth,
					     REG_PPE_RAM_CTRL(ppe2)))
			return NULL;

		for (i = 0; i < sizeof(struct airoha_foe_entry) / 4; i++)
			hwe[i] = airoha_fe_rr(eth,
					      REG_PPE_RAM_ENTRY(ppe2, i));
	}

	return ppe->foe + hash * sizeof(struct airoha_foe_entry);
}

static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
					 struct airoha_foe_entry *hwe)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
	int len;

	if ((hwe->ib1 ^ e->data.ib1) & AIROHA_FOE_IB1_BIND_UDP)
		return false;

	if (type > PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct airoha_foe_entry, ipv6.data);
	else
		len = offsetof(struct airoha_foe_entry, ipv4.ib2);

	return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
}
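
/* Publish an entry to the hw table: write the entry body first and only
 * then update ib1, since the bind state in ib1 is what makes the entry
 * valid for hw lookups. Entries that live in the PPE SRAM are also
 * synced through the NPU firmware.
 */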
static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
				       struct airoha_foe_entry *e,
				       u32 hash)
{
	struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
	u32 ts = airoha_ppe_get_timestamp(ppe);
	struct airoha_eth *eth = ppe->eth;
	struct airoha_npu *npu;
	int err = 0;

	memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
	wmb();

	e->ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
	e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
	hwe->ib1 = e->ib1;

	rcu_read_lock();

	npu = rcu_dereference(eth->npu);
	if (!npu) {
		err = -ENODEV;
		goto unlock;
	}

	airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);

	if (hash < PPE_SRAM_NUM_ENTRIES) {
		dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
		bool ppe2 = airoha_ppe2_is_enabled(eth) &&
			    hash >= PPE1_SRAM_NUM_ENTRIES;

		err = npu->ops.ppe_foe_commit_entry(npu, addr, sizeof(*hwe),
						    hash, ppe2);
	}
unlock:
	rcu_read_unlock();

	return err;
}

static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe,
				       struct airoha_flow_table_entry *e)
{
	lockdep_assert_held(&ppe_lock);

	hlist_del_init(&e->list);
	if (e->hash != 0xffff) {
		e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
		e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
					  AIROHA_FOE_STATE_INVALID);
		airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
		e->hash = 0xffff;
	}
	if (e->type == FLOW_TYPE_L2_SUBFLOW) {
		hlist_del_init(&e->l2_subflow_node);
		kfree(e);
	}
}

static void airoha_ppe_foe_remove_l2_flow(struct airoha_ppe *ppe,
					  struct airoha_flow_table_entry *e)
{
	struct hlist_head *head = &e->l2_flows;
	struct hlist_node *n;

	lockdep_assert_held(&ppe_lock);

	rhashtable_remove_fast(&ppe->l2_flows, &e->l2_node,
			       airoha_l2_flow_table_params);
	hlist_for_each_entry_safe(e, n, head, l2_subflow_node)
		airoha_ppe_foe_remove_flow(ppe, e);
}

static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
					     struct airoha_flow_table_entry *e)
{
	spin_lock_bh(&ppe_lock);

	if (e->type == FLOW_TYPE_L2)
		airoha_ppe_foe_remove_l2_flow(ppe, e);
	else
		airoha_ppe_foe_remove_flow(ppe, e);

	spin_unlock_bh(&ppe_lock);
}
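
/* Bind a bridged L2 flow to a specific hw bucket: clone the unbound hw
 * entry, keep its packet type and UDP bit, and overlay the L2 rewrite
 * info from the parent flow table entry. The resulting subflow is
 * tracked on the parent's l2_flows list.
 */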
static int
airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
				    struct airoha_flow_table_entry *e,
				    u32 hash)
{
	u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
	struct airoha_foe_entry *hwe_p, hwe;
	struct airoha_flow_table_entry *f;
	int type;

	hwe_p = airoha_ppe_foe_get_entry(ppe, hash);
	if (!hwe_p)
		return -EINVAL;

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -ENOMEM;

	hlist_add_head(&f->l2_subflow_node, &e->l2_flows);
	f->type = FLOW_TYPE_L2_SUBFLOW;
	f->hash = hash;

	memcpy(&hwe, hwe_p, sizeof(*hwe_p));
	hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);

	type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
	if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
		memcpy(&hwe.ipv6.l2, &e->data.bridge.l2, sizeof(hwe.ipv6.l2));
		hwe.ipv6.ib2 = e->data.bridge.ib2;
		/* setting smac_id to 0xf instructs the hw to keep the
		 * original source mac address
		 */
		hwe.ipv6.l2.src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID,
						    0xf);
	} else {
		memcpy(&hwe.bridge.l2, &e->data.bridge.l2,
		       sizeof(hwe.bridge.l2));
		hwe.bridge.ib2 = e->data.bridge.ib2;
		if (type == PPE_PKT_TYPE_IPV4_HNAPT)
			memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
			       sizeof(hwe.ipv4.new_tuple));
	}

	hwe.bridge.data = e->data.bridge.data;
	airoha_ppe_foe_commit_entry(ppe, &hwe, hash);

	return 0;
}

static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
					struct sk_buff *skb,
					u32 hash)
{
	struct airoha_flow_table_entry *e;
	struct airoha_foe_bridge br = {};
	struct airoha_foe_entry *hwe;
	bool commit_done = false;
	struct hlist_node *n;
	u32 index, state;

	spin_lock_bh(&ppe_lock);

	hwe = airoha_ppe_foe_get_entry(ppe, hash);
	if (!hwe)
		goto unlock;

	state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
	if (state == AIROHA_FOE_STATE_BIND)
		goto unlock;

	index = airoha_ppe_foe_get_entry_hash(hwe);
	hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
		if (e->type == FLOW_TYPE_L2_SUBFLOW) {
			state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
			if (state != AIROHA_FOE_STATE_BIND) {
				e->hash = 0xffff;
				airoha_ppe_foe_remove_flow(ppe, e);
			}
			continue;
		}

		if (commit_done || !airoha_ppe_foe_compare_entry(e, hwe)) {
			e->hash = 0xffff;
			continue;
		}

		airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
		commit_done = true;
		e->hash = hash;
	}

	if (commit_done)
		goto unlock;

	airoha_ppe_foe_set_bridge_addrs(&br, eth_hdr(skb));
	e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
				   airoha_l2_flow_table_params);
	if (e)
		airoha_ppe_foe_commit_subflow_entry(ppe, e, hash);
unlock:
	spin_unlock_bh(&ppe_lock);
}

static int
airoha_ppe_foe_l2_flow_commit_entry(struct airoha_ppe *ppe,
				    struct airoha_flow_table_entry *e)
{
	struct airoha_flow_table_entry *prev;

	e->type = FLOW_TYPE_L2;
	prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &e->l2_node,
						 airoha_l2_flow_table_params);
	if (!prev)
		return 0;

	if (IS_ERR(prev))
		return PTR_ERR(prev);

	return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
				       &e->l2_node,
				       airoha_l2_flow_table_params);
}

static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
					    struct airoha_flow_table_entry *e)
{
	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
	u32 hash;

	if (type == PPE_PKT_TYPE_BRIDGE)
		return airoha_ppe_foe_l2_flow_commit_entry(ppe, e);

	hash = airoha_ppe_foe_get_entry_hash(&e->data);
	e->type = FLOW_TYPE_L4;
	e->hash = 0xffff;

	spin_lock_bh(&ppe_lock);
	hlist_add_head(&e->list, &ppe->foe_flow[hash]);
	spin_unlock_bh(&ppe_lock);

	return 0;
}
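
/* The PPE timestamp is a free-running counter, so the idle delta can
 * wrap around: fold negative deltas back into range by adding the
 * timestamp field mask + 1.
 */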
static int airoha_ppe_get_entry_idle_time(struct airoha_ppe *ppe, u32 ib1)
{
	u32 state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
	u32 ts, ts_mask, now = airoha_ppe_get_timestamp(ppe);
	int idle;

	if (state == AIROHA_FOE_STATE_BIND) {
		ts = FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, ib1);
		ts_mask = AIROHA_FOE_IB1_BIND_TIMESTAMP;
	} else {
		ts = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, ib1);
		now = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, now);
		ts_mask = AIROHA_FOE_IB1_UNBIND_TIMESTAMP;
	}
	idle = now - ts;

	return idle < 0 ? idle + ts_mask + 1 : idle;
}

static void
airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
				    struct airoha_flow_table_entry *e)
{
	int min_idle = airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
	struct airoha_flow_table_entry *iter;
	struct hlist_node *n;

	lockdep_assert_held(&ppe_lock);

	hlist_for_each_entry_safe(iter, n, &e->l2_flows, l2_subflow_node) {
		struct airoha_foe_entry *hwe;
		u32 ib1, state;
		int idle;

		hwe = airoha_ppe_foe_get_entry(ppe, iter->hash);
		if (!hwe)
			continue;

		ib1 = READ_ONCE(hwe->ib1);
		state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
		if (state != AIROHA_FOE_STATE_BIND) {
			iter->hash = 0xffff;
			airoha_ppe_foe_remove_flow(ppe, iter);
			continue;
		}

		idle = airoha_ppe_get_entry_idle_time(ppe, ib1);
		if (idle >= min_idle)
			continue;

		min_idle = idle;
		e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
		e->data.ib1 |= ib1 & AIROHA_FOE_IB1_BIND_TIMESTAMP;
	}
}

static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe,
					     struct airoha_flow_table_entry *e)
{
	struct airoha_foe_entry *hwe_p, hwe = {};

	spin_lock_bh(&ppe_lock);

	if (e->type == FLOW_TYPE_L2) {
		airoha_ppe_foe_flow_l2_entry_update(ppe, e);
		goto unlock;
	}

	if (e->hash == 0xffff)
		goto unlock;

	hwe_p = airoha_ppe_foe_get_entry(ppe, e->hash);
	if (!hwe_p)
		goto unlock;

	memcpy(&hwe, hwe_p, sizeof(*hwe_p));
	if (!airoha_ppe_foe_compare_entry(e, &hwe)) {
		e->hash = 0xffff;
		goto unlock;
	}

	e->data.ib1 = hwe.ib1;
unlock:
	spin_unlock_bh(&ppe_lock);
}

static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe,
				      struct airoha_flow_table_entry *e)
{
	airoha_ppe_foe_flow_entry_update(ppe, e);

	return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
}
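
/* FLOW_CLS_REPLACE handler. The action list is walked twice: Ethernet
 * header mangles are applied while the match keys are collected, while
 * L3/L4 mangles are applied after the original tuple has been set, so
 * that NAT rewrites end up in the egress tuple.
 */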
static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
					   struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct airoha_eth *eth = port->qdma->eth;
	struct airoha_flow_table_entry *e;
	struct airoha_flow_data data = {};
	struct net_device *odev = NULL;
	struct flow_action_entry *act;
	struct airoha_foe_entry hwe;
	int err, i, offload_type;
	u16 addr_type = 0;
	u8 l4proto = 0;

	if (rhashtable_lookup(&eth->flow_table, &f->cookie,
			      airoha_flow_table_params))
		return -EEXIST;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return -EOPNOTSUPP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
		if (flow_rule_has_control_flags(match.mask->flags,
						f->common.extack))
			return -EOPNOTSUPP;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		l4proto = match.key->ip_proto;
	} else {
		return -EOPNOTSUPP;
	}

	switch (addr_type) {
	case 0:
		offload_type = PPE_PKT_TYPE_BRIDGE;
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
			struct flow_match_eth_addrs match;

			flow_rule_match_eth_addrs(rule, &match);
			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
		} else {
			return -EOPNOTSUPP;
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		offload_type = PPE_PKT_TYPE_IPV4_HNAPT;
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		offload_type = PPE_PKT_TYPE_IPV6_ROUTE_5T;
		break;
	default:
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE:
			if (offload_type == PPE_PKT_TYPE_BRIDGE)
				return -EOPNOTSUPP;

			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
				airoha_ppe_flow_mangle_eth(act, &data.eth);
			break;
		case FLOW_ACTION_REDIRECT:
			odev = act->dev;
			break;
		case FLOW_ACTION_CSUM:
			break;
		case FLOW_ACTION_VLAN_PUSH:
			if (data.vlan.num == 2 ||
			    act->vlan.proto != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan.hdr[data.vlan.num].id = act->vlan.vid;
			data.vlan.hdr[data.vlan.num].proto = act->vlan.proto;
			data.vlan.num++;
			break;
		case FLOW_ACTION_VLAN_POP:
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			if (data.pppoe.num == 1 || data.vlan.num == 2)
				return -EOPNOTSUPP;

			data.pppoe.sid = act->pppoe.sid;
			data.pppoe.num++;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (!is_valid_ether_addr(data.eth.h_source) ||
	    !is_valid_ether_addr(data.eth.h_dest))
		return -EINVAL;

	err = airoha_ppe_foe_entry_prepare(eth, &hwe, odev, offload_type,
					   &data, l4proto);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports ports;

		if (offload_type == PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		flow_rule_match_ports(rule, &ports);
		data.src_port = ports.key->src;
		data.dst_port = ports.key->dst;
	} else if (offload_type != PPE_PKT_TYPE_BRIDGE) {
		return -EOPNOTSUPP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs addrs;

		flow_rule_match_ipv4_addrs(rule, &addrs);
		data.v4.src_addr = addrs.key->src;
		data.v4.dst_addr = addrs.key->dst;
		airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, false);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs addrs;

		flow_rule_match_ipv6_addrs(rule, &addrs);

		data.v6.src_addr = addrs.key->src;
		data.v6.dst_addr = addrs.key->dst;
		airoha_ppe_foe_entry_set_ipv6_tuple(&hwe, &data);
	}

	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		if (offload_type == PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			err = airoha_ppe_flow_mangle_ports(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			err = airoha_ppe_flow_mangle_ipv4(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
			/* handled earlier */
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (err)
			return err;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		err = airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, true);
		if (err)
			return err;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->cookie = f->cookie;
	memcpy(&e->data, &hwe, sizeof(e->data));

	err = airoha_ppe_foe_flow_commit_entry(eth->ppe, e);
	if (err)
		goto free_entry;

	err = rhashtable_insert_fast(&eth->flow_table, &e->node,
				     airoha_flow_table_params);
	if (err < 0)
		goto remove_foe_entry;

	return 0;

remove_foe_entry:
	airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
free_entry:
	kfree(e);

	return err;
}

static int airoha_ppe_flow_offload_destroy(struct airoha_gdm_port *port,
					   struct flow_cls_offload *f)
{
	struct airoha_eth *eth = port->qdma->eth;
	struct airoha_flow_table_entry *e;

	e = rhashtable_lookup(&eth->flow_table, &f->cookie,
			      airoha_flow_table_params);
	if (!e)
		return -ENOENT;

	airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
	rhashtable_remove_fast(&eth->flow_table, &e->node,
			       airoha_flow_table_params);
	kfree(e);

	return 0;
}
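
/* Per-flow counters are split in two halves: the NPU exposes the low
 * 32 bits in its shared stats memory, while the driver accumulates the
 * high 32 bits in ppe->foe_stats. Merge the two halves here.
 */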
void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
				    struct airoha_foe_stats64 *stats)
{
	u32 index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
	struct airoha_eth *eth = ppe->eth;
	struct airoha_npu *npu;

	if (index >= PPE_STATS_NUM_ENTRIES)
		return;

	rcu_read_lock();

	npu = rcu_dereference(eth->npu);
	if (npu) {
		u64 packets = ppe->foe_stats[index].packets;
		u64 bytes = ppe->foe_stats[index].bytes;
		struct airoha_foe_stats npu_stats;

		memcpy_fromio(&npu_stats, &npu->stats[index],
			      sizeof(*npu->stats));
		stats->packets = packets << 32 | npu_stats.packets;
		stats->bytes = bytes << 32 | npu_stats.bytes;
	}

	rcu_read_unlock();
}

static int airoha_ppe_flow_offload_stats(struct airoha_gdm_port *port,
					 struct flow_cls_offload *f)
{
	struct airoha_eth *eth = port->qdma->eth;
	struct airoha_flow_table_entry *e;
	u32 idle;

	e = rhashtable_lookup(&eth->flow_table, &f->cookie,
			      airoha_flow_table_params);
	if (!e)
		return -ENOENT;

	idle = airoha_ppe_entry_idle_time(eth->ppe, e);
	f->stats.lastused = jiffies - idle * HZ;

	if (e->hash != 0xffff) {
		struct airoha_foe_stats64 stats = {};

		airoha_ppe_foe_entry_get_stats(eth->ppe, e->hash, &stats);
		f->stats.pkts += (stats.packets - e->stats.packets);
		f->stats.bytes += (stats.bytes - e->stats.bytes);
		e->stats = stats;
	}

	return 0;
}

static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
				       struct flow_cls_offload *f)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return airoha_ppe_flow_offload_replace(port, f);
	case FLOW_CLS_DESTROY:
		return airoha_ppe_flow_offload_destroy(port, f);
	case FLOW_CLS_STATS:
		return airoha_ppe_flow_offload_stats(port, f);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe,
					 struct airoha_npu *npu)
{
	int i, sram_num_entries = PPE_SRAM_NUM_ENTRIES;
	struct airoha_foe_entry *hwe = ppe->foe;

	if (airoha_ppe2_is_enabled(ppe->eth))
		sram_num_entries = sram_num_entries / 2;

	for (i = 0; i < sram_num_entries; i++)
		memset(&hwe[i], 0, sizeof(*hwe));

	return npu->ops.ppe_flush_sram_entries(npu, ppe->foe_dma,
					       PPE_SRAM_NUM_ENTRIES);
}

static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
{
	struct airoha_npu *npu = airoha_npu_get(eth->dev,
						&eth->ppe->foe_stats_dma);

	if (IS_ERR(npu)) {
		request_module("airoha-npu");
		npu = airoha_npu_get(eth->dev, &eth->ppe->foe_stats_dma);
	}

	return npu;
}

static int airoha_ppe_offload_setup(struct airoha_eth *eth)
{
	struct airoha_npu *npu = airoha_ppe_npu_get(eth);
	int err;

	if (IS_ERR(npu))
		return PTR_ERR(npu);

	err = npu->ops.ppe_init(npu);
	if (err)
		goto error_npu_put;

	airoha_ppe_hw_init(eth->ppe);
	err = airoha_ppe_flush_sram_entries(eth->ppe, npu);
	if (err)
		goto error_npu_put;

	airoha_ppe_foe_flow_stats_reset(eth->ppe, npu);

	rcu_assign_pointer(eth->npu, npu);
	synchronize_rcu();

	return 0;

error_npu_put:
	airoha_npu_put(npu);

	return err;
}

int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct flow_cls_offload *cls = type_data;
	struct airoha_eth *eth = port->qdma->eth;
	int err = 0;

	mutex_lock(&flow_offload_mutex);

	if (!eth->npu)
		err = airoha_ppe_offload_setup(eth);
	if (!err)
		err = airoha_ppe_flow_offload_cmd(port, cls);

	mutex_unlock(&flow_offload_mutex);

	return err;
}

void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
			  u16 hash)
{
	u16 now, diff;

	if (hash > PPE_HASH_MASK)
		return;

	now = (u16)jiffies;
	diff = now - ppe->foe_check_time[hash];
	if (diff < HZ / 10)
		return;

	ppe->foe_check_time[hash] = now;
	airoha_ppe_foe_insert_entry(ppe, skb, hash);
}
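
/* Program the port MAC address into the PPE source-MAC update memory,
 * indexed by port id: the last four octets of the address go to word 0,
 * the first two to word 1.
 */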
void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
{
	struct airoha_eth *eth = port->qdma->eth;
	struct net_device *dev = port->dev;
	const u8 *addr = dev->dev_addr;
	u32 val;

	val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
	airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
	airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
		     FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
		     PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);

	val = (addr[0] << 8) | addr[1];
	airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
	airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
		     FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
		     FIELD_PREP(PPE_UPDMEM_OFFSET_MASK, 1) |
		     PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
}

int airoha_ppe_init(struct airoha_eth *eth)
{
	struct airoha_ppe *ppe;
	int foe_size, err;

	ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
	if (!ppe)
		return -ENOMEM;

	foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
	ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
				       GFP_KERNEL);
	if (!ppe->foe)
		return -ENOMEM;

	ppe->eth = eth;
	eth->ppe = ppe;

	ppe->foe_flow = devm_kzalloc(eth->dev,
				     PPE_NUM_ENTRIES * sizeof(*ppe->foe_flow),
				     GFP_KERNEL);
	if (!ppe->foe_flow)
		return -ENOMEM;

	foe_size = PPE_STATS_NUM_ENTRIES * sizeof(*ppe->foe_stats);
	if (foe_size) {
		ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size,
						     &ppe->foe_stats_dma,
						     GFP_KERNEL);
		if (!ppe->foe_stats)
			return -ENOMEM;
	}

	err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
	if (err)
		return err;

	err = rhashtable_init(&ppe->l2_flows, &airoha_l2_flow_table_params);
	if (err)
		goto error_flow_table_destroy;

	err = airoha_ppe_debugfs_init(ppe);
	if (err)
		goto error_l2_flow_table_destroy;

	return 0;

error_l2_flow_table_destroy:
	rhashtable_destroy(&ppe->l2_flows);
error_flow_table_destroy:
	rhashtable_destroy(&eth->flow_table);

	return err;
}

void airoha_ppe_deinit(struct airoha_eth *eth)
{
	struct airoha_npu *npu;

	rcu_read_lock();
	npu = rcu_dereference(eth->npu);
	if (npu) {
		npu->ops.ppe_deinit(npu);
		airoha_npu_put(npu);
	}
	rcu_read_unlock();

	rhashtable_destroy(&eth->ppe->l2_flows);
	rhashtable_destroy(&eth->flow_table);
	debugfs_remove(eth->ppe->debugfs_dir);
}