// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2024 AIROHA Inc
 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
 */
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/tcp.h>
#include <linux/u64_stats_sync.h>
#include <net/dst_metadata.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <uapi/linux/ppp_defs.h>

#include "airoha_regs.h"
#include "airoha_eth.h"

u32 airoha_rr(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

void airoha_wr(void __iomem *base, u32 offset, u32 val)
{
	writel(val, base + offset);
}

u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
{
	val |= (airoha_rr(base, offset) & ~mask);
	airoha_wr(base, offset, val);

	return val;
}

static void airoha_qdma_set_irqmask(struct airoha_irq_bank *irq_bank,
				    int index, u32 clear, u32 set)
{
	struct airoha_qdma *qdma = irq_bank->qdma;
	int bank = irq_bank - &qdma->irq_banks[0];
	unsigned long flags;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(irq_bank->irqmask)))
		return;

	spin_lock_irqsave(&irq_bank->irq_lock, flags);

	irq_bank->irqmask[index] &= ~clear;
	irq_bank->irqmask[index] |= set;
	airoha_qdma_wr(qdma, REG_INT_ENABLE(bank, index),
		       irq_bank->irqmask[index]);
	/* Read irq_enable register in order to guarantee the update above
	 * completes in the spinlock critical section.
	 */
	airoha_qdma_rr(qdma, REG_INT_ENABLE(bank, index));

	spin_unlock_irqrestore(&irq_bank->irq_lock, flags);
}

static void airoha_qdma_irq_enable(struct airoha_irq_bank *irq_bank,
				   int index, u32 mask)
{
	airoha_qdma_set_irqmask(irq_bank, index, 0, mask);
}

static void airoha_qdma_irq_disable(struct airoha_irq_bank *irq_bank,
				    int index, u32 mask)
{
	airoha_qdma_set_irqmask(irq_bank, index, mask, 0);
}

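/* Program the per-port MAC address filter: the first three octets go
 * in the high-word register, the last three in both the LMIN and LMAX
 * word registers. Writing the same value to LMIN/LMAX presumably
 * collapses the filter range to a single address.
 */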
static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
{
	struct airoha_eth *eth = port->qdma->eth;
	u32 val, reg;

	reg = airhoa_is_lan_gdm_port(port) ? REG_FE_LAN_MAC_H
					   : REG_FE_WAN_MAC_H;
	val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
	airoha_fe_wr(eth, reg, val);

	val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
	airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
	airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);

	airoha_ppe_init_upd_mem(port);
}

static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
					u32 val)
{
	airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK,
		      FIELD_PREP(GDM_OCFQ_MASK, val));
	airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK,
		      FIELD_PREP(GDM_MCFQ_MASK, val));
	airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK,
		      FIELD_PREP(GDM_BCFQ_MASK, val));
	airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK,
		      FIELD_PREP(GDM_UCFQ_MASK, val));
}

static int airoha_set_vip_for_gdm_port(struct airoha_gdm_port *port,
				       bool enable)
{
	struct airoha_eth *eth = port->qdma->eth;
	u32 vip_port;

	switch (port->id) {
	case AIROHA_GDM3_IDX:
		/* FIXME: handle XSI_PCIE1_PORT */
		vip_port = XSI_PCIE0_VIP_PORT_MASK;
		break;
	case AIROHA_GDM4_IDX:
		/* FIXME: handle XSI_USB_PORT */
		vip_port = XSI_ETH_VIP_PORT_MASK;
		break;
	default:
		return 0;
	}

	if (enable) {
		airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port);
		airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port);
	} else {
		airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port);
		airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
	}

	return 0;
}

static void airoha_fe_maccr_init(struct airoha_eth *eth)
{
	int p;

	for (p = 1; p <= ARRAY_SIZE(eth->ports); p++)
		airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
			      GDM_TCP_CKSUM_MASK | GDM_UDP_CKSUM_MASK |
			      GDM_IP4_CKSUM_MASK | GDM_DROP_CRC_ERR_MASK);

	airoha_fe_rmw(eth, REG_CDM_VLAN_CTRL(1), CDM_VLAN_MASK,
		      FIELD_PREP(CDM_VLAN_MASK, 0x8100));

	airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD);
}

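/* Trap control-plane traffic to the CPU via the VIP pattern table.
 * Each PATN/EN register pair matches one protocol (PPPoE discovery,
 * PPP LCP/IPCP/CHAP/PAP/IPV6CP, BOOTP, ISAKMP, DHCPv6, IEEE 1905.1,
 * LLDP). Judging by the values used below, PATN_TYPE 1 appears to
 * match a PPP protocol id and PATN_TYPE 4 an L4 source/destination
 * port pair.
 */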
static void airoha_fe_vip_setup(struct airoha_eth *eth)
{
	airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC);
	airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(4),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(6),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(7),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	/* BOOTP (0x43) */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43);
	airoha_fe_wr(eth, REG_FE_VIP_EN(8),
		     PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	/* BOOTP (0x44) */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44);
	airoha_fe_wr(eth, REG_FE_VIP_EN(9),
		     PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	/* ISAKMP */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4);
	airoha_fe_wr(eth, REG_FE_VIP_EN(10),
		     PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(11),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	/* DHCPv6 */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223);
	airoha_fe_wr(eth, REG_FE_VIP_EN(12),
		     PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(19),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	/* ETH->ETH_P_1905 (0x893a) */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a);
	airoha_fe_wr(eth, REG_FE_VIP_EN(20),
		     PATN_FCPU_EN_MASK | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(21),
		     PATN_FCPU_EN_MASK | PATN_EN_MASK);
}

static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth,
					     u32 port, u32 queue)
{
	u32 val;

	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
		      PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK,
		      FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
		      FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue));
	val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL);

	return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val);
}

static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth,
					      u32 port, u32 queue, u32 val)
{
	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK,
		      FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val));
	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
		      PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK |
		      PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK,
		      FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
		      FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) |
		      PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
}

static u32 airoha_fe_get_pse_all_rsv(struct airoha_eth *eth)
{
	u32 val = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);

	return FIELD_GET(PSE_ALLRSV_MASK, val);
}

static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
				    u32 port, u32 queue, u32 val)
{
	u32 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
	u32 tmp, all_rsv, fq_limit;

	airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);

	/* modify all rsv */
	all_rsv = airoha_fe_get_pse_all_rsv(eth);
	all_rsv += (val - orig_val);
	airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
		      FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));

	/* modify hthd */
	tmp = airoha_fe_rr(eth, PSE_FQ_CFG);
	fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp);
	tmp = fq_limit - all_rsv - 0x20;
	airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
		      PSE_SHARE_USED_HTHD_MASK,
		      FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp));

	tmp = fq_limit - all_rsv - 0x100;
	airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
		      PSE_SHARE_USED_MTHD_MASK,
		      FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp));
	tmp = (3 * tmp) >> 2;
	airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET,
		      PSE_SHARE_USED_LTHD_MASK,
		      FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp));

	return 0;
}

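/* Distribute PSE buffer pages across the output queues of each PSE
 * port. Every per-queue update also rescales the global reservation
 * and the shared-buffer high/mid/low thresholds through
 * airoha_fe_set_pse_oq_rsv() above.
 */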
static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
{
	const u32 pse_port_num_queues[] = {
		[FE_PSE_PORT_CDM1] = 6,
		[FE_PSE_PORT_GDM1] = 6,
		[FE_PSE_PORT_GDM2] = 32,
		[FE_PSE_PORT_GDM3] = 6,
		[FE_PSE_PORT_PPE1] = 4,
		[FE_PSE_PORT_CDM2] = 6,
		[FE_PSE_PORT_CDM3] = 8,
		[FE_PSE_PORT_CDM4] = 10,
		[FE_PSE_PORT_PPE2] = 4,
		[FE_PSE_PORT_GDM4] = 2,
		[FE_PSE_PORT_CDM5] = 2,
	};
	u32 all_rsv;
	int q;

	all_rsv = airoha_fe_get_pse_all_rsv(eth);
	if (airoha_ppe_is_enabled(eth, 1)) {
		/* hw misses PPE2 oq rsv */
		all_rsv += PSE_RSV_PAGES *
			   pse_port_num_queues[FE_PSE_PORT_PPE2];
	}
	airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);

	/* CDM1 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q,
					 PSE_QUEUE_RSV_PAGES);
	/* GDM1 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q,
					 PSE_QUEUE_RSV_PAGES);
	/* GDM2 */
	for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0);
	/* GDM3 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q,
					 PSE_QUEUE_RSV_PAGES);
	/* PPE1: reserve pages for the first half of the queues only */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) {
		if (q < pse_port_num_queues[FE_PSE_PORT_PPE1] / 2)
			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q,
						 PSE_QUEUE_RSV_PAGES);
		else
			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0);
	}
	/* CDM2 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q,
					 PSE_QUEUE_RSV_PAGES);
	/* CDM3 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0);
	/* CDM4 */
	for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
					 PSE_QUEUE_RSV_PAGES);
	if (airoha_ppe_is_enabled(eth, 1)) {
		/* PPE2 */
		for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
			if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
				airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2,
							 q,
							 PSE_QUEUE_RSV_PAGES);
			else
				airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2,
							 q, 0);
		}
	}
	/* GDM4 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q,
					 PSE_QUEUE_RSV_PAGES);
	/* CDM5 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q,
					 PSE_QUEUE_RSV_PAGES);
}

static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
{
	int i;

	for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) {
		int err, j;
		u32 val;

		airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);

		val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
		      MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK;
		airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
		err = read_poll_timeout(airoha_fe_rr, val,
					val & MC_VLAN_CFG_CMD_DONE_MASK,
					USEC_PER_MSEC, 5 * USEC_PER_MSEC,
					false, eth, REG_MC_VLAN_CFG);
		if (err)
			return err;

		for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) {
			airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);

			val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
			      FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) |
			      MC_VLAN_CFG_RW_MASK;
			airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
			err = read_poll_timeout(airoha_fe_rr, val,
						val & MC_VLAN_CFG_CMD_DONE_MASK,
						USEC_PER_MSEC,
						5 * USEC_PER_MSEC, false, eth,
						REG_MC_VLAN_CFG);
			if (err)
				return err;
		}
	}

	return 0;
}

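/* Map CPU-bound forwarding reasons (CRSN) to CDM RX queues: most
 * reasons are steered to queue 1, CRSN_24 to queue 6. Each QSEL
 * register apparently packs four per-reason fields, hence the
 * CRSN >> 2 register index paired with the per-reason mask.
 */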
static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
{
	/* CDM1_CRSN_QSEL */
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_22 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_08 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_21 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_24 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
				 CDM_CRSN_QSEL_Q6));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_25 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
				 CDM_CRSN_QSEL_Q1));
	/* CDM2_CRSN_QSEL */
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_08 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_21 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_22 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_24 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
				 CDM_CRSN_QSEL_Q6));
	airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_25 >> 2),
		      CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
		      FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
				 CDM_CRSN_QSEL_Q1));
}

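/* One-shot frame engine bring-up: MAC/forwarding defaults, PSE input
 * and output queue budgets, VIP traps, CPU-reason queue mapping and
 * the multicast VLAN table clear.
 */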
static int airoha_fe_init(struct airoha_eth *eth)
{
	airoha_fe_maccr_init(eth);

	/* PSE IQ reserve */
	airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK,
		      FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10));
	airoha_fe_rmw(eth, REG_PSE_IQ_REV2,
		      PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK,
		      FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) |
		      FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34));

	/* enable FE copy engine for MC/KA/DPI */
	airoha_fe_wr(eth, REG_FE_PCE_CFG,
		     PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK);
	/* set vip queue selection to ring 1 */
	airoha_fe_rmw(eth, REG_CDM_FWD_CFG(1), CDM_VIP_QSEL_MASK,
		      FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4));
	airoha_fe_rmw(eth, REG_CDM_FWD_CFG(2), CDM_VIP_QSEL_MASK,
		      FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4));
	/* set GDM4 source interface offset to 8 */
	airoha_fe_rmw(eth, REG_GDM_SRC_PORT_SET(4),
		      GDM_SPORT_OFF2_MASK |
		      GDM_SPORT_OFF1_MASK |
		      GDM_SPORT_OFF0_MASK,
		      FIELD_PREP(GDM_SPORT_OFF2_MASK, 8) |
		      FIELD_PREP(GDM_SPORT_OFF1_MASK, 8) |
		      FIELD_PREP(GDM_SPORT_OFF0_MASK, 8));

	/* set PSE Page as 128B */
	airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
		      FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK,
		      FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) |
		      FE_DMA_GLO_PG_SZ_MASK);
	airoha_fe_wr(eth, REG_FE_RST_GLO_CFG,
		     FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK |
		     FE_RST_GDM4_MBI_ARB_MASK);
	usleep_range(1000, 2000);

	/* connect RxRing1 and RxRing15 to PSE Port0 OQ-1
	 * connect other rings to PSE Port0 OQ-0
	 */
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4));
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28));
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4));
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28));

	airoha_fe_vip_setup(eth);
	airoha_fe_pse_ports_init(eth);

	airoha_fe_set(eth, REG_GDM_MISC_CFG,
		      GDM2_RDM_ACK_WAIT_PREF_MASK |
		      GDM2_CHN_VLD_MODE_MASK);
	airoha_fe_rmw(eth, REG_CDM_FWD_CFG(2), CDM_OAM_QSEL_MASK,
		      FIELD_PREP(CDM_OAM_QSEL_MASK, 15));

	/* init fragment and assemble Force Port */
	/* NPU Core-3, NPU Bridge Channel-3 */
	airoha_fe_rmw(eth, REG_IP_FRAG_FP,
		      IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK,
		      FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) |
		      FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3));
	/* QDMA LAN, RX Ring-22 */
	airoha_fe_rmw(eth, REG_IP_FRAG_FP,
		      IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK,
		      FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
		      FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));

	airoha_fe_set(eth, REG_GDM_FWD_CFG(AIROHA_GDM3_IDX), GDM_PAD_EN_MASK);
	airoha_fe_set(eth, REG_GDM_FWD_CFG(AIROHA_GDM4_IDX), GDM_PAD_EN_MASK);

	airoha_fe_crsn_qsel_init(eth);

	airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK);
	airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);

	/* default aging mode for mbi unlock issue */
	airoha_fe_rmw(eth, REG_GDM_CHN_RLS(2),
		      MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK,
		      FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) |
		      FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3));

	/* disable IFC by default */
	airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);

	/* enable 1:N vlan action, init vlan table */
	airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);

	return airoha_fe_mc_vlan_clear(eth);
}

static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
{
	struct airoha_qdma *qdma = q->qdma;
	int qid = q - &qdma->q_rx[0];
	int nframes = 0;

	while (q->queued < q->ndesc - 1) {
		struct airoha_queue_entry *e = &q->entry[q->head];
		struct airoha_qdma_desc *desc = &q->desc[q->head];
		struct page *page;
		int offset;
		u32 val;

		page = page_pool_dev_alloc_frag(q->page_pool, &offset,
						q->buf_size);
		if (!page)
			break;

		q->head = (q->head + 1) % q->ndesc;
		q->queued++;
		nframes++;

		e->buf = page_address(page) + offset;
		e->dma_addr = page_pool_get_dma_addr(page) + offset;
		e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);

		val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
		WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
		WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
		val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head);
		WRITE_ONCE(desc->data, cpu_to_le32(val));
		WRITE_ONCE(desc->msg0, 0);
		WRITE_ONCE(desc->msg1, 0);
		WRITE_ONCE(desc->msg2, 0);
		WRITE_ONCE(desc->msg3, 0);

		airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
				RX_RING_CPU_IDX_MASK,
				FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
	}

	return nframes;
}

static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
				    struct airoha_qdma_desc *desc)
{
	u32 port, sport, msg1 = le32_to_cpu(desc->msg1);

	sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
	switch (sport) {
	case 0x10 ... 0x14:
		port = 0;
		break;
	case 0x2 ... 0x4:
		port = sport - 1;
		break;
	default:
		return -EINVAL;
	}

	return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port;
}

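/* Reap completed RX descriptors: rebuild (possibly multi-descriptor)
 * frames with napi_build_skb()/skb_add_rx_frag(), attach DSA metadata
 * when the port is a DSA conduit, propagate the PPE flow hash and
 * hand frames hitting the unbind rate threshold to the PPE offload
 * engine before delivering via GRO.
 */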
static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
{
	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
	struct airoha_qdma *qdma = q->qdma;
	struct airoha_eth *eth = qdma->eth;
	int qid = q - &qdma->q_rx[0];
	int done = 0;

	while (done < budget) {
		struct airoha_queue_entry *e = &q->entry[q->tail];
		struct airoha_qdma_desc *desc = &q->desc[q->tail];
		u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
		struct page *page = virt_to_head_page(e->buf);
		u32 desc_ctrl = le32_to_cpu(desc->ctrl);
		struct airoha_gdm_port *port;
		int data_len, len, p;

		if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
			break;

		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;

		dma_sync_single_for_cpu(eth->dev, e->dma_addr,
					SKB_WITH_OVERHEAD(q->buf_size), dir);

		len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
		data_len = q->skb ? q->buf_size
				  : SKB_WITH_OVERHEAD(q->buf_size);
		if (!len || data_len < len)
			goto free_frag;

		p = airoha_qdma_get_gdm_port(eth, desc);
		if (p < 0 || !eth->ports[p])
			goto free_frag;

		port = eth->ports[p];
		if (!q->skb) { /* first buffer */
			q->skb = napi_build_skb(e->buf, q->buf_size);
			if (!q->skb)
				goto free_frag;

			__skb_put(q->skb, len);
			skb_mark_for_recycle(q->skb);
			q->skb->dev = port->dev;
			q->skb->protocol = eth_type_trans(q->skb, port->dev);
			q->skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb_record_rx_queue(q->skb, qid);
		} else { /* scattered frame */
			struct skb_shared_info *shinfo = skb_shinfo(q->skb);
			int nr_frags = shinfo->nr_frags;

			if (nr_frags >= ARRAY_SIZE(shinfo->frags))
				goto free_frag;

			skb_add_rx_frag(q->skb, nr_frags, page,
					e->buf - page_address(page), len,
					q->buf_size);
		}

		if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl))
			continue;

		if (netdev_uses_dsa(port->dev)) {
			/* PPE module requires untagged packets to work
			 * properly and it provides DSA port index via the
			 * DMA descriptor. Report DSA tag to the DSA stack
			 * via skb dst info.
			 */
			u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG,
					      le32_to_cpu(desc->msg0));

			if (sptag < ARRAY_SIZE(port->dsa_meta) &&
			    port->dsa_meta[sptag])
				skb_dst_set_noref(q->skb,
						  &port->dsa_meta[sptag]->dst);
		}

		hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
		if (hash != AIROHA_RXD4_FOE_ENTRY)
			skb_set_hash(q->skb, jhash_1word(hash, 0),
				     PKT_HASH_TYPE_L4);

		reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
		if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
			airoha_ppe_check_skb(&eth->ppe->dev, q->skb, hash,
					     false);

		done++;
		napi_gro_receive(&q->napi, q->skb);
		q->skb = NULL;
		continue;
free_frag:
		if (q->skb) {
			dev_kfree_skb(q->skb);
			q->skb = NULL;
		} else {
			page_pool_put_full_page(q->page_pool, page, true);
		}
	}
	airoha_qdma_fill_rx_queue(q);

	return done;
}

static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
{
	struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
	int cur, done = 0;

	do {
		cur = airoha_qdma_rx_process(q, budget - done);
		done += cur;
	} while (cur && done < budget);

	if (done < budget && napi_complete(napi)) {
		struct airoha_qdma *qdma = q->qdma;
		int i, qid = q - &qdma->q_rx[0];
		int intr_reg = qid < RX_DONE_HIGH_OFFSET ? QDMA_INT_REG_IDX1
							 : QDMA_INT_REG_IDX2;

		for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
			if (!(BIT(qid) & RX_IRQ_BANK_PIN_MASK(i)))
				continue;

			airoha_qdma_irq_enable(&qdma->irq_banks[i], intr_reg,
					       BIT(qid % RX_DONE_HIGH_OFFSET));
		}
	}

	return done;
}

static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
				     struct airoha_qdma *qdma, int ndesc)
{
	const struct page_pool_params pp_params = {
		.order = 0,
		.pool_size = 256,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.dma_dir = DMA_FROM_DEVICE,
		.max_len = PAGE_SIZE,
		.nid = NUMA_NO_NODE,
		.dev = qdma->eth->dev,
		.napi = &q->napi,
	};
	struct airoha_eth *eth = qdma->eth;
	int qid = q - &qdma->q_rx[0], thr;
	dma_addr_t dma_addr;

	q->buf_size = PAGE_SIZE / 2;
	q->ndesc = ndesc;
	q->qdma = qdma;

	q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		q->page_pool = NULL;
		return err;
	}

	q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
				      &dma_addr, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);

	airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
			RX_RING_SIZE_MASK,
			FIELD_PREP(RX_RING_SIZE_MASK, ndesc));

	thr = clamp(ndesc >> 3, 1, 32);
	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
			FIELD_PREP(RX_RING_THR_MASK, thr));
	airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
			FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
	airoha_qdma_set(qdma, REG_RX_SCATTER_CFG(qid), RX_RING_SG_EN_MASK);

	airoha_qdma_fill_rx_queue(q);

	return 0;
}

static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
{
	struct airoha_qdma *qdma = q->qdma;
	struct airoha_eth *eth = qdma->eth;
	int qid = q - &qdma->q_rx[0];

	while (q->queued) {
		struct airoha_queue_entry *e = &q->entry[q->tail];
		struct airoha_qdma_desc *desc = &q->desc[q->tail];
		struct page *page = virt_to_head_page(e->buf);

		dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
					page_pool_get_dma_dir(q->page_pool));
		page_pool_put_full_page(q->page_pool, page, false);
		/* Reset DMA descriptor */
		WRITE_ONCE(desc->ctrl, 0);
		WRITE_ONCE(desc->addr, 0);
		WRITE_ONCE(desc->data, 0);
		WRITE_ONCE(desc->msg0, 0);
		WRITE_ONCE(desc->msg1, 0);
		WRITE_ONCE(desc->msg2, 0);
		WRITE_ONCE(desc->msg3, 0);

		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}

	q->head = q->tail;
	airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
			FIELD_PREP(RX_RING_DMA_IDX_MASK, q->tail));
}

static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		int err;

		if (!(RX_DONE_INT_MASK & BIT(i))) {
			/* rx-queue not bound to irq */
			continue;
		}

		err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
						RX_DSCP_NUM(i));
		if (err)
			return err;
	}

	return 0;
}

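/* TX completion runs off a separate per-IRQ completion queue: the hw
 * appends (ring, descriptor) tokens that are consumed here, with 0xff
 * used as the empty-slot sentinel. Completed entries are returned to
 * the ring's tx_list free list and the netdev queue is woken once at
 * least free_thr descriptors are available again.
 */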
static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
{
	struct airoha_tx_irq_queue *irq_q;
	int id, done = 0, irq_queued;
	struct airoha_qdma *qdma;
	struct airoha_eth *eth;
	u32 status, head;

	irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
	qdma = irq_q->qdma;
	id = irq_q - &qdma->q_tx_irq[0];
	eth = qdma->eth;

	status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id));
	head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
	head = head % irq_q->size;
	irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);

	while (irq_queued > 0 && done < budget) {
		u32 qid, val = irq_q->q[head];
		struct airoha_qdma_desc *desc;
		struct airoha_queue_entry *e;
		struct airoha_queue *q;
		u32 index, desc_ctrl;
		struct sk_buff *skb;

		if (val == 0xff)
			break;

		irq_q->q[head] = 0xff; /* mark as done */
		head = (head + 1) % irq_q->size;
		irq_queued--;
		done++;

		qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
		if (qid >= ARRAY_SIZE(qdma->q_tx))
			continue;

		q = &qdma->q_tx[qid];
		if (!q->ndesc)
			continue;

		index = FIELD_GET(IRQ_DESC_IDX_MASK, val);
		if (index >= q->ndesc)
			continue;

		spin_lock_bh(&q->lock);

		if (!q->queued)
			goto unlock;

		desc = &q->desc[index];
		desc_ctrl = le32_to_cpu(desc->ctrl);

		if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
		    !(desc_ctrl & QDMA_DESC_DROP_MASK))
			goto unlock;

		e = &q->entry[index];
		skb = e->skb;

		dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
				 DMA_TO_DEVICE);
		e->dma_addr = 0;
		list_add_tail(&e->list, &q->tx_list);

		WRITE_ONCE(desc->msg0, 0);
		WRITE_ONCE(desc->msg1, 0);
		q->queued--;

		if (skb) {
			u16 queue = skb_get_queue_mapping(skb);
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(skb->dev, queue);
			netdev_tx_completed_queue(txq, 1, skb->len);
			if (netif_tx_queue_stopped(txq) &&
			    q->ndesc - q->queued >= q->free_thr)
				netif_tx_wake_queue(txq);

			dev_kfree_skb_any(skb);
		}
unlock:
		spin_unlock_bh(&q->lock);
	}

	if (done) {
		int i, len = done >> 7;

		for (i = 0; i < len; i++)
			airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
					IRQ_CLEAR_LEN_MASK, 0x80);
		airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
				IRQ_CLEAR_LEN_MASK, (done & 0x7f));
	}

	if (done < budget && napi_complete(napi))
		airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
				       TX_DONE_INT_MASK(id));

	return done;
}

static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
				     struct airoha_qdma *qdma, int size)
{
	struct airoha_eth *eth = qdma->eth;
	int i, qid = q - &qdma->q_tx[0];
	dma_addr_t dma_addr;

	spin_lock_init(&q->lock);
	q->ndesc = size;
	q->qdma = qdma;
	q->free_thr = 1 + MAX_SKB_FRAGS;
	INIT_LIST_HEAD(&q->tx_list);

	q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
				      &dma_addr, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	for (i = 0; i < q->ndesc; i++) {
		u32 val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);

		list_add_tail(&q->entry[i].list, &q->tx_list);
		WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
	}

	/* xmit ring drop default setting */
	airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
			TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);

	airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
	airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
			FIELD_PREP(TX_RING_CPU_IDX_MASK, 0));
	airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
			FIELD_PREP(TX_RING_DMA_IDX_MASK, 0));

	return 0;
}

static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
				   struct airoha_qdma *qdma, int size)
{
	int id = irq_q - &qdma->q_tx_irq[0];
	struct airoha_eth *eth = qdma->eth;
	dma_addr_t dma_addr;

	netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
			  airoha_qdma_tx_napi_poll);
	irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
				       &dma_addr, GFP_KERNEL);
	if (!irq_q->q)
		return -ENOMEM;

	memset(irq_q->q, 0xff, size * sizeof(u32));
	irq_q->size = size;
	irq_q->qdma = qdma;

	airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
	airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
			FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
	airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
			FIELD_PREP(TX_IRQ_THR_MASK, 1));

	return 0;
}

static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
		err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
					      IRQ_QUEUE_LEN(i));
		if (err)
			return err;
	}

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
						TX_DSCP_NUM);
		if (err)
			return err;
	}

	return 0;
}

static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
{
	struct airoha_eth *eth = q->qdma->eth;
	int i;

	spin_lock_bh(&q->lock);
	for (i = 0; i < q->ndesc; i++) {
		struct airoha_queue_entry *e = &q->entry[i];

		if (!e->dma_addr)
			continue;

		dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(e->skb);
		e->dma_addr = 0;
		e->skb = NULL;
		list_add_tail(&e->list, &q->tx_list);
		q->queued--;
	}
	spin_unlock_bh(&q->lock);
}

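/* Set up the hardware-forwarding descriptor and payload buffer pools.
 * Payload memory comes from an optional "qdma%d-buf" reserved-memory
 * region when one is described in the DTS, and otherwise falls back
 * to a coherent DMA allocation sized for HW_DSCP_NUM buffers.
 */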
static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
{
	int size, index, num_desc = HW_DSCP_NUM;
	struct airoha_eth *eth = qdma->eth;
	int id = qdma - &eth->qdma[0];
	u32 status, buf_size;
	dma_addr_t dma_addr;
	const char *name;

	name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id);
	if (!name)
		return -ENOMEM;

	buf_size = id ? AIROHA_MAX_PACKET_SIZE / 2 : AIROHA_MAX_PACKET_SIZE;
	index = of_property_match_string(eth->dev->of_node,
					 "memory-region-names", name);
	if (index >= 0) {
		struct reserved_mem *rmem;
		struct device_node *np;

		/* Consume reserved memory for hw forwarding buffers queue if
		 * available in the DTS
		 */
		np = of_parse_phandle(eth->dev->of_node, "memory-region",
				      index);
		if (!np)
			return -ENODEV;

		rmem = of_reserved_mem_lookup(np);
		of_node_put(np);
		dma_addr = rmem->base;
		/* Compute the number of hw descriptors according to the
		 * reserved memory size and the payload buffer size
		 */
		num_desc = div_u64(rmem->size, buf_size);
	} else {
		size = buf_size * num_desc;
		if (!dmam_alloc_coherent(eth->dev, size, &dma_addr,
					 GFP_KERNEL))
			return -ENOMEM;
	}

	airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);

	size = num_desc * sizeof(struct airoha_qdma_fwd_desc);
	if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
		return -ENOMEM;

	airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
	/* QDMA0: 2KB. QDMA1: 1KB */
	airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
			HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
			FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, !!id));
	airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
			FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
	airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
			LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
			HW_FWD_DESC_NUM_MASK,
			FIELD_PREP(HW_FWD_DESC_NUM_MASK, num_desc) |
			LMGR_INIT_START | LMGR_SRAM_MODE_MASK);

	return read_poll_timeout(airoha_qdma_rr, status,
				 !(status & LMGR_INIT_START), USEC_PER_MSEC,
				 30 * USEC_PER_MSEC, true, qdma,
				 REG_LMGR_INIT_CFG);
}

static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
{
	airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
	airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);

	airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
			  PSE_BUF_ESTIMATE_EN_MASK);

	airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
			EGRESS_RATE_METER_EN_MASK |
			EGRESS_RATE_METER_EQ_RATE_EN_MASK);
	/* 2047us x 31 = 63.457ms */
	airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
			EGRESS_RATE_METER_WINDOW_SZ_MASK,
			FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
	airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
			EGRESS_RATE_METER_TIMESLICE_MASK,
			FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));

	/* ratelimit init */
	airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
	/* fast-tick 25us */
	airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
			FIELD_PREP(GLB_FAST_TICK_MASK, 25));
	airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));

	airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
	airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
			FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
	airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
			EGRESS_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));

	airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
	airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
			  INGRESS_TRTCM_MODE_MASK);
	airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
			FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
	airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
			INGRESS_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));

	airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
	airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
			FIELD_PREP(SLA_FAST_TICK_MASK, 25));
	airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
}

static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
		/* Tx-cpu transferred count */
		airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
		airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
			       CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
			       CNTR_ALL_DSCP_RING_EN_MASK |
			       FIELD_PREP(CNTR_CHAN_MASK, i));
		/* Tx-fwd transferred count */
		airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0);
		airoha_qdma_wr(qdma, REG_CNTR_CFG((i << 1) + 1),
			       CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
			       CNTR_ALL_DSCP_RING_EN_MASK |
			       FIELD_PREP(CNTR_SRC_MASK, 1) |
			       FIELD_PREP(CNTR_CHAN_MASK, i));
	}
}

static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
		/* clear pending irqs */
		airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
		/* setup rx irqs */
		airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX0,
				       INT_RX0_MASK(RX_IRQ_BANK_PIN_MASK(i)));
		airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX1,
				       INT_RX1_MASK(RX_IRQ_BANK_PIN_MASK(i)));
		airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX2,
				       INT_RX2_MASK(RX_IRQ_BANK_PIN_MASK(i)));
		airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX3,
				       INT_RX3_MASK(RX_IRQ_BANK_PIN_MASK(i)));
	}
	/* setup tx irqs */
	airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
			       TX_COHERENT_LOW_INT_MASK | INT_TX_MASK);
	airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX4,
			       TX_COHERENT_HIGH_INT_MASK);

	/* setup irq binding */
	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		if (!qdma->q_tx[i].ndesc)
			continue;

		if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
			airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
					TX_RING_IRQ_BLOCKING_CFG_MASK);
		else
			airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
					  TX_RING_IRQ_BLOCKING_CFG_MASK);
	}

	airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
		       FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
		       GLOBAL_CFG_CPU_TXR_RR_MASK |
		       GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
		       GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK |
		       GLOBAL_CFG_MULTICAST_EN_MASK |
		       GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK |
		       GLOBAL_CFG_TX_WB_DONE_MASK |
		       FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));

	airoha_qdma_init_qos(qdma);

	/* disable qdma rx delay interrupt */
	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
				  RX_DELAY_INT_MASK);
	}

	airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
			TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
	airoha_qdma_init_qos_stats(qdma);

	return 0;
}

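/* Per-bank top half: read and ack the masked interrupt status, then
 * mask the RX/TX done sources that fired and kick the matching NAPI
 * contexts, which re-enable those sources on completion.
 */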
static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
{
	struct airoha_irq_bank *irq_bank = dev_instance;
	struct airoha_qdma *qdma = irq_bank->qdma;
	u32 rx_intr_mask = 0, rx_intr1, rx_intr2;
	u32 intr[ARRAY_SIZE(irq_bank->irqmask)];
	int i;

	for (i = 0; i < ARRAY_SIZE(intr); i++) {
		intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
		intr[i] &= irq_bank->irqmask[i];
		airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
	}

	if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
		return IRQ_NONE;

	rx_intr1 = intr[1] & RX_DONE_LOW_INT_MASK;
	if (rx_intr1) {
		airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX1, rx_intr1);
		rx_intr_mask |= rx_intr1;
	}

	rx_intr2 = intr[2] & RX_DONE_HIGH_INT_MASK;
	if (rx_intr2) {
		airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX2, rx_intr2);
		rx_intr_mask |= (rx_intr2 << 16);
	}

	for (i = 0; rx_intr_mask && i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		if (rx_intr_mask & BIT(i))
			napi_schedule(&qdma->q_rx[i].napi);
	}

	if (intr[0] & INT_TX_MASK) {
		for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
			if (!(intr[0] & TX_DONE_INT_MASK(i)))
				continue;

			airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX0,
						TX_DONE_INT_MASK(i));
			napi_schedule(&qdma->q_tx_irq[i].napi);
		}
	}

	return IRQ_HANDLED;
}

static int airoha_qdma_init_irq_banks(struct platform_device *pdev,
				      struct airoha_qdma *qdma)
{
	struct airoha_eth *eth = qdma->eth;
	int i, id = qdma - &eth->qdma[0];

	for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
		struct airoha_irq_bank *irq_bank = &qdma->irq_banks[i];
		int err, irq_index = 4 * id + i;
		const char *name;

		spin_lock_init(&irq_bank->irq_lock);
		irq_bank->qdma = qdma;

		irq_bank->irq = platform_get_irq(pdev, irq_index);
		if (irq_bank->irq < 0)
			return irq_bank->irq;

		name = devm_kasprintf(eth->dev, GFP_KERNEL,
				      KBUILD_MODNAME ".%d", irq_index);
		if (!name)
			return -ENOMEM;

		err = devm_request_irq(eth->dev, irq_bank->irq,
				       airoha_irq_handler, IRQF_SHARED, name,
				       irq_bank);
		if (err)
			return err;
	}

	return 0;
}

static int airoha_qdma_init(struct platform_device *pdev,
			    struct airoha_eth *eth,
			    struct airoha_qdma *qdma)
{
	int err, id = qdma - &eth->qdma[0];
	const char *res;

	qdma->eth = eth;
	res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
	if (!res)
		return -ENOMEM;

	qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
	if (IS_ERR(qdma->regs))
		return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
				     "failed to iomap qdma%d regs\n", id);

	err = airoha_qdma_init_irq_banks(pdev, qdma);
	if (err)
		return err;

	err = airoha_qdma_init_rx(qdma);
	if (err)
		return err;

	err = airoha_qdma_init_tx(qdma);
	if (err)
		return err;

	err = airoha_qdma_init_hfwd_queues(qdma);
	if (err)
		return err;

	return airoha_qdma_hw_init(qdma);
}

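/* Full datapath bring-up: hold the XSI and core resets across a 20ms
 * settle, then initialize the frame engine, every QDMA block and the
 * PPE before marking the device initialized (the irq handler bails
 * out until that bit is set).
 */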
static int airoha_hw_init(struct platform_device *pdev,
			  struct airoha_eth *eth)
{
	int err, i;

	/* disable xsi */
	err = reset_control_bulk_assert(eth->soc->num_xsi_rsts, eth->xsi_rsts);
	if (err)
		return err;

	err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
	if (err)
		return err;

	msleep(20);
	err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
	if (err)
		return err;

	msleep(20);
	err = airoha_fe_init(eth);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
		err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
		if (err)
			return err;
	}

	err = airoha_ppe_init(eth);
	if (err)
		return err;

	set_bit(DEV_STATE_INITIALIZED, &eth->state);

	return 0;
}

static void airoha_hw_cleanup(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		netif_napi_del(&qdma->q_rx[i].napi);
		airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
		if (qdma->q_rx[i].page_pool)
			page_pool_destroy(qdma->q_rx[i].page_pool);
	}

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
		netif_napi_del(&qdma->q_tx_irq[i].napi);

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		if (!qdma->q_tx[i].ndesc)
			continue;

		airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
	}
}

static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
		napi_enable(&qdma->q_tx_irq[i].napi);

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		napi_enable(&qdma->q_rx[i].napi);
	}
}

static void airoha_qdma_stop_napi(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
		napi_disable(&qdma->q_tx_irq[i].napi);

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		napi_disable(&qdma->q_rx[i].napi);
	}
}

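/* Accumulate the GDM MIB counters into the 64-bit software mirrors.
 * The hw counters are explicitly cleared after each pass via
 * REG_FE_GDM_MIB_CLEAR, so every read is a delta; readers are
 * synchronized via u64_stats_update_begin()/end() plus a spinlock.
 */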
static void airoha_update_hw_stats(struct airoha_gdm_port *port)
{
	struct airoha_eth *eth = port->qdma->eth;
	u32 val, i = 0;

	spin_lock(&port->stats.lock);
	u64_stats_update_begin(&port->stats.syncp);

	/* TX */
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id));
	port->stats.tx_ok_pkts += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id));
	port->stats.tx_ok_pkts += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id));
	port->stats.tx_ok_bytes += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id));
	port->stats.tx_ok_bytes += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id));
	port->stats.tx_drops += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id));
	port->stats.tx_broadcast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id));
	port->stats.tx_multicast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id));
	port->stats.tx_len[i] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id));
	port->stats.tx_len[i++] += val;

	/* RX */
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id));
	port->stats.rx_ok_pkts += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id));
	port->stats.rx_ok_pkts += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id));
	port->stats.rx_ok_bytes += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id));
	port->stats.rx_ok_bytes += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id));
	port->stats.rx_drops += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id));
	port->stats.rx_broadcast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id));
	port->stats.rx_multicast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id));
	port->stats.rx_errors += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id));
	port->stats.rx_crc_error += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id));
	port->stats.rx_over_errors += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id));
	port->stats.rx_fragment += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id));
	port->stats.rx_jabber += val;

	i = 0;
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id));
	port->stats.rx_len[i] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id));
	port->stats.rx_len[i++] += val;

	/* reset mib counters */
	airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id),
		      FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK);

	u64_stats_update_end(&port->stats.syncp);
	spin_unlock(&port->stats.lock);
}

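/* ndo_open/ndo_stop: the GDM ports of a QDMA block share the TX/RX
 * DMA engine, so it is reference-counted via qdma->users and only
 * switched off when the last port is stopped.
 */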
static int airoha_dev_open(struct net_device *dev)
{
	int err, len = ETH_HLEN + dev->mtu + ETH_FCS_LEN;
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct airoha_qdma *qdma = port->qdma;

	netif_tx_start_all_queues(dev);
	err = airoha_set_vip_for_gdm_port(port, true);
	if (err)
		return err;

	if (netdev_uses_dsa(dev))
		airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
			      GDM_STAG_EN_MASK);
	else
		airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
				GDM_STAG_EN_MASK);

	airoha_fe_rmw(qdma->eth, REG_GDM_LEN_CFG(port->id),
		      GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
		      FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
		      FIELD_PREP(GDM_LONG_LEN_MASK, len));

	airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
			GLOBAL_CFG_TX_DMA_EN_MASK |
			GLOBAL_CFG_RX_DMA_EN_MASK);
	atomic_inc(&qdma->users);

	return 0;
}

static int airoha_dev_stop(struct net_device *dev)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct airoha_qdma *qdma = port->qdma;
	int i, err;

	netif_tx_disable(dev);
	err = airoha_set_vip_for_gdm_port(port, false);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++)
		netdev_tx_reset_subqueue(dev, i);

	if (atomic_dec_and_test(&qdma->users)) {
		airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
				  GLOBAL_CFG_TX_DMA_EN_MASK |
				  GLOBAL_CFG_RX_DMA_EN_MASK);

		for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
			if (!qdma->q_tx[i].ndesc)
				continue;

			airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
		}
	}

	return 0;
}

static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	int err;

	err = eth_mac_addr(dev, p);
	if (err)
		return err;

	airoha_set_macaddr(port, dev->dev_addr);

	return 0;
}

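/* GDM3/GDM4 traffic is bounced through GDM2 in hardware loopback so
 * that packets from the xsi interfaces (PCIe/USB/Ethernet) traverse
 * the PSE/PPE pipeline; this is only possible while GDM2 itself is
 * unused (see airoha_dev_init()).
 */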
static int airhoha_set_gdm2_loopback(struct airoha_gdm_port *port)
{
	struct airoha_eth *eth = port->qdma->eth;
	u32 val, pse_port, chan, nbq;
	int src_port;

	/* Forward the traffic to the proper GDM port */
	pse_port = port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3
					       : FE_PSE_PORT_GDM4;
	airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(AIROHA_GDM2_IDX),
				    pse_port);
	airoha_fe_clear(eth, REG_GDM_FWD_CFG(AIROHA_GDM2_IDX),
			GDM_STRIP_CRC_MASK);

	/* Enable GDM2 loopback */
	airoha_fe_wr(eth, REG_GDM_TXCHN_EN(AIROHA_GDM2_IDX), 0xffffffff);
	airoha_fe_wr(eth, REG_GDM_RXCHN_EN(AIROHA_GDM2_IDX), 0xffff);

	chan = port->id == AIROHA_GDM3_IDX ? (airoha_is_7581(eth) ? 4 : 3)
					   : 0;
	airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(AIROHA_GDM2_IDX),
		      LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK,
		      FIELD_PREP(LPBK_CHAN_MASK, chan) |
		      LBK_GAP_MODE_MASK | LBK_LEN_MODE_MASK |
		      LBK_CHAN_MODE_MASK | LPBK_EN_MASK);
	airoha_fe_rmw(eth, REG_GDM_LEN_CFG(AIROHA_GDM2_IDX),
		      GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
		      FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
		      FIELD_PREP(GDM_LONG_LEN_MASK, AIROHA_MAX_MTU));

	/* Disable VIP and IFC for GDM2 */
	airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(AIROHA_GDM2_IDX));
	airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(AIROHA_GDM2_IDX));

	/* XXX: handle XSI_USB_PORT and XSI_PCIE1_PORT */
	nbq = port->id == AIROHA_GDM3_IDX && airoha_is_7581(eth) ? 4 : 0;
	src_port = eth->soc->ops.get_src_port_id(port, nbq);
	if (src_port < 0)
		return src_port;

	airoha_fe_rmw(eth, REG_FE_WAN_PORT,
		      WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
		      FIELD_PREP(WAN0_MASK, src_port));
	val = src_port & SP_CPORT_DFT_MASK;
	airoha_fe_rmw(eth,
		      REG_SP_DFT_CPORT(src_port >> fls(SP_CPORT_DFT_MASK)),
		      SP_CPORT_MASK(val),
		      FE_PSE_PORT_CDM2 << __ffs(SP_CPORT_MASK(val)));

	if (port->id != AIROHA_GDM3_IDX && airoha_is_7581(eth))
		airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6,
			      FC_ID_OF_SRC_PORT24_MASK,
			      FIELD_PREP(FC_ID_OF_SRC_PORT24_MASK, 2));

	return 0;
}

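/* Bind the GDM port to a PPE instance and CPU (CDM) port: GDM2..4 use
 * PPE2 with the secondary cpu port when a second PPE is available,
 * everything else falls back to PPE1 with the cpu port matching the
 * QDMA block the port runs on.
 */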
static int airoha_dev_init(struct net_device *dev)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct airoha_qdma *qdma = port->qdma;
	struct airoha_eth *eth = qdma->eth;
	u32 pse_port, fe_cpu_port;
	u8 ppe_id;

	airoha_set_macaddr(port, dev->dev_addr);

	switch (port->id) {
	case AIROHA_GDM3_IDX:
	case AIROHA_GDM4_IDX:
		/* If GDM2 is active we can't enable loopback */
		if (!eth->ports[1]) {
			int err;

			err = airhoha_set_gdm2_loopback(port);
			if (err)
				return err;
		}
		fallthrough;
	case AIROHA_GDM2_IDX:
		if (airoha_ppe_is_enabled(eth, 1)) {
			/* For PPE2 always use secondary cpu port. */
			fe_cpu_port = FE_PSE_PORT_CDM2;
			pse_port = FE_PSE_PORT_PPE2;
			break;
		}
		fallthrough;
	default: {
		u8 qdma_id = qdma - &eth->qdma[0];

		/* For PPE1 select cpu port according to the running QDMA. */
		fe_cpu_port = qdma_id ? FE_PSE_PORT_CDM2 : FE_PSE_PORT_CDM1;
		pse_port = FE_PSE_PORT_PPE1;
		break;
	}
	}

	airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id), pse_port);
	ppe_id = pse_port == FE_PSE_PORT_PPE2 ? 1 : 0;
	airoha_fe_rmw(eth, REG_PPE_DFT_CPORT0(ppe_id),
		      DFT_CPORT_MASK(port->id),
		      fe_cpu_port << __ffs(DFT_CPORT_MASK(port->id)));

	return 0;
}

static void airoha_dev_get_stats64(struct net_device *dev,
				   struct rtnl_link_stats64 *storage)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	unsigned int start;

	airoha_update_hw_stats(port);
	do {
		start = u64_stats_fetch_begin(&port->stats.syncp);
		storage->rx_packets = port->stats.rx_ok_pkts;
		storage->tx_packets = port->stats.tx_ok_pkts;
		storage->rx_bytes = port->stats.rx_ok_bytes;
		storage->tx_bytes = port->stats.tx_ok_bytes;
		storage->multicast = port->stats.rx_multicast;
		storage->rx_errors = port->stats.rx_errors;
		storage->rx_dropped = port->stats.rx_drops;
		storage->tx_dropped = port->stats.tx_drops;
		storage->rx_crc_errors = port->stats.rx_crc_error;
		storage->rx_over_errors = port->stats.rx_over_errors;
	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}

static int airoha_dev_change_mtu(struct net_device *dev, int mtu)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct airoha_eth *eth = port->qdma->eth;
	u32 len = ETH_HLEN + mtu + ETH_FCS_LEN;

	airoha_fe_rmw(eth, REG_GDM_LEN_CFG(port->id),
		      GDM_LONG_LEN_MASK,
		      FIELD_PREP(GDM_LONG_LEN_MASK, len));
	WRITE_ONCE(dev->mtu, mtu);

	return 0;
}

static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
				   struct net_device *sb_dev)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	int queue, channel;

	/* For dsa device select QoS channel according to the dsa user port
	 * index, rely on port id otherwise. Select QoS queue based on the
	 * skb priority.
	 */
	channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id;
	channel = channel % AIROHA_NUM_QOS_CHANNELS;
	queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */
	queue = channel * AIROHA_NUM_QOS_QUEUES + queue;

	return queue < dev->num_tx_queues ? queue : 0;
}

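/* For MTK-tagged DSA frames, either rewrite the tag TPID back to a
 * standard 802.1Q/802.1AD ethertype or, for untagged transmissions,
 * strip the DSA header entirely and return it so the caller can stash
 * it in the TX DMA descriptor (the PPE requires untagged frames).
 */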
static void airoha_dev_get_stats64(struct net_device *dev,
				   struct rtnl_link_stats64 *storage)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	unsigned int start;

	airoha_update_hw_stats(port);
	do {
		start = u64_stats_fetch_begin(&port->stats.syncp);
		storage->rx_packets = port->stats.rx_ok_pkts;
		storage->tx_packets = port->stats.tx_ok_pkts;
		storage->rx_bytes = port->stats.rx_ok_bytes;
		storage->tx_bytes = port->stats.tx_ok_bytes;
		storage->multicast = port->stats.rx_multicast;
		storage->rx_errors = port->stats.rx_errors;
		storage->rx_dropped = port->stats.rx_drops;
		storage->tx_dropped = port->stats.tx_drops;
		storage->rx_crc_errors = port->stats.rx_crc_error;
		storage->rx_over_errors = port->stats.rx_over_errors;
	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}

static int airoha_dev_change_mtu(struct net_device *dev, int mtu)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct airoha_eth *eth = port->qdma->eth;
	u32 len = ETH_HLEN + mtu + ETH_FCS_LEN;

	airoha_fe_rmw(eth, REG_GDM_LEN_CFG(port->id),
		      GDM_LONG_LEN_MASK,
		      FIELD_PREP(GDM_LONG_LEN_MASK, len));
	WRITE_ONCE(dev->mtu, mtu);

	return 0;
}

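/* Worked example, assuming AIROHA_NUM_QOS_QUEUES is 8: a skb with
 * priority 3 egressing on channel 1 maps to QoS queue (3 - 1) % 8 = 2,
 * i.e. tx queue 1 * 8 + 2 = 10. Priority 0 wraps to the last queue of
 * the channel since skb->priority is unsigned.
 */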
static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
				   struct net_device *sb_dev)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	int queue, channel;

	/* For dsa device select QoS channel according to the dsa user port
	 * index, rely on port id otherwise. Select QoS queue based on the
	 * skb priority.
	 */
	channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id;
	channel = channel % AIROHA_NUM_QOS_CHANNELS;
	queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */
	queue = channel * AIROHA_NUM_QOS_QUEUES + queue;

	return queue < dev->num_tx_queues ? queue : 0;
}

static u32 airoha_get_dsa_tag(struct sk_buff *skb, struct net_device *dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct ethhdr *ehdr;
	u8 xmit_tpid;
	u16 tag;

	if (!netdev_uses_dsa(dev))
		return 0;

	if (dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
		return 0;

	if (skb_cow_head(skb, 0))
		return 0;

	ehdr = (struct ethhdr *)skb->data;
	tag = be16_to_cpu(ehdr->h_proto);
	xmit_tpid = tag >> 8;

	switch (xmit_tpid) {
	case MTK_HDR_XMIT_TAGGED_TPID_8100:
		ehdr->h_proto = cpu_to_be16(ETH_P_8021Q);
		tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_8100 << 8);
		break;
	case MTK_HDR_XMIT_TAGGED_TPID_88A8:
		ehdr->h_proto = cpu_to_be16(ETH_P_8021AD);
		tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_88A8 << 8);
		break;
	default:
		/* PPE module requires untagged DSA packets to work properly,
		 * so move DSA tag to DMA descriptor.
		 */
		memmove(skb->data + MTK_HDR_LEN, skb->data, 2 * ETH_ALEN);
		__skb_pull(skb, MTK_HDR_LEN);
		break;
	}

	return tag;
#else
	return 0;
#endif
}

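/* GDM3 (on EN7583) and GDM4 (on EN7581) are wired to dedicated PSE
 * ports; the remaining GDM ports use their port id as FE port
 * directly.
 */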
static int airoha_get_fe_port(struct airoha_gdm_port *port)
{
	struct airoha_qdma *qdma = port->qdma;
	struct airoha_eth *eth = qdma->eth;

	switch (eth->soc->version) {
	case 0x7583:
		return port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3
						   : port->id;
	case 0x7581:
	default:
		return port->id == AIROHA_GDM4_IDX ? FE_PSE_PORT_GDM4
						   : port->id;
	}
}

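/* TX datapath: reserve one descriptor per fragment, fill the QDMA
 * message words (QoS channel/queue, checksum and TSO offload flags,
 * DSA tag, forwarding port) and kick the hw by advancing
 * REG_TX_CPU_IDX. Free tx entries are tracked on q->tx_list; on
 * mapping failure the entries claimed so far are unmapped and given
 * back to the queue.
 */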
static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct airoha_qdma *qdma = port->qdma;
	u32 nr_frags, tag, msg0, msg1, len;
	struct airoha_queue_entry *e;
	struct netdev_queue *txq;
	struct airoha_queue *q;
	LIST_HEAD(tx_list);
	void *data;
	int i, qid;
	u16 index;
	u8 fport;

	qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
	tag = airoha_get_dsa_tag(skb, dev);

	msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
			  qid / AIROHA_NUM_QOS_QUEUES) |
	       FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
			  qid % AIROHA_NUM_QOS_QUEUES) |
	       FIELD_PREP(QDMA_ETH_TXMSG_SP_TAG_MASK, tag);
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
			FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
			FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1);

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0))
			goto error;

		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 |
						 SKB_GSO_TCPV6)) {
			__be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size);

			tcp_hdr(skb)->check = (__force __sum16)csum;
			msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
		}
	}

	fport = airoha_get_fe_port(port);
	msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
	       FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);

	q = &qdma->q_tx[qid];
	if (WARN_ON_ONCE(!q->ndesc))
		goto error;

	spin_lock_bh(&q->lock);

	txq = netdev_get_tx_queue(dev, qid);
	nr_frags = 1 + skb_shinfo(skb)->nr_frags;

	if (q->queued + nr_frags >= q->ndesc) {
		/* not enough space in the queue */
		netif_tx_stop_queue(txq);
		spin_unlock_bh(&q->lock);
		return NETDEV_TX_BUSY;
	}

	len = skb_headlen(skb);
	data = skb->data;

	e = list_first_entry(&q->tx_list, struct airoha_queue_entry,
			     list);
	index = e - q->entry;

	for (i = 0; i < nr_frags; i++) {
		struct airoha_qdma_desc *desc = &q->desc[index];
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t addr;
		u32 val;

		addr = dma_map_single(dev->dev.parent, data, len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
			goto error_unmap;

		list_move_tail(&e->list, &tx_list);
		e->skb = i ? NULL : skb;
		e->dma_addr = addr;
		e->dma_len = len;

		e = list_first_entry(&q->tx_list, struct airoha_queue_entry,
				     list);
		index = e - q->entry;

		val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
		if (i < nr_frags - 1)
			val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1);
		WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
		WRITE_ONCE(desc->addr, cpu_to_le32(addr));
		val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index);
		WRITE_ONCE(desc->data, cpu_to_le32(val));
		WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
		WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
		WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));

		data = skb_frag_address(frag);
		len = skb_frag_size(frag);
	}
	q->queued += i;

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	if (netif_xmit_stopped(txq) || !netdev_xmit_more())
		airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
				TX_RING_CPU_IDX_MASK,
				FIELD_PREP(TX_RING_CPU_IDX_MASK, index));

	if (q->ndesc - q->queued < q->free_thr)
		netif_tx_stop_queue(txq);

	spin_unlock_bh(&q->lock);

	return NETDEV_TX_OK;

error_unmap:
	while (!list_empty(&tx_list)) {
		e = list_first_entry(&tx_list, struct airoha_queue_entry,
				     list);
		dma_unmap_single(dev->dev.parent, e->dma_addr, e->dma_len,
				 DMA_TO_DEVICE);
		e->dma_addr = 0;
		list_move_tail(&e->list, &q->tx_list);
	}

	spin_unlock_bh(&q->lock);
error:
	dev_kfree_skb_any(skb);
	dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}

static void airoha_ethtool_get_drvinfo(struct net_device *dev,
				       struct ethtool_drvinfo *info)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct airoha_eth *eth = port->qdma->eth;

	strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
	strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
}

static void airoha_ethtool_get_mac_stats(struct net_device *dev,
					 struct ethtool_eth_mac_stats *stats)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	unsigned int start;

	airoha_update_hw_stats(port);
	do {
		start = u64_stats_fetch_begin(&port->stats.syncp);
		stats->FramesTransmittedOK = port->stats.tx_ok_pkts;
		stats->OctetsTransmittedOK = port->stats.tx_ok_bytes;
		stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
		stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
		stats->FramesReceivedOK = port->stats.rx_ok_pkts;
		stats->OctetsReceivedOK = port->stats.rx_ok_bytes;
		stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}

static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = {
	{ 0, 64 },
	{ 65, 127 },
	{ 128, 255 },
	{ 256, 511 },
	{ 512, 1023 },
	{ 1024, 1518 },
	{ 1519, 10239 },
	{},
};

static void
airoha_ethtool_get_rmon_stats(struct net_device *dev,
			      struct ethtool_rmon_stats *stats,
			      const struct ethtool_rmon_hist_range **ranges)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct airoha_hw_stats *hw_stats = &port->stats;
	unsigned int start;

	BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
		     ARRAY_SIZE(hw_stats->tx_len) + 1);
	BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
		     ARRAY_SIZE(hw_stats->rx_len) + 1);

	*ranges = airoha_ethtool_rmon_ranges;
	airoha_update_hw_stats(port);
	do {
		int i;

		start = u64_stats_fetch_begin(&port->stats.syncp);
		stats->fragments = hw_stats->rx_fragment;
		stats->jabbers = hw_stats->rx_jabber;
		for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1;
		     i++) {
			stats->hist[i] = hw_stats->rx_len[i];
			stats->hist_tx[i] = hw_stats->tx_len[i];
		}
	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}

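/* Per-queue WRR weights are programmed through the indirect TXWRR
 * interface: each write latches channel, queue and weight, and is
 * polled for completion via TWRR_RW_CMD_DONE before moving to the
 * next queue.
 */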
static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port,
					 int channel, enum tx_sched_mode mode,
					 const u16 *weights, u8 n_weights)
{
	int i;

	for (i = 0; i < AIROHA_NUM_TX_RING; i++)
		airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel),
				  TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i));

	for (i = 0; i < n_weights; i++) {
		u32 status;
		int err;

		airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG,
			       TWRR_RW_CMD_MASK |
			       FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) |
			       FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) |
			       FIELD_PREP(TWRR_VALUE_MASK, weights[i]));
		err = read_poll_timeout(airoha_qdma_rr, status,
					status & TWRR_RW_CMD_DONE,
					USEC_PER_MSEC, 10 * USEC_PER_MSEC,
					true, port->qdma,
					REG_TXWRR_WEIGHT_CFG);
		if (err)
			return err;
	}

	airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3),
			CHAN_QOS_MODE_MASK(channel),
			mode << __ffs(CHAN_QOS_MODE_MASK(channel)));

	return 0;
}

static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port,
					 int channel)
{
	static const u16 w[AIROHA_NUM_QOS_QUEUES] = {};

	return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w,
					     ARRAY_SIZE(w));
}

static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port,
					int channel,
					struct tc_ets_qopt_offload *opt)
{
	struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
	enum tx_sched_mode mode = TC_SCH_SP;
	u16 w[AIROHA_NUM_QOS_QUEUES] = {};
	int i, nstrict = 0;

	if (p->bands > AIROHA_NUM_QOS_QUEUES)
		return -EINVAL;

	for (i = 0; i < p->bands; i++) {
		if (!p->quanta[i])
			nstrict++;
	}

	/* this configuration is not supported by the hw */
	if (nstrict == AIROHA_NUM_QOS_QUEUES - 1)
		return -EINVAL;

	/* EN7581 SoC supports fixed QoS band priority where WRR queues have
	 * lowest priorities with respect to SP ones.
	 * e.g: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn
	 */
	for (i = 0; i < nstrict; i++) {
		if (p->priomap[p->bands - i - 1] != i)
			return -EINVAL;
	}

	for (i = 0; i < p->bands - nstrict; i++) {
		if (p->priomap[i] != nstrict + i)
			return -EINVAL;

		w[i] = p->weights[nstrict + i];
	}

	if (!nstrict)
		mode = TC_SCH_WRR8;
	else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1)
		mode = nstrict + 1;

	return airoha_qdma_set_chan_tx_sched(port, channel, mode, w,
					     ARRAY_SIZE(w));
}

static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port,
					int channel,
					struct tc_ets_qopt_offload *opt)
{
	u64 cpu_tx_packets = airoha_qdma_rr(port->qdma,
					    REG_CNTR_VAL(channel << 1));
	u64 fwd_tx_packets = airoha_qdma_rr(port->qdma,
					    REG_CNTR_VAL((channel << 1) + 1));
	u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) +
			 (fwd_tx_packets - port->fwd_tx_packets);

	_bstats_update(opt->stats.bstats, 0, tx_packets);

	port->cpu_tx_packets = cpu_tx_packets;
	port->fwd_tx_packets = fwd_tx_packets;

	return 0;
}

static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
				     struct tc_ets_qopt_offload *opt)
{
	int channel;

	if (opt->parent == TC_H_ROOT)
		return -EINVAL;

	channel = TC_H_MAJ(opt->handle) >> 16;
	channel = channel % AIROHA_NUM_QOS_CHANNELS;

	switch (opt->command) {
	case TC_ETS_REPLACE:
		return airoha_qdma_set_tx_ets_sched(port, channel, opt);
	case TC_ETS_DESTROY:
		/* PRIO is default qdisc scheduler */
		return airoha_qdma_set_tx_prio_sched(port, channel);
	case TC_ETS_STATS:
		return airoha_qdma_get_tx_ets_stats(port, channel, opt);
	default:
		return -EOPNOTSUPP;
	}
}

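/* Queue rate limiters and channel shapers share the same indirect
 * access scheme: a config word selecting parameter type, meter group
 * and index is written to REG_TRTCM_CFG_PARAM, completion is polled
 * through the RW_DONE bit and the value is exchanged via
 * REG_TRTCM_DATA_LOW / REG_TRTCM_DATA_HIGH.
 */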
static int airoha_qdma_get_rl_param(struct airoha_qdma *qdma, int queue_id,
				    u32 addr, enum trtcm_param_type param,
				    u32 *val_low, u32 *val_high)
{
	u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
	u32 val, config = FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
			  FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
			  FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);

	airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
	if (read_poll_timeout(airoha_qdma_rr, val,
			      val & RATE_LIMIT_PARAM_RW_DONE_MASK,
			      USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, qdma,
			      REG_TRTCM_CFG_PARAM(addr)))
		return -ETIMEDOUT;

	*val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
	if (val_high)
		*val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));

	return 0;
}

static int airoha_qdma_set_rl_param(struct airoha_qdma *qdma, int queue_id,
				    u32 addr, enum trtcm_param_type param,
				    u32 val)
{
	u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
	u32 config = RATE_LIMIT_PARAM_RW_MASK |
		     FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
		     FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
		     FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);

	airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
	airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);

	return read_poll_timeout(airoha_qdma_rr, val,
				 val & RATE_LIMIT_PARAM_RW_DONE_MASK,
				 USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
				 qdma, REG_TRTCM_CFG_PARAM(addr));
}

static int airoha_qdma_set_rl_config(struct airoha_qdma *qdma, int queue_id,
				     u32 addr, bool enable, u32 enable_mask)
{
	u32 val;
	int err;

	err = airoha_qdma_get_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
				       &val, NULL);
	if (err)
		return err;

	val = enable ? val | enable_mask : val & ~enable_mask;

	return airoha_qdma_set_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
					val);
}

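/* The requested rate is split into an integer multiple of the hw unit
 * plus a fixed-point remainder: e.g. with unit = 1000 a 2500 Kbps
 * request programs rate = 2 and a fractional part of 500/1000, scaled
 * to the width of TRTCM_TOKEN_RATE_MASK.
 */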
static int airoha_qdma_set_rl_token_bucket(struct airoha_qdma *qdma,
					   int queue_id, u32 rate_val,
					   u32 bucket_size)
{
	u32 val, config, tick, unit, rate, rate_frac;
	int err;

	err = airoha_qdma_get_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
				       TRTCM_MISC_MODE, &config, NULL);
	if (err)
		return err;

	val = airoha_qdma_rr(qdma, REG_INGRESS_TRTCM_CFG);
	tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
	if (config & TRTCM_TICK_SEL)
		tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
	if (!tick)
		return -EINVAL;

	unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
	if (!unit)
		return -EINVAL;

	rate = rate_val / unit;
	rate_frac = rate_val % unit;
	rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
	rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
	       FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);

	err = airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
				       TRTCM_TOKEN_RATE_MODE, rate);
	if (err)
		return err;

	val = bucket_size;
	if (!(config & TRTCM_PKT_MODE))
		val = max_t(u32, val, MIN_TOKEN_SIZE);
	val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);

	return airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
					TRTCM_BUCKETSIZE_SHIFT_MODE, val);
}

static int airoha_qdma_init_rl_config(struct airoha_qdma *qdma, int queue_id,
				      bool enable, enum trtcm_unit_type unit)
{
	bool tick_sel = queue_id == 0 || queue_id == 2 || queue_id == 8;
	enum trtcm_param mode = TRTCM_METER_MODE;
	int err;

	mode |= unit == TRTCM_PACKET_UNIT ? TRTCM_PKT_MODE : 0;
	err = airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
					enable, mode);
	if (err)
		return err;

	return airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
					 tick_sel, TRTCM_TICK_SEL);
}

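/* Egress shaping relies on a two-rate three-color marker (trTCM) per
 * channel: same parameter layout as the ingress rate limiter, plus a
 * rate-type selector addressing the committed and peak token buckets
 * independently.
 */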
static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
				       u32 addr, enum trtcm_param_type param,
				       enum trtcm_mode_type mode,
				       u32 *val_low, u32 *val_high)
{
	u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
	u32 val, config = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
			  FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
			  FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
			  FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);

	airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
	if (read_poll_timeout(airoha_qdma_rr, val,
			      val & TRTCM_PARAM_RW_DONE_MASK,
			      USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
			      qdma, REG_TRTCM_CFG_PARAM(addr)))
		return -ETIMEDOUT;

	*val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
	if (val_high)
		*val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));

	return 0;
}

static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel,
				       u32 addr, enum trtcm_param_type param,
				       enum trtcm_mode_type mode, u32 val)
{
	u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
	u32 config = TRTCM_PARAM_RW_MASK |
		     FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
		     FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
		     FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
		     FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);

	airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
	airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);

	return read_poll_timeout(airoha_qdma_rr, val,
				 val & TRTCM_PARAM_RW_DONE_MASK,
				 USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
				 qdma, REG_TRTCM_CFG_PARAM(addr));
}

static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel,
					u32 addr, enum trtcm_mode_type mode,
					bool enable, u32 enable_mask)
{
	u32 val;

	if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
					mode, &val, NULL))
		return -EINVAL;

	val = enable ? val | enable_mask : val & ~enable_mask;

	return airoha_qdma_set_trtcm_param(qdma, channel, addr,
					   TRTCM_MISC_MODE, mode, val);
}

static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma,
					      int channel, u32 addr,
					      enum trtcm_mode_type mode,
					      u32 rate_val, u32 bucket_size)
{
	u32 val, config, tick, unit, rate, rate_frac;
	int err;

	if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
					mode, &config, NULL))
		return -EINVAL;

	val = airoha_qdma_rr(qdma, addr);
	tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
	if (config & TRTCM_TICK_SEL)
		tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
	if (!tick)
		return -EINVAL;

	unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
	if (!unit)
		return -EINVAL;

	rate = rate_val / unit;
	rate_frac = rate_val % unit;
	rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
	rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
	       FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);

	err = airoha_qdma_set_trtcm_param(qdma, channel, addr,
					  TRTCM_TOKEN_RATE_MODE, mode, rate);
	if (err)
		return err;

	val = max_t(u32, bucket_size, MIN_TOKEN_SIZE);
	val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);

	return airoha_qdma_set_trtcm_param(qdma, channel, addr,
					   TRTCM_BUCKETSIZE_SHIFT_MODE,
					   mode, val);
}

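/* Apply the same rate/bucket pair to both trtcm rate types: the loop
 * below runs once for the committed rate and once for the peak rate,
 * with i iterating up to TRTCM_PEAK_MODE.
 */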
static int airoha_qdma_set_tx_rate_limit(struct airoha_gdm_port *port,
					 int channel, u32 rate,
					 u32 bucket_size)
{
	int i, err;

	for (i = 0; i <= TRTCM_PEAK_MODE; i++) {
		err = airoha_qdma_set_trtcm_config(port->qdma, channel,
						   REG_EGRESS_TRTCM_CFG, i,
						   !!rate, TRTCM_METER_MODE);
		if (err)
			return err;

		err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel,
							 REG_EGRESS_TRTCM_CFG,
							 i, rate, bucket_size);
		if (err)
			return err;
	}

	return 0;
}

static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
					  struct tc_htb_qopt_offload *opt)
{
	u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
	u32 rate = div_u64(opt->rate, 1000) << 3; /* kbps */
	struct net_device *dev = port->dev;
	int num_tx_queues = dev->real_num_tx_queues;
	int err;

	if (opt->parent_classid != TC_HTB_CLASSID_ROOT) {
		NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid");
		return -EINVAL;
	}

	err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum);
	if (err) {
		NL_SET_ERR_MSG_MOD(opt->extack,
				   "failed configuring htb offload");
		return err;
	}

	if (opt->command == TC_HTB_NODE_MODIFY)
		return 0;

	err = netif_set_real_num_tx_queues(dev, num_tx_queues + 1);
	if (err) {
		airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum);
		NL_SET_ERR_MSG_MOD(opt->extack,
				   "failed setting real_num_tx_queues");
		return err;
	}

	set_bit(channel, port->qos_sq_bmap);
	opt->qid = AIROHA_NUM_TX_RING + channel;

	return 0;
}

static int airoha_qdma_set_rx_meter(struct airoha_gdm_port *port,
				    u32 rate, u32 bucket_size,
				    enum trtcm_unit_type unit_type)
{
	struct airoha_qdma *qdma = port->qdma;
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		int err;

		if (!qdma->q_rx[i].ndesc)
			continue;

		err = airoha_qdma_init_rl_config(qdma, i, !!rate, unit_type);
		if (err)
			return err;

		err = airoha_qdma_set_rl_token_bucket(qdma, i, rate,
						      bucket_size);
		if (err)
			return err;
	}

	return 0;
}

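/* tc matchall offload: a single police action is accepted and mapped
 * onto the ingress meters above. Byte-mode rates are converted to Kbps
 * (rate_bytes_ps / 1000 * 8) while packet-mode rates are programmed
 * as-is.
 */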
static int airoha_tc_matchall_act_validate(struct tc_cls_matchall_offload *f)
{
	const struct flow_action *actions = &f->rule->action;
	const struct flow_action_entry *act;

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "filter run with no actions");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "only one action per filter is supported");
		return -EOPNOTSUPP;
	}

	act = &actions->entries[0];
	if (act->id != FLOW_ACTION_POLICE) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "unsupported action");
		return -EOPNOTSUPP;
	}

	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "invalid exceed action id");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "invalid notexceed action id");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(actions, act)) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "action accept must be last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps || act->police.avrate ||
	    act->police.overhead || act->police.mtu) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "peakrate/avrate/overhead/mtu unsupported");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int airoha_dev_tc_matchall(struct net_device *dev,
				  struct tc_cls_matchall_offload *f)
{
	enum trtcm_unit_type unit_type = TRTCM_BYTE_UNIT;
	struct airoha_gdm_port *port = netdev_priv(dev);
	u32 rate = 0, bucket_size = 0;

	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE: {
		const struct flow_action_entry *act;
		int err;

		err = airoha_tc_matchall_act_validate(f);
		if (err)
			return err;

		act = &f->rule->action.entries[0];
		if (act->police.rate_pkt_ps) {
			rate = act->police.rate_pkt_ps;
			bucket_size = act->police.burst_pkt;
			unit_type = TRTCM_PACKET_UNIT;
		} else {
			rate = div_u64(act->police.rate_bytes_ps, 1000);
			rate = rate << 3; /* Kbps */
			bucket_size = act->police.burst;
		}
		fallthrough;
	}
	case TC_CLSMATCHALL_DESTROY:
		return airoha_qdma_set_rx_meter(port, rate, bucket_size,
						unit_type);
	default:
		return -EOPNOTSUPP;
	}
}

static int airoha_dev_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct net_device *dev = cb_priv;
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct airoha_eth *eth = port->qdma->eth;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return airoha_ppe_setup_tc_block_cb(&eth->ppe->dev, type_data);
	case TC_SETUP_CLSMATCHALL:
		return airoha_dev_tc_matchall(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int airoha_dev_setup_tc_block(struct airoha_gdm_port *port,
				     struct flow_block_offload *f)
{
	flow_setup_cb_t *cb = airoha_dev_setup_tc_block_cb;
	static LIST_HEAD(block_cb_list);
	struct flow_block_cb *block_cb;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = &block_cb_list;
	switch (f->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
		if (block_cb) {
			flow_block_cb_incref(block_cb);
			return 0;
		}
		block_cb = flow_block_cb_alloc(cb, port->dev, port->dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_incref(block_cb);
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
		if (!block_cb)
			return -ENOENT;

		if (!flow_block_cb_decref(block_cb)) {
			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
{
	struct net_device *dev = port->dev;

	netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1);
	airoha_qdma_set_tx_rate_limit(port, queue + 1, 0, 0);
	clear_bit(queue, port->qos_sq_bmap);
}

static int airoha_tc_htb_delete_leaf_queue(struct airoha_gdm_port *port,
					   struct tc_htb_qopt_offload *opt)
{
	u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;

	if (!test_bit(channel, port->qos_sq_bmap)) {
		NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
		return -EINVAL;
	}

	airoha_tc_remove_htb_queue(port, channel);

	return 0;
}

static int airoha_tc_htb_destroy(struct airoha_gdm_port *port)
{
	int q;

	for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS)
		airoha_tc_remove_htb_queue(port, q);

	return 0;
}

static int airoha_tc_get_htb_get_leaf_queue(struct airoha_gdm_port *port,
					    struct tc_htb_qopt_offload *opt)
{
	u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;

	if (!test_bit(channel, port->qos_sq_bmap)) {
		NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
		return -EINVAL;
	}

	opt->qid = AIROHA_NUM_TX_RING + channel;

	return 0;
}

static int airoha_tc_setup_qdisc_htb(struct airoha_gdm_port *port,
				     struct tc_htb_qopt_offload *opt)
{
	switch (opt->command) {
	case TC_HTB_CREATE:
		break;
	case TC_HTB_DESTROY:
		return airoha_tc_htb_destroy(port);
	case TC_HTB_NODE_MODIFY:
	case TC_HTB_LEAF_ALLOC_QUEUE:
		return airoha_tc_htb_alloc_leaf_queue(port, opt);
	case TC_HTB_LEAF_DEL:
	case TC_HTB_LEAF_DEL_LAST:
	case TC_HTB_LEAF_DEL_LAST_FORCE:
		return airoha_tc_htb_delete_leaf_queue(port, opt);
	case TC_HTB_LEAF_QUERY_QUEUE:
		return airoha_tc_get_htb_get_leaf_queue(port, opt);
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
			       void *type_data)
{
	struct airoha_gdm_port *port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_QDISC_ETS:
		return airoha_tc_setup_qdisc_ets(port, type_data);
	case TC_SETUP_QDISC_HTB:
		return airoha_tc_setup_qdisc_htb(port, type_data);
	case TC_SETUP_BLOCK:
	case TC_SETUP_FT:
		return airoha_dev_setup_tc_block(port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops airoha_netdev_ops = {
	.ndo_init		= airoha_dev_init,
	.ndo_open		= airoha_dev_open,
	.ndo_stop		= airoha_dev_stop,
	.ndo_change_mtu		= airoha_dev_change_mtu,
	.ndo_select_queue	= airoha_dev_select_queue,
	.ndo_start_xmit		= airoha_dev_xmit,
	.ndo_get_stats64	= airoha_dev_get_stats64,
	.ndo_set_mac_address	= airoha_dev_set_macaddr,
	.ndo_setup_tc		= airoha_dev_tc_setup,
};

static const struct ethtool_ops airoha_ethtool_ops = {
	.get_drvinfo		= airoha_ethtool_get_drvinfo,
	.get_eth_mac_stats	= airoha_ethtool_get_mac_stats,
	.get_rmon_stats		= airoha_ethtool_get_rmon_stats,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.get_link		= ethtool_op_get_link,
};

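/* Pre-allocate one metadata_dst per DSA user port; these appear to be
 * used on the RX path to steer frames to the right DSA port through
 * METADATA_HW_PORT_MUX without per-packet allocations.
 */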
static int airoha_metadata_dst_alloc(struct airoha_gdm_port *port)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
		struct metadata_dst *md_dst;

		md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
					    GFP_KERNEL);
		if (!md_dst)
			return -ENOMEM;

		md_dst->u.port_info.port_id = i;
		port->dsa_meta[i] = md_dst;
	}

	return 0;
}

static void airoha_metadata_dst_free(struct airoha_gdm_port *port)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
		if (!port->dsa_meta[i])
			continue;

		metadata_dst_free(port->dsa_meta[i]);
	}
}

bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
			      struct airoha_gdm_port *port)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
		if (eth->ports[i] == port)
			return true;
	}

	return false;
}

static int airoha_alloc_gdm_port(struct airoha_eth *eth,
				 struct device_node *np, int index)
{
	const __be32 *id_ptr = of_get_property(np, "reg", NULL);
	struct airoha_gdm_port *port;
	struct airoha_qdma *qdma;
	struct net_device *dev;
	int err, p;
	u32 id;

	if (!id_ptr) {
		dev_err(eth->dev, "missing gdm port id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(id_ptr);
	p = id - 1;

	if (!id || id > ARRAY_SIZE(eth->ports)) {
		dev_err(eth->dev, "invalid gdm port id: %d\n", id);
		return -EINVAL;
	}

	if (eth->ports[p]) {
		dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
		return -EINVAL;
	}

	dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
				      AIROHA_NUM_NETDEV_TX_RINGS,
				      AIROHA_NUM_RX_RING);
	if (!dev) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}

	qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA];
	dev->netdev_ops = &airoha_netdev_ops;
	dev->ethtool_ops = &airoha_ethtool_ops;
	dev->max_mtu = AIROHA_MAX_MTU;
	dev->watchdog_timeo = 5 * HZ;
	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
			   NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
			   NETIF_F_SG | NETIF_F_TSO |
			   NETIF_F_HW_TC;
	dev->features |= dev->hw_features;
	dev->vlan_features = dev->hw_features;
	dev->dev.of_node = np;
	dev->irq = qdma->irq_banks[0].irq;
	SET_NETDEV_DEV(dev, eth->dev);

	/* reserve hw queues for HTB offloading */
	err = netif_set_real_num_tx_queues(dev, AIROHA_NUM_TX_RING);
	if (err)
		return err;

	err = of_get_ethdev_address(np, dev);
	if (err) {
		if (err == -EPROBE_DEFER)
			return err;

		eth_hw_addr_random(dev);
		dev_info(eth->dev, "generated random MAC address %pM\n",
			 dev->dev_addr);
	}

	port = netdev_priv(dev);
	u64_stats_init(&port->stats.syncp);
	spin_lock_init(&port->stats.lock);
	port->qdma = qdma;
	port->dev = dev;
	port->id = id;
	eth->ports[p] = port;

	return airoha_metadata_dst_alloc(port);
}

static int airoha_register_gdm_devices(struct airoha_eth *eth)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
		struct airoha_gdm_port *port = eth->ports[i];
		int err;

		if (!port)
			continue;

		err = register_netdev(port->dev);
		if (err)
			return err;
	}

	set_bit(DEV_STATE_REGISTERED, &eth->state);

	return 0;
}

static int airoha_probe(struct platform_device *pdev)
{
	struct reset_control_bulk_data *xsi_rsts;
	struct device_node *np;
	struct airoha_eth *eth;
	int i, err;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->soc = of_device_get_match_data(&pdev->dev);
	if (!eth->soc)
		return -EINVAL;

	eth->dev = &pdev->dev;

	err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(eth->dev, "failed configuring DMA mask\n");
		return err;
	}

	eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe");
	if (IS_ERR(eth->fe_regs))
		return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
				     "failed to iomap fe regs\n");

	eth->rsts[0].id = "fe";
	eth->rsts[1].id = "pdma";
	eth->rsts[2].id = "qdma";
	err = devm_reset_control_bulk_get_exclusive(eth->dev,
						    ARRAY_SIZE(eth->rsts),
						    eth->rsts);
	if (err) {
		dev_err(eth->dev, "failed to get bulk reset lines\n");
		return err;
	}

	xsi_rsts = devm_kcalloc(eth->dev,
				eth->soc->num_xsi_rsts, sizeof(*xsi_rsts),
				GFP_KERNEL);
	if (!xsi_rsts)
		return -ENOMEM;

	eth->xsi_rsts = xsi_rsts;
	for (i = 0; i < eth->soc->num_xsi_rsts; i++)
		eth->xsi_rsts[i].id = eth->soc->xsi_rsts_names[i];

	err = devm_reset_control_bulk_get_exclusive(eth->dev,
						    eth->soc->num_xsi_rsts,
						    eth->xsi_rsts);
	if (err) {
		dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
		return err;
	}

	eth->napi_dev = alloc_netdev_dummy(0);
	if (!eth->napi_dev)
		return -ENOMEM;

	/* Enable threaded NAPI by default */
	eth->napi_dev->threaded = true;
	strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
	platform_set_drvdata(pdev, eth);

	err = airoha_hw_init(pdev, eth);
	if (err)
		goto error_hw_cleanup;

	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
		airoha_qdma_start_napi(&eth->qdma[i]);

	i = 0;
	for_each_child_of_node(pdev->dev.of_node, np) {
		if (!of_device_is_compatible(np, "airoha,eth-mac"))
			continue;

		if (!of_device_is_available(np))
			continue;

		err = airoha_alloc_gdm_port(eth, np, i++);
		if (err) {
			of_node_put(np);
			goto error_napi_stop;
		}
	}

	err = airoha_register_gdm_devices(eth);
	if (err)
		goto error_napi_stop;

	return 0;

error_napi_stop:
	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
		airoha_qdma_stop_napi(&eth->qdma[i]);
	airoha_ppe_deinit(eth);
error_hw_cleanup:
	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
		airoha_hw_cleanup(&eth->qdma[i]);

	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
		struct airoha_gdm_port *port = eth->ports[i];

		if (!port)
			continue;

		if (port->dev->reg_state == NETREG_REGISTERED)
			unregister_netdev(port->dev);
		airoha_metadata_dst_free(port);
	}
	free_netdev(eth->napi_dev);
	platform_set_drvdata(pdev, NULL);

	return err;
}

static void airoha_remove(struct platform_device *pdev)
{
	struct airoha_eth *eth = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
		airoha_qdma_stop_napi(&eth->qdma[i]);
		airoha_hw_cleanup(&eth->qdma[i]);
	}

	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
		struct airoha_gdm_port *port = eth->ports[i];

		if (!port)
			continue;

		unregister_netdev(port->dev);
		airoha_metadata_dst_free(port);
	}
	free_netdev(eth->napi_dev);

	airoha_ppe_deinit(eth);
	platform_set_drvdata(pdev, NULL);
}

static const char * const en7581_xsi_rsts_names[] = {
	"xsi-mac",
	"hsi0-mac",
	"hsi1-mac",
	"hsi-mac",
	"xfp-mac",
};

static int airoha_en7581_get_src_port_id(struct airoha_gdm_port *port, int nbq)
{
	switch (port->id) {
	case AIROHA_GDM3_IDX:
		/* 7581 SoC supports PCIe serdes on GDM3 port */
		if (nbq == 4)
			return HSGMII_LAN_7581_PCIE0_SRCPORT;
		if (nbq == 5)
			return HSGMII_LAN_7581_PCIE1_SRCPORT;
		break;
	case AIROHA_GDM4_IDX:
		/* 7581 SoC supports eth and usb serdes on GDM4 port */
		if (!nbq)
			return HSGMII_LAN_7581_ETH_SRCPORT;
		if (nbq == 1)
			return HSGMII_LAN_7581_USB_SRCPORT;
		break;
	default:
		break;
	}

	return -EINVAL;
}

static const char * const an7583_xsi_rsts_names[] = {
	"xsi-mac",
	"hsi0-mac",
	"hsi1-mac",
	"xfp-mac",
};

static int airoha_an7583_get_src_port_id(struct airoha_gdm_port *port, int nbq)
{
	switch (port->id) {
	case AIROHA_GDM3_IDX:
		/* 7583 SoC supports eth serdes on GDM3 port */
		if (!nbq)
			return HSGMII_LAN_7583_ETH_SRCPORT;
		break;
	case AIROHA_GDM4_IDX:
		/* 7583 SoC supports PCIe and USB serdes on GDM4 port */
		if (!nbq)
			return HSGMII_LAN_7583_PCIE_SRCPORT;
		if (nbq == 1)
			return HSGMII_LAN_7583_USB_SRCPORT;
		break;
	default:
		break;
	}

	return -EINVAL;
}

static const struct airoha_eth_soc_data en7581_soc_data = {
	.version = 0x7581,
	.xsi_rsts_names = en7581_xsi_rsts_names,
	.num_xsi_rsts = ARRAY_SIZE(en7581_xsi_rsts_names),
	.num_ppe = 2,
	.ops = {
		.get_src_port_id = airoha_en7581_get_src_port_id,
	},
};

static const struct airoha_eth_soc_data an7583_soc_data = {
	.version = 0x7583,
	.xsi_rsts_names = an7583_xsi_rsts_names,
	.num_xsi_rsts = ARRAY_SIZE(an7583_xsi_rsts_names),
	.num_ppe = 1,
	.ops = {
		.get_src_port_id = airoha_an7583_get_src_port_id,
	},
};

static const struct of_device_id of_airoha_match[] = {
	{ .compatible = "airoha,en7581-eth", .data = &en7581_soc_data },
	{ .compatible = "airoha,an7583-eth", .data = &an7583_soc_data },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_airoha_match);

static struct platform_driver airoha_driver = {
	.probe = airoha_probe,
	.remove = airoha_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = of_airoha_match,
	},
};
module_platform_driver(airoha_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_DESCRIPTION("Ethernet driver for Airoha SoC");