1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2024 AIROHA Inc 4 * Author: Lorenzo Bianconi <lorenzo@kernel.org> 5 */ 6 #include <linux/of.h> 7 #include <linux/of_net.h> 8 #include <linux/platform_device.h> 9 #include <linux/tcp.h> 10 #include <linux/u64_stats_sync.h> 11 #include <net/dst_metadata.h> 12 #include <net/page_pool/helpers.h> 13 #include <net/pkt_cls.h> 14 #include <uapi/linux/ppp_defs.h> 15 16 #include "airoha_regs.h" 17 #include "airoha_eth.h" 18 19 u32 airoha_rr(void __iomem *base, u32 offset) 20 { 21 return readl(base + offset); 22 } 23 24 void airoha_wr(void __iomem *base, u32 offset, u32 val) 25 { 26 writel(val, base + offset); 27 } 28 29 u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val) 30 { 31 val |= (airoha_rr(base, offset) & ~mask); 32 airoha_wr(base, offset, val); 33 34 return val; 35 } 36 37 static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index, 38 u32 clear, u32 set) 39 { 40 unsigned long flags; 41 42 if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask))) 43 return; 44 45 spin_lock_irqsave(&qdma->irq_lock, flags); 46 47 qdma->irqmask[index] &= ~clear; 48 qdma->irqmask[index] |= set; 49 airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]); 50 /* Read irq_enable register in order to guarantee the update above 51 * completes in the spinlock critical section. 52 */ 53 airoha_qdma_rr(qdma, REG_INT_ENABLE(index)); 54 55 spin_unlock_irqrestore(&qdma->irq_lock, flags); 56 } 57 58 static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index, 59 u32 mask) 60 { 61 airoha_qdma_set_irqmask(qdma, index, 0, mask); 62 } 63 64 static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index, 65 u32 mask) 66 { 67 airoha_qdma_set_irqmask(qdma, index, mask, 0); 68 } 69 70 static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port) 71 { 72 /* GDM1 port on EN7581 SoC is connected to the lan dsa switch. 73 * GDM{2,3,4} can be used as wan port connected to an external 74 * phy module. 75 */ 76 return port->id == 1; 77 } 78 79 static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr) 80 { 81 struct airoha_eth *eth = port->qdma->eth; 82 u32 val, reg; 83 84 reg = airhoa_is_lan_gdm_port(port) ? REG_FE_LAN_MAC_H 85 : REG_FE_WAN_MAC_H; 86 val = (addr[0] << 16) | (addr[1] << 8) | addr[2]; 87 airoha_fe_wr(eth, reg, val); 88 89 val = (addr[3] << 16) | (addr[4] << 8) | addr[5]; 90 airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val); 91 airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val); 92 } 93 94 static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr, 95 u32 val) 96 { 97 airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK, 98 FIELD_PREP(GDM_OCFQ_MASK, val)); 99 airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK, 100 FIELD_PREP(GDM_MCFQ_MASK, val)); 101 airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK, 102 FIELD_PREP(GDM_BCFQ_MASK, val)); 103 airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK, 104 FIELD_PREP(GDM_UCFQ_MASK, val)); 105 } 106 107 static int airoha_set_vip_for_gdm_port(struct airoha_gdm_port *port, 108 bool enable) 109 { 110 struct airoha_eth *eth = port->qdma->eth; 111 u32 vip_port; 112 113 switch (port->id) { 114 case 3: 115 /* FIXME: handle XSI_PCIE1_PORT */ 116 vip_port = XSI_PCIE0_VIP_PORT_MASK; 117 break; 118 case 4: 119 /* FIXME: handle XSI_USB_PORT */ 120 vip_port = XSI_ETH_VIP_PORT_MASK; 121 break; 122 default: 123 return 0; 124 } 125 126 if (enable) { 127 airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port); 128 airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port); 129 } else { 130 airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port); 131 airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port); 132 } 133 134 return 0; 135 } 136 137 static void airoha_fe_maccr_init(struct airoha_eth *eth) 138 { 139 int p; 140 141 for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) 142 airoha_fe_set(eth, REG_GDM_FWD_CFG(p), 143 GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM | 144 GDM_DROP_CRC_ERR); 145 146 airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK, 147 FIELD_PREP(CDM1_VLAN_MASK, 0x8100)); 148 149 airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD); 150 } 151 152 static void airoha_fe_vip_setup(struct airoha_eth *eth) 153 { 154 airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC); 155 airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK); 156 157 airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP); 158 airoha_fe_wr(eth, REG_FE_VIP_EN(4), 159 PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | 160 PATN_EN_MASK); 161 162 airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP); 163 airoha_fe_wr(eth, REG_FE_VIP_EN(6), 164 PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | 165 PATN_EN_MASK); 166 167 airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP); 168 airoha_fe_wr(eth, REG_FE_VIP_EN(7), 169 PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | 170 PATN_EN_MASK); 171 172 /* BOOTP (0x43) */ 173 airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43); 174 airoha_fe_wr(eth, REG_FE_VIP_EN(8), 175 PATN_FCPU_EN_MASK | PATN_SP_EN_MASK | 176 FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK); 177 178 /* BOOTP (0x44) */ 179 airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44); 180 airoha_fe_wr(eth, REG_FE_VIP_EN(9), 181 PATN_FCPU_EN_MASK | PATN_SP_EN_MASK | 182 FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK); 183 184 /* ISAKMP */ 185 airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4); 186 airoha_fe_wr(eth, REG_FE_VIP_EN(10), 187 PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK | 188 FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK); 189 190 airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP); 191 airoha_fe_wr(eth, REG_FE_VIP_EN(11), 192 PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | 193 PATN_EN_MASK); 194 195 /* DHCPv6 */ 196 airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223); 197 airoha_fe_wr(eth, REG_FE_VIP_EN(12), 198 PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK | 199 FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK); 200 201 airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP); 202 airoha_fe_wr(eth, REG_FE_VIP_EN(19), 203 PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | 204 PATN_EN_MASK); 205 206 /* ETH->ETH_P_1905 (0x893a) */ 207 airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a); 208 airoha_fe_wr(eth, REG_FE_VIP_EN(20), 209 PATN_FCPU_EN_MASK | PATN_EN_MASK); 210 211 airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP); 212 airoha_fe_wr(eth, REG_FE_VIP_EN(21), 213 PATN_FCPU_EN_MASK | PATN_EN_MASK); 214 } 215 216 static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth, 217 u32 port, u32 queue) 218 { 219 u32 val; 220 221 airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR, 222 PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK, 223 FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) | 224 FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue)); 225 val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL); 226 227 return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val); 228 } 229 230 static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth, 231 u32 port, u32 queue, u32 val) 232 { 233 airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK, 234 FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val)); 235 airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR, 236 PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK | 237 PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK, 238 FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) | 239 FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) | 240 PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK); 241 } 242 243 static u32 airoha_fe_get_pse_all_rsv(struct airoha_eth *eth) 244 { 245 u32 val = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET); 246 247 return FIELD_GET(PSE_ALLRSV_MASK, val); 248 } 249 250 static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth, 251 u32 port, u32 queue, u32 val) 252 { 253 u32 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue); 254 u32 tmp, all_rsv, fq_limit; 255 256 airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val); 257 258 /* modify all rsv */ 259 all_rsv = airoha_fe_get_pse_all_rsv(eth); 260 all_rsv += (val - orig_val); 261 airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK, 262 FIELD_PREP(PSE_ALLRSV_MASK, all_rsv)); 263 264 /* modify hthd */ 265 tmp = airoha_fe_rr(eth, PSE_FQ_CFG); 266 fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp); 267 tmp = fq_limit - all_rsv - 0x20; 268 airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD, 269 PSE_SHARE_USED_HTHD_MASK, 270 FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp)); 271 272 tmp = fq_limit - all_rsv - 0x100; 273 airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD, 274 PSE_SHARE_USED_MTHD_MASK, 275 FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp)); 276 tmp = (3 * tmp) >> 2; 277 airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, 278 PSE_SHARE_USED_LTHD_MASK, 279 FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp)); 280 281 return 0; 282 } 283 284 static void airoha_fe_pse_ports_init(struct airoha_eth *eth) 285 { 286 const u32 pse_port_num_queues[] = { 287 [FE_PSE_PORT_CDM1] = 6, 288 [FE_PSE_PORT_GDM1] = 6, 289 [FE_PSE_PORT_GDM2] = 32, 290 [FE_PSE_PORT_GDM3] = 6, 291 [FE_PSE_PORT_PPE1] = 4, 292 [FE_PSE_PORT_CDM2] = 6, 293 [FE_PSE_PORT_CDM3] = 8, 294 [FE_PSE_PORT_CDM4] = 10, 295 [FE_PSE_PORT_PPE2] = 4, 296 [FE_PSE_PORT_GDM4] = 2, 297 [FE_PSE_PORT_CDM5] = 2, 298 }; 299 u32 all_rsv; 300 int q; 301 302 all_rsv = airoha_fe_get_pse_all_rsv(eth); 303 /* hw misses PPE2 oq rsv */ 304 all_rsv += PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2]; 305 airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv); 306 307 /* CMD1 */ 308 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++) 309 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q, 310 PSE_QUEUE_RSV_PAGES); 311 /* GMD1 */ 312 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++) 313 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q, 314 PSE_QUEUE_RSV_PAGES); 315 /* GMD2 */ 316 for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++) 317 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0); 318 /* GMD3 */ 319 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++) 320 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q, 321 PSE_QUEUE_RSV_PAGES); 322 /* PPE1 */ 323 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) { 324 if (q < pse_port_num_queues[FE_PSE_PORT_PPE1]) 325 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 326 PSE_QUEUE_RSV_PAGES); 327 else 328 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0); 329 } 330 /* CDM2 */ 331 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++) 332 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q, 333 PSE_QUEUE_RSV_PAGES); 334 /* CDM3 */ 335 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++) 336 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0); 337 /* CDM4 */ 338 for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++) 339 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q, 340 PSE_QUEUE_RSV_PAGES); 341 /* PPE2 */ 342 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) { 343 if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2) 344 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 345 PSE_QUEUE_RSV_PAGES); 346 else 347 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0); 348 } 349 /* GMD4 */ 350 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++) 351 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q, 352 PSE_QUEUE_RSV_PAGES); 353 /* CDM5 */ 354 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++) 355 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q, 356 PSE_QUEUE_RSV_PAGES); 357 } 358 359 static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth) 360 { 361 int i; 362 363 for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) { 364 int err, j; 365 u32 val; 366 367 airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0); 368 369 val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) | 370 MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK; 371 airoha_fe_wr(eth, REG_MC_VLAN_CFG, val); 372 err = read_poll_timeout(airoha_fe_rr, val, 373 val & MC_VLAN_CFG_CMD_DONE_MASK, 374 USEC_PER_MSEC, 5 * USEC_PER_MSEC, 375 false, eth, REG_MC_VLAN_CFG); 376 if (err) 377 return err; 378 379 for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) { 380 airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0); 381 382 val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) | 383 FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) | 384 MC_VLAN_CFG_RW_MASK; 385 airoha_fe_wr(eth, REG_MC_VLAN_CFG, val); 386 err = read_poll_timeout(airoha_fe_rr, val, 387 val & MC_VLAN_CFG_CMD_DONE_MASK, 388 USEC_PER_MSEC, 389 5 * USEC_PER_MSEC, false, eth, 390 REG_MC_VLAN_CFG); 391 if (err) 392 return err; 393 } 394 } 395 396 return 0; 397 } 398 399 static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth) 400 { 401 /* CDM1_CRSN_QSEL */ 402 airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2), 403 CDM1_CRSN_QSEL_REASON_MASK(CRSN_22), 404 FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22), 405 CDM_CRSN_QSEL_Q1)); 406 airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 >> 2), 407 CDM1_CRSN_QSEL_REASON_MASK(CRSN_08), 408 FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08), 409 CDM_CRSN_QSEL_Q1)); 410 airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2), 411 CDM1_CRSN_QSEL_REASON_MASK(CRSN_21), 412 FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21), 413 CDM_CRSN_QSEL_Q1)); 414 airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2), 415 CDM1_CRSN_QSEL_REASON_MASK(CRSN_24), 416 FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24), 417 CDM_CRSN_QSEL_Q6)); 418 airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2), 419 CDM1_CRSN_QSEL_REASON_MASK(CRSN_25), 420 FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25), 421 CDM_CRSN_QSEL_Q1)); 422 /* CDM2_CRSN_QSEL */ 423 airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2), 424 CDM2_CRSN_QSEL_REASON_MASK(CRSN_08), 425 FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08), 426 CDM_CRSN_QSEL_Q1)); 427 airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2), 428 CDM2_CRSN_QSEL_REASON_MASK(CRSN_21), 429 FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21), 430 CDM_CRSN_QSEL_Q1)); 431 airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2), 432 CDM2_CRSN_QSEL_REASON_MASK(CRSN_22), 433 FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22), 434 CDM_CRSN_QSEL_Q1)); 435 airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2), 436 CDM2_CRSN_QSEL_REASON_MASK(CRSN_24), 437 FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24), 438 CDM_CRSN_QSEL_Q6)); 439 airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2), 440 CDM2_CRSN_QSEL_REASON_MASK(CRSN_25), 441 FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25), 442 CDM_CRSN_QSEL_Q1)); 443 } 444 445 static int airoha_fe_init(struct airoha_eth *eth) 446 { 447 airoha_fe_maccr_init(eth); 448 449 /* PSE IQ reserve */ 450 airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK, 451 FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10)); 452 airoha_fe_rmw(eth, REG_PSE_IQ_REV2, 453 PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK, 454 FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) | 455 FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34)); 456 457 /* enable FE copy engine for MC/KA/DPI */ 458 airoha_fe_wr(eth, REG_FE_PCE_CFG, 459 PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK); 460 /* set vip queue selection to ring 1 */ 461 airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK, 462 FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4)); 463 airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK, 464 FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4)); 465 /* set GDM4 source interface offset to 8 */ 466 airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET, 467 GDM4_SPORT_OFF2_MASK | 468 GDM4_SPORT_OFF1_MASK | 469 GDM4_SPORT_OFF0_MASK, 470 FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) | 471 FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) | 472 FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8)); 473 474 /* set PSE Page as 128B */ 475 airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG, 476 FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK, 477 FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) | 478 FE_DMA_GLO_PG_SZ_MASK); 479 airoha_fe_wr(eth, REG_FE_RST_GLO_CFG, 480 FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK | 481 FE_RST_GDM4_MBI_ARB_MASK); 482 usleep_range(1000, 2000); 483 484 /* connect RxRing1 and RxRing15 to PSE Port0 OQ-1 485 * connect other rings to PSE Port0 OQ-0 486 */ 487 airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4)); 488 airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28)); 489 airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4)); 490 airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28)); 491 492 airoha_fe_vip_setup(eth); 493 airoha_fe_pse_ports_init(eth); 494 495 airoha_fe_set(eth, REG_GDM_MISC_CFG, 496 GDM2_RDM_ACK_WAIT_PREF_MASK | 497 GDM2_CHN_VLD_MODE_MASK); 498 airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK, 499 FIELD_PREP(CDM2_OAM_QSEL_MASK, 15)); 500 501 /* init fragment and assemble Force Port */ 502 /* NPU Core-3, NPU Bridge Channel-3 */ 503 airoha_fe_rmw(eth, REG_IP_FRAG_FP, 504 IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK, 505 FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) | 506 FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3)); 507 /* QDMA LAN, RX Ring-22 */ 508 airoha_fe_rmw(eth, REG_IP_FRAG_FP, 509 IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK, 510 FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) | 511 FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22)); 512 513 airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK); 514 airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK); 515 516 airoha_fe_crsn_qsel_init(eth); 517 518 airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK); 519 airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK); 520 521 /* default aging mode for mbi unlock issue */ 522 airoha_fe_rmw(eth, REG_GDM2_CHN_RLS, 523 MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK, 524 FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) | 525 FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3)); 526 527 /* disable IFC by default */ 528 airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK); 529 530 /* enable 1:N vlan action, init vlan table */ 531 airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK); 532 533 return airoha_fe_mc_vlan_clear(eth); 534 } 535 536 static int airoha_qdma_fill_rx_queue(struct airoha_queue *q) 537 { 538 enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool); 539 struct airoha_qdma *qdma = q->qdma; 540 struct airoha_eth *eth = qdma->eth; 541 int qid = q - &qdma->q_rx[0]; 542 int nframes = 0; 543 544 while (q->queued < q->ndesc - 1) { 545 struct airoha_queue_entry *e = &q->entry[q->head]; 546 struct airoha_qdma_desc *desc = &q->desc[q->head]; 547 struct page *page; 548 int offset; 549 u32 val; 550 551 page = page_pool_dev_alloc_frag(q->page_pool, &offset, 552 q->buf_size); 553 if (!page) 554 break; 555 556 q->head = (q->head + 1) % q->ndesc; 557 q->queued++; 558 nframes++; 559 560 e->buf = page_address(page) + offset; 561 e->dma_addr = page_pool_get_dma_addr(page) + offset; 562 e->dma_len = SKB_WITH_OVERHEAD(q->buf_size); 563 564 dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len, 565 dir); 566 567 val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len); 568 WRITE_ONCE(desc->ctrl, cpu_to_le32(val)); 569 WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr)); 570 val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head); 571 WRITE_ONCE(desc->data, cpu_to_le32(val)); 572 WRITE_ONCE(desc->msg0, 0); 573 WRITE_ONCE(desc->msg1, 0); 574 WRITE_ONCE(desc->msg2, 0); 575 WRITE_ONCE(desc->msg3, 0); 576 577 airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), 578 RX_RING_CPU_IDX_MASK, 579 FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head)); 580 } 581 582 return nframes; 583 } 584 585 static int airoha_qdma_get_gdm_port(struct airoha_eth *eth, 586 struct airoha_qdma_desc *desc) 587 { 588 u32 port, sport, msg1 = le32_to_cpu(desc->msg1); 589 590 sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1); 591 switch (sport) { 592 case 0x10 ... 0x14: 593 port = 0; 594 break; 595 case 0x2 ... 0x4: 596 port = sport - 1; 597 break; 598 default: 599 return -EINVAL; 600 } 601 602 return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port; 603 } 604 605 static int airoha_qdma_rx_process(struct airoha_queue *q, int budget) 606 { 607 enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool); 608 struct airoha_qdma *qdma = q->qdma; 609 struct airoha_eth *eth = qdma->eth; 610 int qid = q - &qdma->q_rx[0]; 611 int done = 0; 612 613 while (done < budget) { 614 struct airoha_queue_entry *e = &q->entry[q->tail]; 615 struct airoha_qdma_desc *desc = &q->desc[q->tail]; 616 u32 hash, reason, msg1 = le32_to_cpu(desc->msg1); 617 struct page *page = virt_to_head_page(e->buf); 618 u32 desc_ctrl = le32_to_cpu(desc->ctrl); 619 struct airoha_gdm_port *port; 620 int data_len, len, p; 621 622 if (!(desc_ctrl & QDMA_DESC_DONE_MASK)) 623 break; 624 625 q->tail = (q->tail + 1) % q->ndesc; 626 q->queued--; 627 628 dma_sync_single_for_cpu(eth->dev, e->dma_addr, 629 SKB_WITH_OVERHEAD(q->buf_size), dir); 630 631 len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl); 632 data_len = q->skb ? q->buf_size 633 : SKB_WITH_OVERHEAD(q->buf_size); 634 if (!len || data_len < len) 635 goto free_frag; 636 637 p = airoha_qdma_get_gdm_port(eth, desc); 638 if (p < 0 || !eth->ports[p]) 639 goto free_frag; 640 641 port = eth->ports[p]; 642 if (!q->skb) { /* first buffer */ 643 q->skb = napi_build_skb(e->buf, q->buf_size); 644 if (!q->skb) 645 goto free_frag; 646 647 __skb_put(q->skb, len); 648 skb_mark_for_recycle(q->skb); 649 q->skb->dev = port->dev; 650 q->skb->protocol = eth_type_trans(q->skb, port->dev); 651 q->skb->ip_summed = CHECKSUM_UNNECESSARY; 652 skb_record_rx_queue(q->skb, qid); 653 } else { /* scattered frame */ 654 struct skb_shared_info *shinfo = skb_shinfo(q->skb); 655 int nr_frags = shinfo->nr_frags; 656 657 if (nr_frags >= ARRAY_SIZE(shinfo->frags)) 658 goto free_frag; 659 660 skb_add_rx_frag(q->skb, nr_frags, page, 661 e->buf - page_address(page), len, 662 q->buf_size); 663 } 664 665 if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl)) 666 continue; 667 668 if (netdev_uses_dsa(port->dev)) { 669 /* PPE module requires untagged packets to work 670 * properly and it provides DSA port index via the 671 * DMA descriptor. Report DSA tag to the DSA stack 672 * via skb dst info. 673 */ 674 u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG, 675 le32_to_cpu(desc->msg0)); 676 677 if (sptag < ARRAY_SIZE(port->dsa_meta) && 678 port->dsa_meta[sptag]) 679 skb_dst_set_noref(q->skb, 680 &port->dsa_meta[sptag]->dst); 681 } 682 683 hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1); 684 if (hash != AIROHA_RXD4_FOE_ENTRY) 685 skb_set_hash(q->skb, jhash_1word(hash, 0), 686 PKT_HASH_TYPE_L4); 687 688 reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1); 689 if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) 690 airoha_ppe_check_skb(eth->ppe, hash); 691 692 done++; 693 napi_gro_receive(&q->napi, q->skb); 694 q->skb = NULL; 695 continue; 696 free_frag: 697 if (q->skb) { 698 dev_kfree_skb(q->skb); 699 q->skb = NULL; 700 } else { 701 page_pool_put_full_page(q->page_pool, page, true); 702 } 703 } 704 airoha_qdma_fill_rx_queue(q); 705 706 return done; 707 } 708 709 static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget) 710 { 711 struct airoha_queue *q = container_of(napi, struct airoha_queue, napi); 712 int cur, done = 0; 713 714 do { 715 cur = airoha_qdma_rx_process(q, budget - done); 716 done += cur; 717 } while (cur && done < budget); 718 719 if (done < budget && napi_complete(napi)) 720 airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1, 721 RX_DONE_INT_MASK); 722 723 return done; 724 } 725 726 static int airoha_qdma_init_rx_queue(struct airoha_queue *q, 727 struct airoha_qdma *qdma, int ndesc) 728 { 729 const struct page_pool_params pp_params = { 730 .order = 0, 731 .pool_size = 256, 732 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, 733 .dma_dir = DMA_FROM_DEVICE, 734 .max_len = PAGE_SIZE, 735 .nid = NUMA_NO_NODE, 736 .dev = qdma->eth->dev, 737 .napi = &q->napi, 738 }; 739 struct airoha_eth *eth = qdma->eth; 740 int qid = q - &qdma->q_rx[0], thr; 741 dma_addr_t dma_addr; 742 743 q->buf_size = PAGE_SIZE / 2; 744 q->ndesc = ndesc; 745 q->qdma = qdma; 746 747 q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry), 748 GFP_KERNEL); 749 if (!q->entry) 750 return -ENOMEM; 751 752 q->page_pool = page_pool_create(&pp_params); 753 if (IS_ERR(q->page_pool)) { 754 int err = PTR_ERR(q->page_pool); 755 756 q->page_pool = NULL; 757 return err; 758 } 759 760 q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc), 761 &dma_addr, GFP_KERNEL); 762 if (!q->desc) 763 return -ENOMEM; 764 765 netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll); 766 767 airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr); 768 airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), 769 RX_RING_SIZE_MASK, 770 FIELD_PREP(RX_RING_SIZE_MASK, ndesc)); 771 772 thr = clamp(ndesc >> 3, 1, 32); 773 airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK, 774 FIELD_PREP(RX_RING_THR_MASK, thr)); 775 airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK, 776 FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head)); 777 airoha_qdma_set(qdma, REG_RX_SCATTER_CFG(qid), RX_RING_SG_EN_MASK); 778 779 airoha_qdma_fill_rx_queue(q); 780 781 return 0; 782 } 783 784 static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q) 785 { 786 struct airoha_eth *eth = q->qdma->eth; 787 788 while (q->queued) { 789 struct airoha_queue_entry *e = &q->entry[q->tail]; 790 struct page *page = virt_to_head_page(e->buf); 791 792 dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len, 793 page_pool_get_dma_dir(q->page_pool)); 794 page_pool_put_full_page(q->page_pool, page, false); 795 q->tail = (q->tail + 1) % q->ndesc; 796 q->queued--; 797 } 798 } 799 800 static int airoha_qdma_init_rx(struct airoha_qdma *qdma) 801 { 802 int i; 803 804 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { 805 int err; 806 807 if (!(RX_DONE_INT_MASK & BIT(i))) { 808 /* rx-queue not binded to irq */ 809 continue; 810 } 811 812 err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma, 813 RX_DSCP_NUM(i)); 814 if (err) 815 return err; 816 } 817 818 return 0; 819 } 820 821 static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget) 822 { 823 struct airoha_tx_irq_queue *irq_q; 824 int id, done = 0, irq_queued; 825 struct airoha_qdma *qdma; 826 struct airoha_eth *eth; 827 u32 status, head; 828 829 irq_q = container_of(napi, struct airoha_tx_irq_queue, napi); 830 qdma = irq_q->qdma; 831 id = irq_q - &qdma->q_tx_irq[0]; 832 eth = qdma->eth; 833 834 status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id)); 835 head = FIELD_GET(IRQ_HEAD_IDX_MASK, status); 836 head = head % irq_q->size; 837 irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status); 838 839 while (irq_queued > 0 && done < budget) { 840 u32 qid, val = irq_q->q[head]; 841 struct airoha_qdma_desc *desc; 842 struct airoha_queue_entry *e; 843 struct airoha_queue *q; 844 u32 index, desc_ctrl; 845 struct sk_buff *skb; 846 847 if (val == 0xff) 848 break; 849 850 irq_q->q[head] = 0xff; /* mark as done */ 851 head = (head + 1) % irq_q->size; 852 irq_queued--; 853 done++; 854 855 qid = FIELD_GET(IRQ_RING_IDX_MASK, val); 856 if (qid >= ARRAY_SIZE(qdma->q_tx)) 857 continue; 858 859 q = &qdma->q_tx[qid]; 860 if (!q->ndesc) 861 continue; 862 863 index = FIELD_GET(IRQ_DESC_IDX_MASK, val); 864 if (index >= q->ndesc) 865 continue; 866 867 spin_lock_bh(&q->lock); 868 869 if (!q->queued) 870 goto unlock; 871 872 desc = &q->desc[index]; 873 desc_ctrl = le32_to_cpu(desc->ctrl); 874 875 if (!(desc_ctrl & QDMA_DESC_DONE_MASK) && 876 !(desc_ctrl & QDMA_DESC_DROP_MASK)) 877 goto unlock; 878 879 e = &q->entry[index]; 880 skb = e->skb; 881 882 dma_unmap_single(eth->dev, e->dma_addr, e->dma_len, 883 DMA_TO_DEVICE); 884 memset(e, 0, sizeof(*e)); 885 WRITE_ONCE(desc->msg0, 0); 886 WRITE_ONCE(desc->msg1, 0); 887 q->queued--; 888 889 /* completion ring can report out-of-order indexes if hw QoS 890 * is enabled and packets with different priority are queued 891 * to same DMA ring. Take into account possible out-of-order 892 * reports incrementing DMA ring tail pointer 893 */ 894 while (q->tail != q->head && !q->entry[q->tail].dma_addr) 895 q->tail = (q->tail + 1) % q->ndesc; 896 897 if (skb) { 898 u16 queue = skb_get_queue_mapping(skb); 899 struct netdev_queue *txq; 900 901 txq = netdev_get_tx_queue(skb->dev, queue); 902 netdev_tx_completed_queue(txq, 1, skb->len); 903 if (netif_tx_queue_stopped(txq) && 904 q->ndesc - q->queued >= q->free_thr) 905 netif_tx_wake_queue(txq); 906 907 dev_kfree_skb_any(skb); 908 } 909 unlock: 910 spin_unlock_bh(&q->lock); 911 } 912 913 if (done) { 914 int i, len = done >> 7; 915 916 for (i = 0; i < len; i++) 917 airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id), 918 IRQ_CLEAR_LEN_MASK, 0x80); 919 airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id), 920 IRQ_CLEAR_LEN_MASK, (done & 0x7f)); 921 } 922 923 if (done < budget && napi_complete(napi)) 924 airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, 925 TX_DONE_INT_MASK(id)); 926 927 return done; 928 } 929 930 static int airoha_qdma_init_tx_queue(struct airoha_queue *q, 931 struct airoha_qdma *qdma, int size) 932 { 933 struct airoha_eth *eth = qdma->eth; 934 int i, qid = q - &qdma->q_tx[0]; 935 dma_addr_t dma_addr; 936 937 spin_lock_init(&q->lock); 938 q->ndesc = size; 939 q->qdma = qdma; 940 q->free_thr = 1 + MAX_SKB_FRAGS; 941 942 q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry), 943 GFP_KERNEL); 944 if (!q->entry) 945 return -ENOMEM; 946 947 q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc), 948 &dma_addr, GFP_KERNEL); 949 if (!q->desc) 950 return -ENOMEM; 951 952 for (i = 0; i < q->ndesc; i++) { 953 u32 val; 954 955 val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1); 956 WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val)); 957 } 958 959 /* xmit ring drop default setting */ 960 airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid), 961 TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK); 962 963 airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr); 964 airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK, 965 FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head)); 966 airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK, 967 FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head)); 968 969 return 0; 970 } 971 972 static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q, 973 struct airoha_qdma *qdma, int size) 974 { 975 int id = irq_q - &qdma->q_tx_irq[0]; 976 struct airoha_eth *eth = qdma->eth; 977 dma_addr_t dma_addr; 978 979 netif_napi_add_tx(eth->napi_dev, &irq_q->napi, 980 airoha_qdma_tx_napi_poll); 981 irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32), 982 &dma_addr, GFP_KERNEL); 983 if (!irq_q->q) 984 return -ENOMEM; 985 986 memset(irq_q->q, 0xff, size * sizeof(u32)); 987 irq_q->size = size; 988 irq_q->qdma = qdma; 989 990 airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr); 991 airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK, 992 FIELD_PREP(TX_IRQ_DEPTH_MASK, size)); 993 airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK, 994 FIELD_PREP(TX_IRQ_THR_MASK, 1)); 995 996 return 0; 997 } 998 999 static int airoha_qdma_init_tx(struct airoha_qdma *qdma) 1000 { 1001 int i, err; 1002 1003 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) { 1004 err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma, 1005 IRQ_QUEUE_LEN(i)); 1006 if (err) 1007 return err; 1008 } 1009 1010 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { 1011 err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma, 1012 TX_DSCP_NUM); 1013 if (err) 1014 return err; 1015 } 1016 1017 return 0; 1018 } 1019 1020 static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q) 1021 { 1022 struct airoha_eth *eth = q->qdma->eth; 1023 1024 spin_lock_bh(&q->lock); 1025 while (q->queued) { 1026 struct airoha_queue_entry *e = &q->entry[q->tail]; 1027 1028 dma_unmap_single(eth->dev, e->dma_addr, e->dma_len, 1029 DMA_TO_DEVICE); 1030 dev_kfree_skb_any(e->skb); 1031 e->skb = NULL; 1032 1033 q->tail = (q->tail + 1) % q->ndesc; 1034 q->queued--; 1035 } 1036 spin_unlock_bh(&q->lock); 1037 } 1038 1039 static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma) 1040 { 1041 struct airoha_eth *eth = qdma->eth; 1042 dma_addr_t dma_addr; 1043 u32 status; 1044 int size; 1045 1046 size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc); 1047 qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr, 1048 GFP_KERNEL); 1049 if (!qdma->hfwd.desc) 1050 return -ENOMEM; 1051 1052 airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr); 1053 1054 size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM; 1055 qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr, 1056 GFP_KERNEL); 1057 if (!qdma->hfwd.q) 1058 return -ENOMEM; 1059 1060 airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr); 1061 1062 airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG, 1063 HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 1064 FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0)); 1065 airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK, 1066 FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128)); 1067 airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG, 1068 LMGR_INIT_START | LMGR_SRAM_MODE_MASK | 1069 HW_FWD_DESC_NUM_MASK, 1070 FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) | 1071 LMGR_INIT_START); 1072 1073 return read_poll_timeout(airoha_qdma_rr, status, 1074 !(status & LMGR_INIT_START), USEC_PER_MSEC, 1075 30 * USEC_PER_MSEC, true, qdma, 1076 REG_LMGR_INIT_CFG); 1077 } 1078 1079 static void airoha_qdma_init_qos(struct airoha_qdma *qdma) 1080 { 1081 airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK); 1082 airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK); 1083 1084 airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG, 1085 PSE_BUF_ESTIMATE_EN_MASK); 1086 1087 airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG, 1088 EGRESS_RATE_METER_EN_MASK | 1089 EGRESS_RATE_METER_EQ_RATE_EN_MASK); 1090 /* 2047us x 31 = 63.457ms */ 1091 airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG, 1092 EGRESS_RATE_METER_WINDOW_SZ_MASK, 1093 FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f)); 1094 airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG, 1095 EGRESS_RATE_METER_TIMESLICE_MASK, 1096 FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff)); 1097 1098 /* ratelimit init */ 1099 airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK); 1100 /* fast-tick 25us */ 1101 airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK, 1102 FIELD_PREP(GLB_FAST_TICK_MASK, 25)); 1103 airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK, 1104 FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40)); 1105 1106 airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK); 1107 airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK, 1108 FIELD_PREP(EGRESS_FAST_TICK_MASK, 25)); 1109 airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, 1110 EGRESS_SLOW_TICK_RATIO_MASK, 1111 FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40)); 1112 1113 airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK); 1114 airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG, 1115 INGRESS_TRTCM_MODE_MASK); 1116 airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK, 1117 FIELD_PREP(INGRESS_FAST_TICK_MASK, 125)); 1118 airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, 1119 INGRESS_SLOW_TICK_RATIO_MASK, 1120 FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8)); 1121 1122 airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK); 1123 airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK, 1124 FIELD_PREP(SLA_FAST_TICK_MASK, 25)); 1125 airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK, 1126 FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40)); 1127 } 1128 1129 static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma) 1130 { 1131 int i; 1132 1133 for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) { 1134 /* Tx-cpu transferred count */ 1135 airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0); 1136 airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1), 1137 CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK | 1138 CNTR_ALL_DSCP_RING_EN_MASK | 1139 FIELD_PREP(CNTR_CHAN_MASK, i)); 1140 /* Tx-fwd transferred count */ 1141 airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0); 1142 airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1), 1143 CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK | 1144 CNTR_ALL_DSCP_RING_EN_MASK | 1145 FIELD_PREP(CNTR_SRC_MASK, 1) | 1146 FIELD_PREP(CNTR_CHAN_MASK, i)); 1147 } 1148 } 1149 1150 static int airoha_qdma_hw_init(struct airoha_qdma *qdma) 1151 { 1152 int i; 1153 1154 /* clear pending irqs */ 1155 for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) 1156 airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff); 1157 1158 /* setup irqs */ 1159 airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK); 1160 airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK); 1161 airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK); 1162 1163 /* setup irq binding */ 1164 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { 1165 if (!qdma->q_tx[i].ndesc) 1166 continue; 1167 1168 if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i)) 1169 airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i), 1170 TX_RING_IRQ_BLOCKING_CFG_MASK); 1171 else 1172 airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i), 1173 TX_RING_IRQ_BLOCKING_CFG_MASK); 1174 } 1175 1176 airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG, 1177 FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) | 1178 GLOBAL_CFG_CPU_TXR_RR_MASK | 1179 GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK | 1180 GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK | 1181 GLOBAL_CFG_MULTICAST_EN_MASK | 1182 GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK | 1183 GLOBAL_CFG_TX_WB_DONE_MASK | 1184 FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2)); 1185 1186 airoha_qdma_init_qos(qdma); 1187 1188 /* disable qdma rx delay interrupt */ 1189 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { 1190 if (!qdma->q_rx[i].ndesc) 1191 continue; 1192 1193 airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i), 1194 RX_DELAY_INT_MASK); 1195 } 1196 1197 airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG, 1198 TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN); 1199 airoha_qdma_init_qos_stats(qdma); 1200 1201 return 0; 1202 } 1203 1204 static irqreturn_t airoha_irq_handler(int irq, void *dev_instance) 1205 { 1206 struct airoha_qdma *qdma = dev_instance; 1207 u32 intr[ARRAY_SIZE(qdma->irqmask)]; 1208 int i; 1209 1210 for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) { 1211 intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i)); 1212 intr[i] &= qdma->irqmask[i]; 1213 airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]); 1214 } 1215 1216 if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state)) 1217 return IRQ_NONE; 1218 1219 if (intr[1] & RX_DONE_INT_MASK) { 1220 airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1, 1221 RX_DONE_INT_MASK); 1222 1223 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { 1224 if (!qdma->q_rx[i].ndesc) 1225 continue; 1226 1227 if (intr[1] & BIT(i)) 1228 napi_schedule(&qdma->q_rx[i].napi); 1229 } 1230 } 1231 1232 if (intr[0] & INT_TX_MASK) { 1233 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) { 1234 if (!(intr[0] & TX_DONE_INT_MASK(i))) 1235 continue; 1236 1237 airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0, 1238 TX_DONE_INT_MASK(i)); 1239 napi_schedule(&qdma->q_tx_irq[i].napi); 1240 } 1241 } 1242 1243 return IRQ_HANDLED; 1244 } 1245 1246 static int airoha_qdma_init(struct platform_device *pdev, 1247 struct airoha_eth *eth, 1248 struct airoha_qdma *qdma) 1249 { 1250 int err, id = qdma - ð->qdma[0]; 1251 const char *res; 1252 1253 spin_lock_init(&qdma->irq_lock); 1254 qdma->eth = eth; 1255 1256 res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id); 1257 if (!res) 1258 return -ENOMEM; 1259 1260 qdma->regs = devm_platform_ioremap_resource_byname(pdev, res); 1261 if (IS_ERR(qdma->regs)) 1262 return dev_err_probe(eth->dev, PTR_ERR(qdma->regs), 1263 "failed to iomap qdma%d regs\n", id); 1264 1265 qdma->irq = platform_get_irq(pdev, 4 * id); 1266 if (qdma->irq < 0) 1267 return qdma->irq; 1268 1269 err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler, 1270 IRQF_SHARED, KBUILD_MODNAME, qdma); 1271 if (err) 1272 return err; 1273 1274 err = airoha_qdma_init_rx(qdma); 1275 if (err) 1276 return err; 1277 1278 err = airoha_qdma_init_tx(qdma); 1279 if (err) 1280 return err; 1281 1282 err = airoha_qdma_init_hfwd_queues(qdma); 1283 if (err) 1284 return err; 1285 1286 return airoha_qdma_hw_init(qdma); 1287 } 1288 1289 static int airoha_hw_init(struct platform_device *pdev, 1290 struct airoha_eth *eth) 1291 { 1292 int err, i; 1293 1294 /* disable xsi */ 1295 err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), 1296 eth->xsi_rsts); 1297 if (err) 1298 return err; 1299 1300 err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts); 1301 if (err) 1302 return err; 1303 1304 msleep(20); 1305 err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts); 1306 if (err) 1307 return err; 1308 1309 msleep(20); 1310 err = airoha_fe_init(eth); 1311 if (err) 1312 return err; 1313 1314 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) { 1315 err = airoha_qdma_init(pdev, eth, ð->qdma[i]); 1316 if (err) 1317 return err; 1318 } 1319 1320 err = airoha_ppe_init(eth); 1321 if (err) 1322 return err; 1323 1324 set_bit(DEV_STATE_INITIALIZED, ð->state); 1325 1326 return 0; 1327 } 1328 1329 static void airoha_hw_cleanup(struct airoha_qdma *qdma) 1330 { 1331 int i; 1332 1333 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { 1334 if (!qdma->q_rx[i].ndesc) 1335 continue; 1336 1337 netif_napi_del(&qdma->q_rx[i].napi); 1338 airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]); 1339 if (qdma->q_rx[i].page_pool) 1340 page_pool_destroy(qdma->q_rx[i].page_pool); 1341 } 1342 1343 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) 1344 netif_napi_del(&qdma->q_tx_irq[i].napi); 1345 1346 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { 1347 if (!qdma->q_tx[i].ndesc) 1348 continue; 1349 1350 airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]); 1351 } 1352 } 1353 1354 static void airoha_qdma_start_napi(struct airoha_qdma *qdma) 1355 { 1356 int i; 1357 1358 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) 1359 napi_enable(&qdma->q_tx_irq[i].napi); 1360 1361 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { 1362 if (!qdma->q_rx[i].ndesc) 1363 continue; 1364 1365 napi_enable(&qdma->q_rx[i].napi); 1366 } 1367 } 1368 1369 static void airoha_qdma_stop_napi(struct airoha_qdma *qdma) 1370 { 1371 int i; 1372 1373 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) 1374 napi_disable(&qdma->q_tx_irq[i].napi); 1375 1376 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { 1377 if (!qdma->q_rx[i].ndesc) 1378 continue; 1379 1380 napi_disable(&qdma->q_rx[i].napi); 1381 } 1382 } 1383 1384 static void airoha_update_hw_stats(struct airoha_gdm_port *port) 1385 { 1386 struct airoha_eth *eth = port->qdma->eth; 1387 u32 val, i = 0; 1388 1389 spin_lock(&port->stats.lock); 1390 u64_stats_update_begin(&port->stats.syncp); 1391 1392 /* TX */ 1393 val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id)); 1394 port->stats.tx_ok_pkts += ((u64)val << 32); 1395 val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id)); 1396 port->stats.tx_ok_pkts += val; 1397 1398 val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id)); 1399 port->stats.tx_ok_bytes += ((u64)val << 32); 1400 val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id)); 1401 port->stats.tx_ok_bytes += val; 1402 1403 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id)); 1404 port->stats.tx_drops += val; 1405 1406 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id)); 1407 port->stats.tx_broadcast += val; 1408 1409 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id)); 1410 port->stats.tx_multicast += val; 1411 1412 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id)); 1413 port->stats.tx_len[i] += val; 1414 1415 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id)); 1416 port->stats.tx_len[i] += ((u64)val << 32); 1417 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id)); 1418 port->stats.tx_len[i++] += val; 1419 1420 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id)); 1421 port->stats.tx_len[i] += ((u64)val << 32); 1422 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id)); 1423 port->stats.tx_len[i++] += val; 1424 1425 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id)); 1426 port->stats.tx_len[i] += ((u64)val << 32); 1427 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id)); 1428 port->stats.tx_len[i++] += val; 1429 1430 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id)); 1431 port->stats.tx_len[i] += ((u64)val << 32); 1432 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id)); 1433 port->stats.tx_len[i++] += val; 1434 1435 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id)); 1436 port->stats.tx_len[i] += ((u64)val << 32); 1437 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id)); 1438 port->stats.tx_len[i++] += val; 1439 1440 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id)); 1441 port->stats.tx_len[i] += ((u64)val << 32); 1442 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id)); 1443 port->stats.tx_len[i++] += val; 1444 1445 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id)); 1446 port->stats.tx_len[i++] += val; 1447 1448 /* RX */ 1449 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id)); 1450 port->stats.rx_ok_pkts += ((u64)val << 32); 1451 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id)); 1452 port->stats.rx_ok_pkts += val; 1453 1454 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id)); 1455 port->stats.rx_ok_bytes += ((u64)val << 32); 1456 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id)); 1457 port->stats.rx_ok_bytes += val; 1458 1459 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id)); 1460 port->stats.rx_drops += val; 1461 1462 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id)); 1463 port->stats.rx_broadcast += val; 1464 1465 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id)); 1466 port->stats.rx_multicast += val; 1467 1468 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id)); 1469 port->stats.rx_errors += val; 1470 1471 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id)); 1472 port->stats.rx_crc_error += val; 1473 1474 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id)); 1475 port->stats.rx_over_errors += val; 1476 1477 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id)); 1478 port->stats.rx_fragment += val; 1479 1480 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id)); 1481 port->stats.rx_jabber += val; 1482 1483 i = 0; 1484 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id)); 1485 port->stats.rx_len[i] += val; 1486 1487 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id)); 1488 port->stats.rx_len[i] += ((u64)val << 32); 1489 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id)); 1490 port->stats.rx_len[i++] += val; 1491 1492 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id)); 1493 port->stats.rx_len[i] += ((u64)val << 32); 1494 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id)); 1495 port->stats.rx_len[i++] += val; 1496 1497 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id)); 1498 port->stats.rx_len[i] += ((u64)val << 32); 1499 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id)); 1500 port->stats.rx_len[i++] += val; 1501 1502 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id)); 1503 port->stats.rx_len[i] += ((u64)val << 32); 1504 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id)); 1505 port->stats.rx_len[i++] += val; 1506 1507 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id)); 1508 port->stats.rx_len[i] += ((u64)val << 32); 1509 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id)); 1510 port->stats.rx_len[i++] += val; 1511 1512 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id)); 1513 port->stats.rx_len[i] += ((u64)val << 32); 1514 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id)); 1515 port->stats.rx_len[i++] += val; 1516 1517 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id)); 1518 port->stats.rx_len[i++] += val; 1519 1520 /* reset mib counters */ 1521 airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id), 1522 FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK); 1523 1524 u64_stats_update_end(&port->stats.syncp); 1525 spin_unlock(&port->stats.lock); 1526 } 1527 1528 static int airoha_dev_open(struct net_device *dev) 1529 { 1530 int err, len = ETH_HLEN + dev->mtu + ETH_FCS_LEN; 1531 struct airoha_gdm_port *port = netdev_priv(dev); 1532 struct airoha_qdma *qdma = port->qdma; 1533 1534 netif_tx_start_all_queues(dev); 1535 err = airoha_set_vip_for_gdm_port(port, true); 1536 if (err) 1537 return err; 1538 1539 if (netdev_uses_dsa(dev)) 1540 airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id), 1541 GDM_STAG_EN_MASK); 1542 else 1543 airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id), 1544 GDM_STAG_EN_MASK); 1545 1546 airoha_fe_rmw(qdma->eth, REG_GDM_LEN_CFG(port->id), 1547 GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK, 1548 FIELD_PREP(GDM_SHORT_LEN_MASK, 60) | 1549 FIELD_PREP(GDM_LONG_LEN_MASK, len)); 1550 1551 airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG, 1552 GLOBAL_CFG_TX_DMA_EN_MASK | 1553 GLOBAL_CFG_RX_DMA_EN_MASK); 1554 atomic_inc(&qdma->users); 1555 1556 return 0; 1557 } 1558 1559 static int airoha_dev_stop(struct net_device *dev) 1560 { 1561 struct airoha_gdm_port *port = netdev_priv(dev); 1562 struct airoha_qdma *qdma = port->qdma; 1563 int i, err; 1564 1565 netif_tx_disable(dev); 1566 err = airoha_set_vip_for_gdm_port(port, false); 1567 if (err) 1568 return err; 1569 1570 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) 1571 netdev_tx_reset_subqueue(dev, i); 1572 1573 if (atomic_dec_and_test(&qdma->users)) { 1574 airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG, 1575 GLOBAL_CFG_TX_DMA_EN_MASK | 1576 GLOBAL_CFG_RX_DMA_EN_MASK); 1577 1578 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { 1579 if (!qdma->q_tx[i].ndesc) 1580 continue; 1581 1582 airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]); 1583 } 1584 } 1585 1586 return 0; 1587 } 1588 1589 static int airoha_dev_set_macaddr(struct net_device *dev, void *p) 1590 { 1591 struct airoha_gdm_port *port = netdev_priv(dev); 1592 int err; 1593 1594 err = eth_mac_addr(dev, p); 1595 if (err) 1596 return err; 1597 1598 airoha_set_macaddr(port, dev->dev_addr); 1599 1600 return 0; 1601 } 1602 1603 static void airhoha_set_gdm2_loopback(struct airoha_gdm_port *port) 1604 { 1605 u32 pse_port = port->id == 3 ? FE_PSE_PORT_GDM3 : FE_PSE_PORT_GDM4; 1606 struct airoha_eth *eth = port->qdma->eth; 1607 u32 chan = port->id == 3 ? 4 : 0; 1608 1609 /* Forward the traffic to the proper GDM port */ 1610 airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(2), pse_port); 1611 airoha_fe_clear(eth, REG_GDM_FWD_CFG(2), GDM_STRIP_CRC); 1612 1613 /* Enable GDM2 loopback */ 1614 airoha_fe_wr(eth, REG_GDM_TXCHN_EN(2), 0xffffffff); 1615 airoha_fe_wr(eth, REG_GDM_RXCHN_EN(2), 0xffff); 1616 airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(2), 1617 LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK, 1618 FIELD_PREP(LPBK_CHAN_MASK, chan) | LPBK_EN_MASK); 1619 airoha_fe_rmw(eth, REG_GDM_LEN_CFG(2), 1620 GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK, 1621 FIELD_PREP(GDM_SHORT_LEN_MASK, 60) | 1622 FIELD_PREP(GDM_LONG_LEN_MASK, AIROHA_MAX_MTU)); 1623 1624 /* Disable VIP and IFC for GDM2 */ 1625 airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(2)); 1626 airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(2)); 1627 1628 if (port->id == 3) { 1629 /* FIXME: handle XSI_PCE1_PORT */ 1630 airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0), 0x5500); 1631 airoha_fe_rmw(eth, REG_FE_WAN_PORT, 1632 WAN1_EN_MASK | WAN1_MASK | WAN0_MASK, 1633 FIELD_PREP(WAN0_MASK, HSGMII_LAN_PCIE0_SRCPORT)); 1634 airoha_fe_rmw(eth, 1635 REG_SP_DFT_CPORT(HSGMII_LAN_PCIE0_SRCPORT >> 3), 1636 SP_CPORT_PCIE0_MASK, 1637 FIELD_PREP(SP_CPORT_PCIE0_MASK, 1638 FE_PSE_PORT_CDM2)); 1639 } else { 1640 /* FIXME: handle XSI_USB_PORT */ 1641 airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6, 1642 FC_ID_OF_SRC_PORT24_MASK, 1643 FIELD_PREP(FC_ID_OF_SRC_PORT24_MASK, 2)); 1644 airoha_fe_rmw(eth, REG_FE_WAN_PORT, 1645 WAN1_EN_MASK | WAN1_MASK | WAN0_MASK, 1646 FIELD_PREP(WAN0_MASK, HSGMII_LAN_ETH_SRCPORT)); 1647 airoha_fe_rmw(eth, 1648 REG_SP_DFT_CPORT(HSGMII_LAN_ETH_SRCPORT >> 3), 1649 SP_CPORT_ETH_MASK, 1650 FIELD_PREP(SP_CPORT_ETH_MASK, FE_PSE_PORT_CDM2)); 1651 } 1652 } 1653 1654 static int airoha_dev_init(struct net_device *dev) 1655 { 1656 struct airoha_gdm_port *port = netdev_priv(dev); 1657 struct airoha_eth *eth = port->qdma->eth; 1658 u32 pse_port; 1659 1660 airoha_set_macaddr(port, dev->dev_addr); 1661 1662 switch (port->id) { 1663 case 3: 1664 case 4: 1665 /* If GDM2 is active we can't enable loopback */ 1666 if (!eth->ports[1]) 1667 airhoha_set_gdm2_loopback(port); 1668 fallthrough; 1669 case 2: 1670 pse_port = FE_PSE_PORT_PPE2; 1671 break; 1672 default: 1673 pse_port = FE_PSE_PORT_PPE1; 1674 break; 1675 } 1676 1677 airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id), pse_port); 1678 1679 return 0; 1680 } 1681 1682 static void airoha_dev_get_stats64(struct net_device *dev, 1683 struct rtnl_link_stats64 *storage) 1684 { 1685 struct airoha_gdm_port *port = netdev_priv(dev); 1686 unsigned int start; 1687 1688 airoha_update_hw_stats(port); 1689 do { 1690 start = u64_stats_fetch_begin(&port->stats.syncp); 1691 storage->rx_packets = port->stats.rx_ok_pkts; 1692 storage->tx_packets = port->stats.tx_ok_pkts; 1693 storage->rx_bytes = port->stats.rx_ok_bytes; 1694 storage->tx_bytes = port->stats.tx_ok_bytes; 1695 storage->multicast = port->stats.rx_multicast; 1696 storage->rx_errors = port->stats.rx_errors; 1697 storage->rx_dropped = port->stats.rx_drops; 1698 storage->tx_dropped = port->stats.tx_drops; 1699 storage->rx_crc_errors = port->stats.rx_crc_error; 1700 storage->rx_over_errors = port->stats.rx_over_errors; 1701 } while (u64_stats_fetch_retry(&port->stats.syncp, start)); 1702 } 1703 1704 static int airoha_dev_change_mtu(struct net_device *dev, int mtu) 1705 { 1706 struct airoha_gdm_port *port = netdev_priv(dev); 1707 struct airoha_eth *eth = port->qdma->eth; 1708 u32 len = ETH_HLEN + mtu + ETH_FCS_LEN; 1709 1710 airoha_fe_rmw(eth, REG_GDM_LEN_CFG(port->id), 1711 GDM_LONG_LEN_MASK, 1712 FIELD_PREP(GDM_LONG_LEN_MASK, len)); 1713 WRITE_ONCE(dev->mtu, mtu); 1714 1715 return 0; 1716 } 1717 1718 static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb, 1719 struct net_device *sb_dev) 1720 { 1721 struct airoha_gdm_port *port = netdev_priv(dev); 1722 int queue, channel; 1723 1724 /* For dsa device select QoS channel according to the dsa user port 1725 * index, rely on port id otherwise. Select QoS queue based on the 1726 * skb priority. 1727 */ 1728 channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id; 1729 channel = channel % AIROHA_NUM_QOS_CHANNELS; 1730 queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */ 1731 queue = channel * AIROHA_NUM_QOS_QUEUES + queue; 1732 1733 return queue < dev->num_tx_queues ? queue : 0; 1734 } 1735 1736 static u32 airoha_get_dsa_tag(struct sk_buff *skb, struct net_device *dev) 1737 { 1738 #if IS_ENABLED(CONFIG_NET_DSA) 1739 struct ethhdr *ehdr; 1740 u8 xmit_tpid; 1741 u16 tag; 1742 1743 if (!netdev_uses_dsa(dev)) 1744 return 0; 1745 1746 if (dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK) 1747 return 0; 1748 1749 if (skb_cow_head(skb, 0)) 1750 return 0; 1751 1752 ehdr = (struct ethhdr *)skb->data; 1753 tag = be16_to_cpu(ehdr->h_proto); 1754 xmit_tpid = tag >> 8; 1755 1756 switch (xmit_tpid) { 1757 case MTK_HDR_XMIT_TAGGED_TPID_8100: 1758 ehdr->h_proto = cpu_to_be16(ETH_P_8021Q); 1759 tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_8100 << 8); 1760 break; 1761 case MTK_HDR_XMIT_TAGGED_TPID_88A8: 1762 ehdr->h_proto = cpu_to_be16(ETH_P_8021AD); 1763 tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_88A8 << 8); 1764 break; 1765 default: 1766 /* PPE module requires untagged DSA packets to work properly, 1767 * so move DSA tag to DMA descriptor. 1768 */ 1769 memmove(skb->data + MTK_HDR_LEN, skb->data, 2 * ETH_ALEN); 1770 __skb_pull(skb, MTK_HDR_LEN); 1771 break; 1772 } 1773 1774 return tag; 1775 #else 1776 return 0; 1777 #endif 1778 } 1779 1780 static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, 1781 struct net_device *dev) 1782 { 1783 struct airoha_gdm_port *port = netdev_priv(dev); 1784 struct airoha_qdma *qdma = port->qdma; 1785 u32 nr_frags, tag, msg0, msg1, len; 1786 struct netdev_queue *txq; 1787 struct airoha_queue *q; 1788 void *data; 1789 int i, qid; 1790 u16 index; 1791 u8 fport; 1792 1793 qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx); 1794 tag = airoha_get_dsa_tag(skb, dev); 1795 1796 msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK, 1797 qid / AIROHA_NUM_QOS_QUEUES) | 1798 FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK, 1799 qid % AIROHA_NUM_QOS_QUEUES) | 1800 FIELD_PREP(QDMA_ETH_TXMSG_SP_TAG_MASK, tag); 1801 if (skb->ip_summed == CHECKSUM_PARTIAL) 1802 msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) | 1803 FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) | 1804 FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1); 1805 1806 /* TSO: fill MSS info in tcp checksum field */ 1807 if (skb_is_gso(skb)) { 1808 if (skb_cow_head(skb, 0)) 1809 goto error; 1810 1811 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | 1812 SKB_GSO_TCPV6)) { 1813 __be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size); 1814 1815 tcp_hdr(skb)->check = (__force __sum16)csum; 1816 msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1); 1817 } 1818 } 1819 1820 fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id; 1821 msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) | 1822 FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f); 1823 1824 q = &qdma->q_tx[qid]; 1825 if (WARN_ON_ONCE(!q->ndesc)) 1826 goto error; 1827 1828 spin_lock_bh(&q->lock); 1829 1830 txq = netdev_get_tx_queue(dev, qid); 1831 nr_frags = 1 + skb_shinfo(skb)->nr_frags; 1832 1833 if (q->queued + nr_frags > q->ndesc) { 1834 /* not enough space in the queue */ 1835 netif_tx_stop_queue(txq); 1836 spin_unlock_bh(&q->lock); 1837 return NETDEV_TX_BUSY; 1838 } 1839 1840 len = skb_headlen(skb); 1841 data = skb->data; 1842 index = q->head; 1843 1844 for (i = 0; i < nr_frags; i++) { 1845 struct airoha_qdma_desc *desc = &q->desc[index]; 1846 struct airoha_queue_entry *e = &q->entry[index]; 1847 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1848 dma_addr_t addr; 1849 u32 val; 1850 1851 addr = dma_map_single(dev->dev.parent, data, len, 1852 DMA_TO_DEVICE); 1853 if (unlikely(dma_mapping_error(dev->dev.parent, addr))) 1854 goto error_unmap; 1855 1856 index = (index + 1) % q->ndesc; 1857 1858 val = FIELD_PREP(QDMA_DESC_LEN_MASK, len); 1859 if (i < nr_frags - 1) 1860 val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1); 1861 WRITE_ONCE(desc->ctrl, cpu_to_le32(val)); 1862 WRITE_ONCE(desc->addr, cpu_to_le32(addr)); 1863 val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index); 1864 WRITE_ONCE(desc->data, cpu_to_le32(val)); 1865 WRITE_ONCE(desc->msg0, cpu_to_le32(msg0)); 1866 WRITE_ONCE(desc->msg1, cpu_to_le32(msg1)); 1867 WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff)); 1868 1869 e->skb = i ? NULL : skb; 1870 e->dma_addr = addr; 1871 e->dma_len = len; 1872 1873 data = skb_frag_address(frag); 1874 len = skb_frag_size(frag); 1875 } 1876 1877 q->head = index; 1878 q->queued += i; 1879 1880 skb_tx_timestamp(skb); 1881 netdev_tx_sent_queue(txq, skb->len); 1882 1883 if (netif_xmit_stopped(txq) || !netdev_xmit_more()) 1884 airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), 1885 TX_RING_CPU_IDX_MASK, 1886 FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head)); 1887 1888 if (q->ndesc - q->queued < q->free_thr) 1889 netif_tx_stop_queue(txq); 1890 1891 spin_unlock_bh(&q->lock); 1892 1893 return NETDEV_TX_OK; 1894 1895 error_unmap: 1896 for (i--; i >= 0; i--) { 1897 index = (q->head + i) % q->ndesc; 1898 dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr, 1899 q->entry[index].dma_len, DMA_TO_DEVICE); 1900 } 1901 1902 spin_unlock_bh(&q->lock); 1903 error: 1904 dev_kfree_skb_any(skb); 1905 dev->stats.tx_dropped++; 1906 1907 return NETDEV_TX_OK; 1908 } 1909 1910 static void airoha_ethtool_get_drvinfo(struct net_device *dev, 1911 struct ethtool_drvinfo *info) 1912 { 1913 struct airoha_gdm_port *port = netdev_priv(dev); 1914 struct airoha_eth *eth = port->qdma->eth; 1915 1916 strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver)); 1917 strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info)); 1918 } 1919 1920 static void airoha_ethtool_get_mac_stats(struct net_device *dev, 1921 struct ethtool_eth_mac_stats *stats) 1922 { 1923 struct airoha_gdm_port *port = netdev_priv(dev); 1924 unsigned int start; 1925 1926 airoha_update_hw_stats(port); 1927 do { 1928 start = u64_stats_fetch_begin(&port->stats.syncp); 1929 stats->MulticastFramesXmittedOK = port->stats.tx_multicast; 1930 stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast; 1931 stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast; 1932 } while (u64_stats_fetch_retry(&port->stats.syncp, start)); 1933 } 1934 1935 static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = { 1936 { 0, 64 }, 1937 { 65, 127 }, 1938 { 128, 255 }, 1939 { 256, 511 }, 1940 { 512, 1023 }, 1941 { 1024, 1518 }, 1942 { 1519, 10239 }, 1943 {}, 1944 }; 1945 1946 static void 1947 airoha_ethtool_get_rmon_stats(struct net_device *dev, 1948 struct ethtool_rmon_stats *stats, 1949 const struct ethtool_rmon_hist_range **ranges) 1950 { 1951 struct airoha_gdm_port *port = netdev_priv(dev); 1952 struct airoha_hw_stats *hw_stats = &port->stats; 1953 unsigned int start; 1954 1955 BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) != 1956 ARRAY_SIZE(hw_stats->tx_len) + 1); 1957 BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) != 1958 ARRAY_SIZE(hw_stats->rx_len) + 1); 1959 1960 *ranges = airoha_ethtool_rmon_ranges; 1961 airoha_update_hw_stats(port); 1962 do { 1963 int i; 1964 1965 start = u64_stats_fetch_begin(&port->stats.syncp); 1966 stats->fragments = hw_stats->rx_fragment; 1967 stats->jabbers = hw_stats->rx_jabber; 1968 for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1; 1969 i++) { 1970 stats->hist[i] = hw_stats->rx_len[i]; 1971 stats->hist_tx[i] = hw_stats->tx_len[i]; 1972 } 1973 } while (u64_stats_fetch_retry(&port->stats.syncp, start)); 1974 } 1975 1976 static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port, 1977 int channel, enum tx_sched_mode mode, 1978 const u16 *weights, u8 n_weights) 1979 { 1980 int i; 1981 1982 for (i = 0; i < AIROHA_NUM_TX_RING; i++) 1983 airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel), 1984 TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i)); 1985 1986 for (i = 0; i < n_weights; i++) { 1987 u32 status; 1988 int err; 1989 1990 airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG, 1991 TWRR_RW_CMD_MASK | 1992 FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) | 1993 FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) | 1994 FIELD_PREP(TWRR_VALUE_MASK, weights[i])); 1995 err = read_poll_timeout(airoha_qdma_rr, status, 1996 status & TWRR_RW_CMD_DONE, 1997 USEC_PER_MSEC, 10 * USEC_PER_MSEC, 1998 true, port->qdma, 1999 REG_TXWRR_WEIGHT_CFG); 2000 if (err) 2001 return err; 2002 } 2003 2004 airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3), 2005 CHAN_QOS_MODE_MASK(channel), 2006 mode << __ffs(CHAN_QOS_MODE_MASK(channel))); 2007 2008 return 0; 2009 } 2010 2011 static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port, 2012 int channel) 2013 { 2014 static const u16 w[AIROHA_NUM_QOS_QUEUES] = {}; 2015 2016 return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w, 2017 ARRAY_SIZE(w)); 2018 } 2019 2020 static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port, 2021 int channel, 2022 struct tc_ets_qopt_offload *opt) 2023 { 2024 struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params; 2025 enum tx_sched_mode mode = TC_SCH_SP; 2026 u16 w[AIROHA_NUM_QOS_QUEUES] = {}; 2027 int i, nstrict = 0; 2028 2029 if (p->bands > AIROHA_NUM_QOS_QUEUES) 2030 return -EINVAL; 2031 2032 for (i = 0; i < p->bands; i++) { 2033 if (!p->quanta[i]) 2034 nstrict++; 2035 } 2036 2037 /* this configuration is not supported by the hw */ 2038 if (nstrict == AIROHA_NUM_QOS_QUEUES - 1) 2039 return -EINVAL; 2040 2041 /* EN7581 SoC supports fixed QoS band priority where WRR queues have 2042 * lowest priorities with respect to SP ones. 2043 * e.g: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn 2044 */ 2045 for (i = 0; i < nstrict; i++) { 2046 if (p->priomap[p->bands - i - 1] != i) 2047 return -EINVAL; 2048 } 2049 2050 for (i = 0; i < p->bands - nstrict; i++) { 2051 if (p->priomap[i] != nstrict + i) 2052 return -EINVAL; 2053 2054 w[i] = p->weights[nstrict + i]; 2055 } 2056 2057 if (!nstrict) 2058 mode = TC_SCH_WRR8; 2059 else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1) 2060 mode = nstrict + 1; 2061 2062 return airoha_qdma_set_chan_tx_sched(port, channel, mode, w, 2063 ARRAY_SIZE(w)); 2064 } 2065 2066 static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port, 2067 int channel, 2068 struct tc_ets_qopt_offload *opt) 2069 { 2070 u64 cpu_tx_packets = airoha_qdma_rr(port->qdma, 2071 REG_CNTR_VAL(channel << 1)); 2072 u64 fwd_tx_packets = airoha_qdma_rr(port->qdma, 2073 REG_CNTR_VAL((channel << 1) + 1)); 2074 u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) + 2075 (fwd_tx_packets - port->fwd_tx_packets); 2076 _bstats_update(opt->stats.bstats, 0, tx_packets); 2077 2078 port->cpu_tx_packets = cpu_tx_packets; 2079 port->fwd_tx_packets = fwd_tx_packets; 2080 2081 return 0; 2082 } 2083 2084 static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port, 2085 struct tc_ets_qopt_offload *opt) 2086 { 2087 int channel; 2088 2089 if (opt->parent == TC_H_ROOT) 2090 return -EINVAL; 2091 2092 channel = TC_H_MAJ(opt->handle) >> 16; 2093 channel = channel % AIROHA_NUM_QOS_CHANNELS; 2094 2095 switch (opt->command) { 2096 case TC_ETS_REPLACE: 2097 return airoha_qdma_set_tx_ets_sched(port, channel, opt); 2098 case TC_ETS_DESTROY: 2099 /* PRIO is default qdisc scheduler */ 2100 return airoha_qdma_set_tx_prio_sched(port, channel); 2101 case TC_ETS_STATS: 2102 return airoha_qdma_get_tx_ets_stats(port, channel, opt); 2103 default: 2104 return -EOPNOTSUPP; 2105 } 2106 } 2107 2108 static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel, 2109 u32 addr, enum trtcm_param_type param, 2110 enum trtcm_mode_type mode, 2111 u32 *val_low, u32 *val_high) 2112 { 2113 u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel); 2114 u32 val, config = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) | 2115 FIELD_PREP(TRTCM_METER_GROUP_MASK, group) | 2116 FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) | 2117 FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode); 2118 2119 airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config); 2120 if (read_poll_timeout(airoha_qdma_rr, val, 2121 val & TRTCM_PARAM_RW_DONE_MASK, 2122 USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, 2123 qdma, REG_TRTCM_CFG_PARAM(addr))) 2124 return -ETIMEDOUT; 2125 2126 *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr)); 2127 if (val_high) 2128 *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr)); 2129 2130 return 0; 2131 } 2132 2133 static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel, 2134 u32 addr, enum trtcm_param_type param, 2135 enum trtcm_mode_type mode, u32 val) 2136 { 2137 u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel); 2138 u32 config = TRTCM_PARAM_RW_MASK | 2139 FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) | 2140 FIELD_PREP(TRTCM_METER_GROUP_MASK, group) | 2141 FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) | 2142 FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode); 2143 2144 airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val); 2145 airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config); 2146 2147 return read_poll_timeout(airoha_qdma_rr, val, 2148 val & TRTCM_PARAM_RW_DONE_MASK, 2149 USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, 2150 qdma, REG_TRTCM_CFG_PARAM(addr)); 2151 } 2152 2153 static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel, 2154 u32 addr, enum trtcm_mode_type mode, 2155 bool enable, u32 enable_mask) 2156 { 2157 u32 val; 2158 2159 if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE, 2160 mode, &val, NULL)) 2161 return -EINVAL; 2162 2163 val = enable ? val | enable_mask : val & ~enable_mask; 2164 2165 return airoha_qdma_set_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE, 2166 mode, val); 2167 } 2168 2169 static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma, 2170 int channel, u32 addr, 2171 enum trtcm_mode_type mode, 2172 u32 rate_val, u32 bucket_size) 2173 { 2174 u32 val, config, tick, unit, rate, rate_frac; 2175 int err; 2176 2177 if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE, 2178 mode, &config, NULL)) 2179 return -EINVAL; 2180 2181 val = airoha_qdma_rr(qdma, addr); 2182 tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val); 2183 if (config & TRTCM_TICK_SEL) 2184 tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val); 2185 if (!tick) 2186 return -EINVAL; 2187 2188 unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick; 2189 if (!unit) 2190 return -EINVAL; 2191 2192 rate = rate_val / unit; 2193 rate_frac = rate_val % unit; 2194 rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit; 2195 rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) | 2196 FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac); 2197 2198 err = airoha_qdma_set_trtcm_param(qdma, channel, addr, 2199 TRTCM_TOKEN_RATE_MODE, mode, rate); 2200 if (err) 2201 return err; 2202 2203 val = max_t(u32, bucket_size, MIN_TOKEN_SIZE); 2204 val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET); 2205 2206 return airoha_qdma_set_trtcm_param(qdma, channel, addr, 2207 TRTCM_BUCKETSIZE_SHIFT_MODE, 2208 mode, val); 2209 } 2210 2211 static int airoha_qdma_set_tx_rate_limit(struct airoha_gdm_port *port, 2212 int channel, u32 rate, 2213 u32 bucket_size) 2214 { 2215 int i, err; 2216 2217 for (i = 0; i <= TRTCM_PEAK_MODE; i++) { 2218 err = airoha_qdma_set_trtcm_config(port->qdma, channel, 2219 REG_EGRESS_TRTCM_CFG, i, 2220 !!rate, TRTCM_METER_MODE); 2221 if (err) 2222 return err; 2223 2224 err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel, 2225 REG_EGRESS_TRTCM_CFG, 2226 i, rate, bucket_size); 2227 if (err) 2228 return err; 2229 } 2230 2231 return 0; 2232 } 2233 2234 static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port, 2235 struct tc_htb_qopt_offload *opt) 2236 { 2237 u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS; 2238 u32 rate = div_u64(opt->rate, 1000) << 3; /* kbps */ 2239 struct net_device *dev = port->dev; 2240 int num_tx_queues = dev->real_num_tx_queues; 2241 int err; 2242 2243 if (opt->parent_classid != TC_HTB_CLASSID_ROOT) { 2244 NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid"); 2245 return -EINVAL; 2246 } 2247 2248 err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum); 2249 if (err) { 2250 NL_SET_ERR_MSG_MOD(opt->extack, 2251 "failed configuring htb offload"); 2252 return err; 2253 } 2254 2255 if (opt->command == TC_HTB_NODE_MODIFY) 2256 return 0; 2257 2258 err = netif_set_real_num_tx_queues(dev, num_tx_queues + 1); 2259 if (err) { 2260 airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum); 2261 NL_SET_ERR_MSG_MOD(opt->extack, 2262 "failed setting real_num_tx_queues"); 2263 return err; 2264 } 2265 2266 set_bit(channel, port->qos_sq_bmap); 2267 opt->qid = AIROHA_NUM_TX_RING + channel; 2268 2269 return 0; 2270 } 2271 2272 static int airoha_dev_setup_tc_block(struct airoha_gdm_port *port, 2273 struct flow_block_offload *f) 2274 { 2275 flow_setup_cb_t *cb = airoha_ppe_setup_tc_block_cb; 2276 static LIST_HEAD(block_cb_list); 2277 struct flow_block_cb *block_cb; 2278 2279 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 2280 return -EOPNOTSUPP; 2281 2282 f->driver_block_list = &block_cb_list; 2283 switch (f->command) { 2284 case FLOW_BLOCK_BIND: 2285 block_cb = flow_block_cb_lookup(f->block, cb, port->dev); 2286 if (block_cb) { 2287 flow_block_cb_incref(block_cb); 2288 return 0; 2289 } 2290 block_cb = flow_block_cb_alloc(cb, port->dev, port->dev, NULL); 2291 if (IS_ERR(block_cb)) 2292 return PTR_ERR(block_cb); 2293 2294 flow_block_cb_incref(block_cb); 2295 flow_block_cb_add(block_cb, f); 2296 list_add_tail(&block_cb->driver_list, &block_cb_list); 2297 return 0; 2298 case FLOW_BLOCK_UNBIND: 2299 block_cb = flow_block_cb_lookup(f->block, cb, port->dev); 2300 if (!block_cb) 2301 return -ENOENT; 2302 2303 if (!flow_block_cb_decref(block_cb)) { 2304 flow_block_cb_remove(block_cb, f); 2305 list_del(&block_cb->driver_list); 2306 } 2307 return 0; 2308 default: 2309 return -EOPNOTSUPP; 2310 } 2311 } 2312 2313 static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue) 2314 { 2315 struct net_device *dev = port->dev; 2316 2317 netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1); 2318 airoha_qdma_set_tx_rate_limit(port, queue + 1, 0, 0); 2319 clear_bit(queue, port->qos_sq_bmap); 2320 } 2321 2322 static int airoha_tc_htb_delete_leaf_queue(struct airoha_gdm_port *port, 2323 struct tc_htb_qopt_offload *opt) 2324 { 2325 u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS; 2326 2327 if (!test_bit(channel, port->qos_sq_bmap)) { 2328 NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id"); 2329 return -EINVAL; 2330 } 2331 2332 airoha_tc_remove_htb_queue(port, channel); 2333 2334 return 0; 2335 } 2336 2337 static int airoha_tc_htb_destroy(struct airoha_gdm_port *port) 2338 { 2339 int q; 2340 2341 for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS) 2342 airoha_tc_remove_htb_queue(port, q); 2343 2344 return 0; 2345 } 2346 2347 static int airoha_tc_get_htb_get_leaf_queue(struct airoha_gdm_port *port, 2348 struct tc_htb_qopt_offload *opt) 2349 { 2350 u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS; 2351 2352 if (!test_bit(channel, port->qos_sq_bmap)) { 2353 NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id"); 2354 return -EINVAL; 2355 } 2356 2357 opt->qid = AIROHA_NUM_TX_RING + channel; 2358 2359 return 0; 2360 } 2361 2362 static int airoha_tc_setup_qdisc_htb(struct airoha_gdm_port *port, 2363 struct tc_htb_qopt_offload *opt) 2364 { 2365 switch (opt->command) { 2366 case TC_HTB_CREATE: 2367 break; 2368 case TC_HTB_DESTROY: 2369 return airoha_tc_htb_destroy(port); 2370 case TC_HTB_NODE_MODIFY: 2371 case TC_HTB_LEAF_ALLOC_QUEUE: 2372 return airoha_tc_htb_alloc_leaf_queue(port, opt); 2373 case TC_HTB_LEAF_DEL: 2374 case TC_HTB_LEAF_DEL_LAST: 2375 case TC_HTB_LEAF_DEL_LAST_FORCE: 2376 return airoha_tc_htb_delete_leaf_queue(port, opt); 2377 case TC_HTB_LEAF_QUERY_QUEUE: 2378 return airoha_tc_get_htb_get_leaf_queue(port, opt); 2379 default: 2380 return -EOPNOTSUPP; 2381 } 2382 2383 return 0; 2384 } 2385 2386 static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type, 2387 void *type_data) 2388 { 2389 struct airoha_gdm_port *port = netdev_priv(dev); 2390 2391 switch (type) { 2392 case TC_SETUP_QDISC_ETS: 2393 return airoha_tc_setup_qdisc_ets(port, type_data); 2394 case TC_SETUP_QDISC_HTB: 2395 return airoha_tc_setup_qdisc_htb(port, type_data); 2396 case TC_SETUP_BLOCK: 2397 case TC_SETUP_FT: 2398 return airoha_dev_setup_tc_block(port, type_data); 2399 default: 2400 return -EOPNOTSUPP; 2401 } 2402 } 2403 2404 static const struct net_device_ops airoha_netdev_ops = { 2405 .ndo_init = airoha_dev_init, 2406 .ndo_open = airoha_dev_open, 2407 .ndo_stop = airoha_dev_stop, 2408 .ndo_change_mtu = airoha_dev_change_mtu, 2409 .ndo_select_queue = airoha_dev_select_queue, 2410 .ndo_start_xmit = airoha_dev_xmit, 2411 .ndo_get_stats64 = airoha_dev_get_stats64, 2412 .ndo_set_mac_address = airoha_dev_set_macaddr, 2413 .ndo_setup_tc = airoha_dev_tc_setup, 2414 }; 2415 2416 static const struct ethtool_ops airoha_ethtool_ops = { 2417 .get_drvinfo = airoha_ethtool_get_drvinfo, 2418 .get_eth_mac_stats = airoha_ethtool_get_mac_stats, 2419 .get_rmon_stats = airoha_ethtool_get_rmon_stats, 2420 }; 2421 2422 static int airoha_metadata_dst_alloc(struct airoha_gdm_port *port) 2423 { 2424 int i; 2425 2426 for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) { 2427 struct metadata_dst *md_dst; 2428 2429 md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, 2430 GFP_KERNEL); 2431 if (!md_dst) 2432 return -ENOMEM; 2433 2434 md_dst->u.port_info.port_id = i; 2435 port->dsa_meta[i] = md_dst; 2436 } 2437 2438 return 0; 2439 } 2440 2441 static void airoha_metadata_dst_free(struct airoha_gdm_port *port) 2442 { 2443 int i; 2444 2445 for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) { 2446 if (!port->dsa_meta[i]) 2447 continue; 2448 2449 metadata_dst_free(port->dsa_meta[i]); 2450 } 2451 } 2452 2453 bool airoha_is_valid_gdm_port(struct airoha_eth *eth, 2454 struct airoha_gdm_port *port) 2455 { 2456 int i; 2457 2458 for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { 2459 if (eth->ports[i] == port) 2460 return true; 2461 } 2462 2463 return false; 2464 } 2465 2466 static int airoha_alloc_gdm_port(struct airoha_eth *eth, 2467 struct device_node *np, int index) 2468 { 2469 const __be32 *id_ptr = of_get_property(np, "reg", NULL); 2470 struct airoha_gdm_port *port; 2471 struct airoha_qdma *qdma; 2472 struct net_device *dev; 2473 int err, p; 2474 u32 id; 2475 2476 if (!id_ptr) { 2477 dev_err(eth->dev, "missing gdm port id\n"); 2478 return -EINVAL; 2479 } 2480 2481 id = be32_to_cpup(id_ptr); 2482 p = id - 1; 2483 2484 if (!id || id > ARRAY_SIZE(eth->ports)) { 2485 dev_err(eth->dev, "invalid gdm port id: %d\n", id); 2486 return -EINVAL; 2487 } 2488 2489 if (eth->ports[p]) { 2490 dev_err(eth->dev, "duplicate gdm port id: %d\n", id); 2491 return -EINVAL; 2492 } 2493 2494 dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port), 2495 AIROHA_NUM_NETDEV_TX_RINGS, 2496 AIROHA_NUM_RX_RING); 2497 if (!dev) { 2498 dev_err(eth->dev, "alloc_etherdev failed\n"); 2499 return -ENOMEM; 2500 } 2501 2502 qdma = ð->qdma[index % AIROHA_MAX_NUM_QDMA]; 2503 dev->netdev_ops = &airoha_netdev_ops; 2504 dev->ethtool_ops = &airoha_ethtool_ops; 2505 dev->max_mtu = AIROHA_MAX_MTU; 2506 dev->watchdog_timeo = 5 * HZ; 2507 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | 2508 NETIF_F_TSO6 | NETIF_F_IPV6_CSUM | 2509 NETIF_F_SG | NETIF_F_TSO | 2510 NETIF_F_HW_TC; 2511 dev->features |= dev->hw_features; 2512 dev->vlan_features = dev->hw_features; 2513 dev->dev.of_node = np; 2514 dev->irq = qdma->irq; 2515 SET_NETDEV_DEV(dev, eth->dev); 2516 2517 /* reserve hw queues for HTB offloading */ 2518 err = netif_set_real_num_tx_queues(dev, AIROHA_NUM_TX_RING); 2519 if (err) 2520 return err; 2521 2522 err = of_get_ethdev_address(np, dev); 2523 if (err) { 2524 if (err == -EPROBE_DEFER) 2525 return err; 2526 2527 eth_hw_addr_random(dev); 2528 dev_info(eth->dev, "generated random MAC address %pM\n", 2529 dev->dev_addr); 2530 } 2531 2532 port = netdev_priv(dev); 2533 u64_stats_init(&port->stats.syncp); 2534 spin_lock_init(&port->stats.lock); 2535 port->qdma = qdma; 2536 port->dev = dev; 2537 port->id = id; 2538 eth->ports[p] = port; 2539 2540 err = airoha_metadata_dst_alloc(port); 2541 if (err) 2542 return err; 2543 2544 return register_netdev(dev); 2545 } 2546 2547 static int airoha_probe(struct platform_device *pdev) 2548 { 2549 struct device_node *np; 2550 struct airoha_eth *eth; 2551 int i, err; 2552 2553 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); 2554 if (!eth) 2555 return -ENOMEM; 2556 2557 eth->dev = &pdev->dev; 2558 2559 err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32)); 2560 if (err) { 2561 dev_err(eth->dev, "failed configuring DMA mask\n"); 2562 return err; 2563 } 2564 2565 eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe"); 2566 if (IS_ERR(eth->fe_regs)) 2567 return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs), 2568 "failed to iomap fe regs\n"); 2569 2570 eth->rsts[0].id = "fe"; 2571 eth->rsts[1].id = "pdma"; 2572 eth->rsts[2].id = "qdma"; 2573 err = devm_reset_control_bulk_get_exclusive(eth->dev, 2574 ARRAY_SIZE(eth->rsts), 2575 eth->rsts); 2576 if (err) { 2577 dev_err(eth->dev, "failed to get bulk reset lines\n"); 2578 return err; 2579 } 2580 2581 eth->xsi_rsts[0].id = "xsi-mac"; 2582 eth->xsi_rsts[1].id = "hsi0-mac"; 2583 eth->xsi_rsts[2].id = "hsi1-mac"; 2584 eth->xsi_rsts[3].id = "hsi-mac"; 2585 eth->xsi_rsts[4].id = "xfp-mac"; 2586 err = devm_reset_control_bulk_get_exclusive(eth->dev, 2587 ARRAY_SIZE(eth->xsi_rsts), 2588 eth->xsi_rsts); 2589 if (err) { 2590 dev_err(eth->dev, "failed to get bulk xsi reset lines\n"); 2591 return err; 2592 } 2593 2594 eth->napi_dev = alloc_netdev_dummy(0); 2595 if (!eth->napi_dev) 2596 return -ENOMEM; 2597 2598 /* Enable threaded NAPI by default */ 2599 eth->napi_dev->threaded = true; 2600 strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name)); 2601 platform_set_drvdata(pdev, eth); 2602 2603 err = airoha_hw_init(pdev, eth); 2604 if (err) 2605 goto error_hw_cleanup; 2606 2607 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) 2608 airoha_qdma_start_napi(ð->qdma[i]); 2609 2610 i = 0; 2611 for_each_child_of_node(pdev->dev.of_node, np) { 2612 if (!of_device_is_compatible(np, "airoha,eth-mac")) 2613 continue; 2614 2615 if (!of_device_is_available(np)) 2616 continue; 2617 2618 err = airoha_alloc_gdm_port(eth, np, i++); 2619 if (err) { 2620 of_node_put(np); 2621 goto error_napi_stop; 2622 } 2623 } 2624 2625 return 0; 2626 2627 error_napi_stop: 2628 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) 2629 airoha_qdma_stop_napi(ð->qdma[i]); 2630 error_hw_cleanup: 2631 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) 2632 airoha_hw_cleanup(ð->qdma[i]); 2633 2634 for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { 2635 struct airoha_gdm_port *port = eth->ports[i]; 2636 2637 if (port && port->dev->reg_state == NETREG_REGISTERED) { 2638 unregister_netdev(port->dev); 2639 airoha_metadata_dst_free(port); 2640 } 2641 } 2642 free_netdev(eth->napi_dev); 2643 platform_set_drvdata(pdev, NULL); 2644 2645 return err; 2646 } 2647 2648 static void airoha_remove(struct platform_device *pdev) 2649 { 2650 struct airoha_eth *eth = platform_get_drvdata(pdev); 2651 int i; 2652 2653 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) { 2654 airoha_qdma_stop_napi(ð->qdma[i]); 2655 airoha_hw_cleanup(ð->qdma[i]); 2656 } 2657 2658 for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { 2659 struct airoha_gdm_port *port = eth->ports[i]; 2660 2661 if (!port) 2662 continue; 2663 2664 airoha_dev_stop(port->dev); 2665 unregister_netdev(port->dev); 2666 airoha_metadata_dst_free(port); 2667 } 2668 free_netdev(eth->napi_dev); 2669 2670 airoha_ppe_deinit(eth); 2671 platform_set_drvdata(pdev, NULL); 2672 } 2673 2674 static const struct of_device_id of_airoha_match[] = { 2675 { .compatible = "airoha,en7581-eth" }, 2676 { /* sentinel */ } 2677 }; 2678 MODULE_DEVICE_TABLE(of, of_airoha_match); 2679 2680 static struct platform_driver airoha_driver = { 2681 .probe = airoha_probe, 2682 .remove = airoha_remove, 2683 .driver = { 2684 .name = KBUILD_MODNAME, 2685 .of_match_table = of_airoha_match, 2686 }, 2687 }; 2688 module_platform_driver(airoha_driver); 2689 2690 MODULE_LICENSE("GPL"); 2691 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); 2692 MODULE_DESCRIPTION("Ethernet driver for Airoha SoC"); 2693