1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2024 AIROHA Inc 4 * Author: Lorenzo Bianconi <lorenzo@kernel.org> 5 */ 6 #include <linux/of.h> 7 #include <linux/of_net.h> 8 #include <linux/platform_device.h> 9 #include <linux/tcp.h> 10 #include <linux/u64_stats_sync.h> 11 #include <net/dst_metadata.h> 12 #include <net/page_pool/helpers.h> 13 #include <net/pkt_cls.h> 14 #include <uapi/linux/ppp_defs.h> 15 16 #include "airoha_regs.h" 17 #include "airoha_eth.h" 18 19 u32 airoha_rr(void __iomem *base, u32 offset) 20 { 21 return readl(base + offset); 22 } 23 24 void airoha_wr(void __iomem *base, u32 offset, u32 val) 25 { 26 writel(val, base + offset); 27 } 28 29 u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val) 30 { 31 val |= (airoha_rr(base, offset) & ~mask); 32 airoha_wr(base, offset, val); 33 34 return val; 35 } 36 37 static void airoha_qdma_set_irqmask(struct airoha_irq_bank *irq_bank, 38 int index, u32 clear, u32 set) 39 { 40 struct airoha_qdma *qdma = irq_bank->qdma; 41 int bank = irq_bank - &qdma->irq_banks[0]; 42 unsigned long flags; 43 44 if (WARN_ON_ONCE(index >= ARRAY_SIZE(irq_bank->irqmask))) 45 return; 46 47 spin_lock_irqsave(&irq_bank->irq_lock, flags); 48 49 irq_bank->irqmask[index] &= ~clear; 50 irq_bank->irqmask[index] |= set; 51 airoha_qdma_wr(qdma, REG_INT_ENABLE(bank, index), 52 irq_bank->irqmask[index]); 53 /* Read irq_enable register in order to guarantee the update above 54 * completes in the spinlock critical section. 55 */ 56 airoha_qdma_rr(qdma, REG_INT_ENABLE(bank, index)); 57 58 spin_unlock_irqrestore(&irq_bank->irq_lock, flags); 59 } 60 61 static void airoha_qdma_irq_enable(struct airoha_irq_bank *irq_bank, 62 int index, u32 mask) 63 { 64 airoha_qdma_set_irqmask(irq_bank, index, 0, mask); 65 } 66 67 static void airoha_qdma_irq_disable(struct airoha_irq_bank *irq_bank, 68 int index, u32 mask) 69 { 70 airoha_qdma_set_irqmask(irq_bank, index, mask, 0); 71 } 72 73 static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port) 74 { 75 /* GDM1 port on EN7581 SoC is connected to the lan dsa switch. 76 * GDM{2,3,4} can be used as wan port connected to an external 77 * phy module. 78 */ 79 return port->id == 1; 80 } 81 82 static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr) 83 { 84 struct airoha_eth *eth = port->qdma->eth; 85 u32 val, reg; 86 87 reg = airhoa_is_lan_gdm_port(port) ? 
REG_FE_LAN_MAC_H 88 : REG_FE_WAN_MAC_H; 89 val = (addr[0] << 16) | (addr[1] << 8) | addr[2]; 90 airoha_fe_wr(eth, reg, val); 91 92 val = (addr[3] << 16) | (addr[4] << 8) | addr[5]; 93 airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val); 94 airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val); 95 } 96 97 static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr, 98 u32 val) 99 { 100 airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK, 101 FIELD_PREP(GDM_OCFQ_MASK, val)); 102 airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK, 103 FIELD_PREP(GDM_MCFQ_MASK, val)); 104 airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK, 105 FIELD_PREP(GDM_BCFQ_MASK, val)); 106 airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK, 107 FIELD_PREP(GDM_UCFQ_MASK, val)); 108 } 109 110 static int airoha_set_vip_for_gdm_port(struct airoha_gdm_port *port, 111 bool enable) 112 { 113 struct airoha_eth *eth = port->qdma->eth; 114 u32 vip_port; 115 116 switch (port->id) { 117 case 3: 118 /* FIXME: handle XSI_PCIE1_PORT */ 119 vip_port = XSI_PCIE0_VIP_PORT_MASK; 120 break; 121 case 4: 122 /* FIXME: handle XSI_USB_PORT */ 123 vip_port = XSI_ETH_VIP_PORT_MASK; 124 break; 125 default: 126 return 0; 127 } 128 129 if (enable) { 130 airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port); 131 airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port); 132 } else { 133 airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port); 134 airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port); 135 } 136 137 return 0; 138 } 139 140 static void airoha_fe_maccr_init(struct airoha_eth *eth) 141 { 142 int p; 143 144 for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) 145 airoha_fe_set(eth, REG_GDM_FWD_CFG(p), 146 GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM | 147 GDM_DROP_CRC_ERR); 148 149 airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK, 150 FIELD_PREP(CDM1_VLAN_MASK, 0x8100)); 151 152 airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD); 153 } 154 155 static void airoha_fe_vip_setup(struct airoha_eth *eth) 156 { 157 airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC); 158 airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK); 159 160 airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP); 161 airoha_fe_wr(eth, REG_FE_VIP_EN(4), 162 PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | 163 PATN_EN_MASK); 164 165 airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP); 166 airoha_fe_wr(eth, REG_FE_VIP_EN(6), 167 PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | 168 PATN_EN_MASK); 169 170 airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP); 171 airoha_fe_wr(eth, REG_FE_VIP_EN(7), 172 PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | 173 PATN_EN_MASK); 174 175 /* BOOTP (0x43) */ 176 airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43); 177 airoha_fe_wr(eth, REG_FE_VIP_EN(8), 178 PATN_FCPU_EN_MASK | PATN_SP_EN_MASK | 179 FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK); 180 181 /* BOOTP (0x44) */ 182 airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44); 183 airoha_fe_wr(eth, REG_FE_VIP_EN(9), 184 PATN_FCPU_EN_MASK | PATN_SP_EN_MASK | 185 FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK); 186 187 /* ISAKMP */ 188 airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4); 189 airoha_fe_wr(eth, REG_FE_VIP_EN(10), 190 PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK | 191 FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK); 192 193 airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP); 194 airoha_fe_wr(eth, REG_FE_VIP_EN(11), 195 PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | 196 PATN_EN_MASK); 197 198 /* DHCPv6 */ 199 airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223); 200 airoha_fe_wr(eth, REG_FE_VIP_EN(12), 201 PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | 
PATN_SP_EN_MASK | 202 FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK); 203 204 airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP); 205 airoha_fe_wr(eth, REG_FE_VIP_EN(19), 206 PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | 207 PATN_EN_MASK); 208 209 /* ETH->ETH_P_1905 (0x893a) */ 210 airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a); 211 airoha_fe_wr(eth, REG_FE_VIP_EN(20), 212 PATN_FCPU_EN_MASK | PATN_EN_MASK); 213 214 airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP); 215 airoha_fe_wr(eth, REG_FE_VIP_EN(21), 216 PATN_FCPU_EN_MASK | PATN_EN_MASK); 217 } 218 219 static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth, 220 u32 port, u32 queue) 221 { 222 u32 val; 223 224 airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR, 225 PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK, 226 FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) | 227 FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue)); 228 val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL); 229 230 return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val); 231 } 232 233 static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth, 234 u32 port, u32 queue, u32 val) 235 { 236 airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK, 237 FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val)); 238 airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR, 239 PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK | 240 PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK, 241 FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) | 242 FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) | 243 PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK); 244 } 245 246 static u32 airoha_fe_get_pse_all_rsv(struct airoha_eth *eth) 247 { 248 u32 val = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET); 249 250 return FIELD_GET(PSE_ALLRSV_MASK, val); 251 } 252 253 static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth, 254 u32 port, u32 queue, u32 val) 255 { 256 u32 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue); 257 u32 tmp, all_rsv, fq_limit; 258 259 airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val); 260 261 /* modify all rsv */ 262 all_rsv = airoha_fe_get_pse_all_rsv(eth); 263 all_rsv += (val - orig_val); 264 airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK, 265 FIELD_PREP(PSE_ALLRSV_MASK, all_rsv)); 266 267 /* modify hthd */ 268 tmp = airoha_fe_rr(eth, PSE_FQ_CFG); 269 fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp); 270 tmp = fq_limit - all_rsv - 0x20; 271 airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD, 272 PSE_SHARE_USED_HTHD_MASK, 273 FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp)); 274 275 tmp = fq_limit - all_rsv - 0x100; 276 airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD, 277 PSE_SHARE_USED_MTHD_MASK, 278 FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp)); 279 tmp = (3 * tmp) >> 2; 280 airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, 281 PSE_SHARE_USED_LTHD_MASK, 282 FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp)); 283 284 return 0; 285 } 286 287 static void airoha_fe_pse_ports_init(struct airoha_eth *eth) 288 { 289 const u32 pse_port_num_queues[] = { 290 [FE_PSE_PORT_CDM1] = 6, 291 [FE_PSE_PORT_GDM1] = 6, 292 [FE_PSE_PORT_GDM2] = 32, 293 [FE_PSE_PORT_GDM3] = 6, 294 [FE_PSE_PORT_PPE1] = 4, 295 [FE_PSE_PORT_CDM2] = 6, 296 [FE_PSE_PORT_CDM3] = 8, 297 [FE_PSE_PORT_CDM4] = 10, 298 [FE_PSE_PORT_PPE2] = 4, 299 [FE_PSE_PORT_GDM4] = 2, 300 [FE_PSE_PORT_CDM5] = 2, 301 }; 302 u32 all_rsv; 303 int q; 304 305 all_rsv = airoha_fe_get_pse_all_rsv(eth); 306 /* hw misses PPE2 oq rsv */ 307 all_rsv += PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2]; 308 airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv); 309 310 /* CMD1 */ 311 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++) 312 
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q,
					 PSE_QUEUE_RSV_PAGES);
	/* GDM1 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q,
					 PSE_QUEUE_RSV_PAGES);
	/* GDM2 */
	for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0);
	/* GDM3 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q,
					 PSE_QUEUE_RSV_PAGES);
	/* PPE1 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) {
		if (q < pse_port_num_queues[FE_PSE_PORT_PPE1] / 2)
			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q,
						 PSE_QUEUE_RSV_PAGES);
		else
			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0);
	}
	/* CDM2 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q,
					 PSE_QUEUE_RSV_PAGES);
	/* CDM3 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0);
	/* CDM4 */
	for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
					 PSE_QUEUE_RSV_PAGES);
	/* PPE2 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
		if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q,
						 PSE_QUEUE_RSV_PAGES);
		else
			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0);
	}
	/* GDM4 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q,
					 PSE_QUEUE_RSV_PAGES);
	/* CDM5 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q,
					 PSE_QUEUE_RSV_PAGES);
}

static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
{
	int i;

	for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) {
		int err, j;
		u32 val;

		airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);

		val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
		      MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK;
		airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
		err = read_poll_timeout(airoha_fe_rr, val,
					val & MC_VLAN_CFG_CMD_DONE_MASK,
					USEC_PER_MSEC, 5 * USEC_PER_MSEC,
					false, eth, REG_MC_VLAN_CFG);
		if (err)
			return err;

		for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) {
			airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);

			val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
			      FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) |
			      MC_VLAN_CFG_RW_MASK;
			airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
			err = read_poll_timeout(airoha_fe_rr, val,
						val & MC_VLAN_CFG_CMD_DONE_MASK,
						USEC_PER_MSEC,
						5 * USEC_PER_MSEC, false, eth,
						REG_MC_VLAN_CFG);
			if (err)
				return err;
		}
	}

	return 0;
}

static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
{
	/* CDM1_CRSN_QSEL */
	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2),
		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 >> 2),
		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2),
		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21), 416 CDM_CRSN_QSEL_Q1)); 417 airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2), 418 CDM1_CRSN_QSEL_REASON_MASK(CRSN_24), 419 FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24), 420 CDM_CRSN_QSEL_Q6)); 421 airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2), 422 CDM1_CRSN_QSEL_REASON_MASK(CRSN_25), 423 FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25), 424 CDM_CRSN_QSEL_Q1)); 425 /* CDM2_CRSN_QSEL */ 426 airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2), 427 CDM2_CRSN_QSEL_REASON_MASK(CRSN_08), 428 FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08), 429 CDM_CRSN_QSEL_Q1)); 430 airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2), 431 CDM2_CRSN_QSEL_REASON_MASK(CRSN_21), 432 FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21), 433 CDM_CRSN_QSEL_Q1)); 434 airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2), 435 CDM2_CRSN_QSEL_REASON_MASK(CRSN_22), 436 FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22), 437 CDM_CRSN_QSEL_Q1)); 438 airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2), 439 CDM2_CRSN_QSEL_REASON_MASK(CRSN_24), 440 FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24), 441 CDM_CRSN_QSEL_Q6)); 442 airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2), 443 CDM2_CRSN_QSEL_REASON_MASK(CRSN_25), 444 FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25), 445 CDM_CRSN_QSEL_Q1)); 446 } 447 448 static int airoha_fe_init(struct airoha_eth *eth) 449 { 450 airoha_fe_maccr_init(eth); 451 452 /* PSE IQ reserve */ 453 airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK, 454 FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10)); 455 airoha_fe_rmw(eth, REG_PSE_IQ_REV2, 456 PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK, 457 FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) | 458 FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34)); 459 460 /* enable FE copy engine for MC/KA/DPI */ 461 airoha_fe_wr(eth, REG_FE_PCE_CFG, 462 PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK); 463 /* set vip queue selection to ring 1 */ 464 airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK, 465 FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4)); 466 airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK, 467 FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4)); 468 /* set GDM4 source interface offset to 8 */ 469 airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET, 470 GDM4_SPORT_OFF2_MASK | 471 GDM4_SPORT_OFF1_MASK | 472 GDM4_SPORT_OFF0_MASK, 473 FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) | 474 FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) | 475 FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8)); 476 477 /* set PSE Page as 128B */ 478 airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG, 479 FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK, 480 FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) | 481 FE_DMA_GLO_PG_SZ_MASK); 482 airoha_fe_wr(eth, REG_FE_RST_GLO_CFG, 483 FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK | 484 FE_RST_GDM4_MBI_ARB_MASK); 485 usleep_range(1000, 2000); 486 487 /* connect RxRing1 and RxRing15 to PSE Port0 OQ-1 488 * connect other rings to PSE Port0 OQ-0 489 */ 490 airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4)); 491 airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28)); 492 airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4)); 493 airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28)); 494 495 airoha_fe_vip_setup(eth); 496 airoha_fe_pse_ports_init(eth); 497 498 airoha_fe_set(eth, REG_GDM_MISC_CFG, 499 GDM2_RDM_ACK_WAIT_PREF_MASK | 500 GDM2_CHN_VLD_MODE_MASK); 501 airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK, 502 FIELD_PREP(CDM2_OAM_QSEL_MASK, 15)); 503 504 /* init fragment and assemble Force Port */ 505 /* NPU Core-3, NPU Bridge Channel-3 */ 506 airoha_fe_rmw(eth, REG_IP_FRAG_FP, 507 IP_FRAGMENT_PORT_MASK | 
IP_FRAGMENT_NBQ_MASK, 508 FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) | 509 FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3)); 510 /* QDMA LAN, RX Ring-22 */ 511 airoha_fe_rmw(eth, REG_IP_FRAG_FP, 512 IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK, 513 FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) | 514 FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22)); 515 516 airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK); 517 airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK); 518 519 airoha_fe_crsn_qsel_init(eth); 520 521 airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK); 522 airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK); 523 524 /* default aging mode for mbi unlock issue */ 525 airoha_fe_rmw(eth, REG_GDM2_CHN_RLS, 526 MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK, 527 FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) | 528 FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3)); 529 530 /* disable IFC by default */ 531 airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK); 532 533 airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0), 534 FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM1) | 535 FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM1) | 536 FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM1) | 537 FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM1) | 538 FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM1) | 539 FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM1) | 540 FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM1) | 541 FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM1)); 542 airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(1), 543 FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM2) | 544 FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM2) | 545 FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM2) | 546 FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM2) | 547 FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM2) | 548 FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM2) | 549 FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM2) | 550 FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM2)); 551 552 /* enable 1:N vlan action, init vlan table */ 553 airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK); 554 555 return airoha_fe_mc_vlan_clear(eth); 556 } 557 558 static int airoha_qdma_fill_rx_queue(struct airoha_queue *q) 559 { 560 enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool); 561 struct airoha_qdma *qdma = q->qdma; 562 struct airoha_eth *eth = qdma->eth; 563 int qid = q - &qdma->q_rx[0]; 564 int nframes = 0; 565 566 while (q->queued < q->ndesc - 1) { 567 struct airoha_queue_entry *e = &q->entry[q->head]; 568 struct airoha_qdma_desc *desc = &q->desc[q->head]; 569 struct page *page; 570 int offset; 571 u32 val; 572 573 page = page_pool_dev_alloc_frag(q->page_pool, &offset, 574 q->buf_size); 575 if (!page) 576 break; 577 578 q->head = (q->head + 1) % q->ndesc; 579 q->queued++; 580 nframes++; 581 582 e->buf = page_address(page) + offset; 583 e->dma_addr = page_pool_get_dma_addr(page) + offset; 584 e->dma_len = SKB_WITH_OVERHEAD(q->buf_size); 585 586 dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len, 587 dir); 588 589 val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len); 590 WRITE_ONCE(desc->ctrl, cpu_to_le32(val)); 591 WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr)); 592 val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head); 593 WRITE_ONCE(desc->data, cpu_to_le32(val)); 594 WRITE_ONCE(desc->msg0, 0); 595 WRITE_ONCE(desc->msg1, 0); 596 WRITE_ONCE(desc->msg2, 0); 597 WRITE_ONCE(desc->msg3, 0); 598 599 airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), 600 RX_RING_CPU_IDX_MASK, 601 FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head)); 602 } 603 604 return nframes; 605 } 606 607 static int airoha_qdma_get_gdm_port(struct 
airoha_eth *eth, 608 struct airoha_qdma_desc *desc) 609 { 610 u32 port, sport, msg1 = le32_to_cpu(desc->msg1); 611 612 sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1); 613 switch (sport) { 614 case 0x10 ... 0x14: 615 port = 0; 616 break; 617 case 0x2 ... 0x4: 618 port = sport - 1; 619 break; 620 default: 621 return -EINVAL; 622 } 623 624 return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port; 625 } 626 627 static int airoha_qdma_rx_process(struct airoha_queue *q, int budget) 628 { 629 enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool); 630 struct airoha_qdma *qdma = q->qdma; 631 struct airoha_eth *eth = qdma->eth; 632 int qid = q - &qdma->q_rx[0]; 633 int done = 0; 634 635 while (done < budget) { 636 struct airoha_queue_entry *e = &q->entry[q->tail]; 637 struct airoha_qdma_desc *desc = &q->desc[q->tail]; 638 u32 hash, reason, msg1 = le32_to_cpu(desc->msg1); 639 dma_addr_t dma_addr = le32_to_cpu(desc->addr); 640 struct page *page = virt_to_head_page(e->buf); 641 u32 desc_ctrl = le32_to_cpu(desc->ctrl); 642 struct airoha_gdm_port *port; 643 int data_len, len, p; 644 645 if (!(desc_ctrl & QDMA_DESC_DONE_MASK)) 646 break; 647 648 if (!dma_addr) 649 break; 650 651 len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl); 652 if (!len) 653 break; 654 655 q->tail = (q->tail + 1) % q->ndesc; 656 q->queued--; 657 658 dma_sync_single_for_cpu(eth->dev, dma_addr, 659 SKB_WITH_OVERHEAD(q->buf_size), dir); 660 661 data_len = q->skb ? q->buf_size 662 : SKB_WITH_OVERHEAD(q->buf_size); 663 if (data_len < len) 664 goto free_frag; 665 666 p = airoha_qdma_get_gdm_port(eth, desc); 667 if (p < 0 || !eth->ports[p]) 668 goto free_frag; 669 670 port = eth->ports[p]; 671 if (!q->skb) { /* first buffer */ 672 q->skb = napi_build_skb(e->buf, q->buf_size); 673 if (!q->skb) 674 goto free_frag; 675 676 __skb_put(q->skb, len); 677 skb_mark_for_recycle(q->skb); 678 q->skb->dev = port->dev; 679 q->skb->protocol = eth_type_trans(q->skb, port->dev); 680 q->skb->ip_summed = CHECKSUM_UNNECESSARY; 681 skb_record_rx_queue(q->skb, qid); 682 } else { /* scattered frame */ 683 struct skb_shared_info *shinfo = skb_shinfo(q->skb); 684 int nr_frags = shinfo->nr_frags; 685 686 if (nr_frags >= ARRAY_SIZE(shinfo->frags)) 687 goto free_frag; 688 689 skb_add_rx_frag(q->skb, nr_frags, page, 690 e->buf - page_address(page), len, 691 q->buf_size); 692 } 693 694 if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl)) 695 continue; 696 697 if (netdev_uses_dsa(port->dev)) { 698 /* PPE module requires untagged packets to work 699 * properly and it provides DSA port index via the 700 * DMA descriptor. Report DSA tag to the DSA stack 701 * via skb dst info. 
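			 *
			 * The DSA tag is not re-inserted into the frame data:
			 * the source-port index carried in desc->msg0 selects
			 * one of the prebuilt port->dsa_meta[] metadata dst
			 * entries below, and skb_dst_set_noref() attaches it
			 * so the DSA receive path can recover the user port.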
702 */ 703 u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG, 704 le32_to_cpu(desc->msg0)); 705 706 if (sptag < ARRAY_SIZE(port->dsa_meta) && 707 port->dsa_meta[sptag]) 708 skb_dst_set_noref(q->skb, 709 &port->dsa_meta[sptag]->dst); 710 } 711 712 hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1); 713 if (hash != AIROHA_RXD4_FOE_ENTRY) 714 skb_set_hash(q->skb, jhash_1word(hash, 0), 715 PKT_HASH_TYPE_L4); 716 717 reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1); 718 if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) 719 airoha_ppe_check_skb(eth->ppe, q->skb, hash); 720 721 done++; 722 napi_gro_receive(&q->napi, q->skb); 723 q->skb = NULL; 724 continue; 725 free_frag: 726 page_pool_put_full_page(q->page_pool, page, true); 727 dev_kfree_skb(q->skb); 728 q->skb = NULL; 729 } 730 airoha_qdma_fill_rx_queue(q); 731 732 return done; 733 } 734 735 static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget) 736 { 737 struct airoha_queue *q = container_of(napi, struct airoha_queue, napi); 738 int cur, done = 0; 739 740 do { 741 cur = airoha_qdma_rx_process(q, budget - done); 742 done += cur; 743 } while (cur && done < budget); 744 745 if (done < budget && napi_complete(napi)) { 746 struct airoha_qdma *qdma = q->qdma; 747 int i, qid = q - &qdma->q_rx[0]; 748 int intr_reg = qid < RX_DONE_HIGH_OFFSET ? QDMA_INT_REG_IDX1 749 : QDMA_INT_REG_IDX2; 750 751 for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) { 752 if (!(BIT(qid) & RX_IRQ_BANK_PIN_MASK(i))) 753 continue; 754 755 airoha_qdma_irq_enable(&qdma->irq_banks[i], intr_reg, 756 BIT(qid % RX_DONE_HIGH_OFFSET)); 757 } 758 } 759 760 return done; 761 } 762 763 static int airoha_qdma_init_rx_queue(struct airoha_queue *q, 764 struct airoha_qdma *qdma, int ndesc) 765 { 766 const struct page_pool_params pp_params = { 767 .order = 0, 768 .pool_size = 256, 769 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, 770 .dma_dir = DMA_FROM_DEVICE, 771 .max_len = PAGE_SIZE, 772 .nid = NUMA_NO_NODE, 773 .dev = qdma->eth->dev, 774 .napi = &q->napi, 775 }; 776 struct airoha_eth *eth = qdma->eth; 777 int qid = q - &qdma->q_rx[0], thr; 778 dma_addr_t dma_addr; 779 780 q->buf_size = PAGE_SIZE / 2; 781 q->ndesc = ndesc; 782 q->qdma = qdma; 783 784 q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry), 785 GFP_KERNEL); 786 if (!q->entry) 787 return -ENOMEM; 788 789 q->page_pool = page_pool_create(&pp_params); 790 if (IS_ERR(q->page_pool)) { 791 int err = PTR_ERR(q->page_pool); 792 793 q->page_pool = NULL; 794 return err; 795 } 796 797 q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc), 798 &dma_addr, GFP_KERNEL); 799 if (!q->desc) 800 return -ENOMEM; 801 802 netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll); 803 804 airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr); 805 airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), 806 RX_RING_SIZE_MASK, 807 FIELD_PREP(RX_RING_SIZE_MASK, ndesc)); 808 809 thr = clamp(ndesc >> 3, 1, 32); 810 airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK, 811 FIELD_PREP(RX_RING_THR_MASK, thr)); 812 airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK, 813 FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head)); 814 airoha_qdma_set(qdma, REG_RX_SCATTER_CFG(qid), RX_RING_SG_EN_MASK); 815 816 airoha_qdma_fill_rx_queue(q); 817 818 return 0; 819 } 820 821 static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q) 822 { 823 struct airoha_eth *eth = q->qdma->eth; 824 825 while (q->queued) { 826 struct airoha_queue_entry *e = &q->entry[q->tail]; 827 struct page *page = virt_to_head_page(e->buf); 828 
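		/* Queue teardown: sync the buffer back for the CPU and
		 * release it to the page pool instead of the hw ring.
		 */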
829 dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len, 830 page_pool_get_dma_dir(q->page_pool)); 831 page_pool_put_full_page(q->page_pool, page, false); 832 q->tail = (q->tail + 1) % q->ndesc; 833 q->queued--; 834 } 835 } 836 837 static int airoha_qdma_init_rx(struct airoha_qdma *qdma) 838 { 839 int i; 840 841 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { 842 int err; 843 844 if (!(RX_DONE_INT_MASK & BIT(i))) { 845 /* rx-queue not binded to irq */ 846 continue; 847 } 848 849 err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma, 850 RX_DSCP_NUM(i)); 851 if (err) 852 return err; 853 } 854 855 return 0; 856 } 857 858 static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget) 859 { 860 struct airoha_tx_irq_queue *irq_q; 861 int id, done = 0, irq_queued; 862 struct airoha_qdma *qdma; 863 struct airoha_eth *eth; 864 u32 status, head; 865 866 irq_q = container_of(napi, struct airoha_tx_irq_queue, napi); 867 qdma = irq_q->qdma; 868 id = irq_q - &qdma->q_tx_irq[0]; 869 eth = qdma->eth; 870 871 status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id)); 872 head = FIELD_GET(IRQ_HEAD_IDX_MASK, status); 873 head = head % irq_q->size; 874 irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status); 875 876 while (irq_queued > 0 && done < budget) { 877 u32 qid, val = irq_q->q[head]; 878 struct airoha_qdma_desc *desc; 879 struct airoha_queue_entry *e; 880 struct airoha_queue *q; 881 u32 index, desc_ctrl; 882 struct sk_buff *skb; 883 884 if (val == 0xff) 885 break; 886 887 irq_q->q[head] = 0xff; /* mark as done */ 888 head = (head + 1) % irq_q->size; 889 irq_queued--; 890 done++; 891 892 qid = FIELD_GET(IRQ_RING_IDX_MASK, val); 893 if (qid >= ARRAY_SIZE(qdma->q_tx)) 894 continue; 895 896 q = &qdma->q_tx[qid]; 897 if (!q->ndesc) 898 continue; 899 900 index = FIELD_GET(IRQ_DESC_IDX_MASK, val); 901 if (index >= q->ndesc) 902 continue; 903 904 spin_lock_bh(&q->lock); 905 906 if (!q->queued) 907 goto unlock; 908 909 desc = &q->desc[index]; 910 desc_ctrl = le32_to_cpu(desc->ctrl); 911 912 if (!(desc_ctrl & QDMA_DESC_DONE_MASK) && 913 !(desc_ctrl & QDMA_DESC_DROP_MASK)) 914 goto unlock; 915 916 e = &q->entry[index]; 917 skb = e->skb; 918 919 dma_unmap_single(eth->dev, e->dma_addr, e->dma_len, 920 DMA_TO_DEVICE); 921 memset(e, 0, sizeof(*e)); 922 WRITE_ONCE(desc->msg0, 0); 923 WRITE_ONCE(desc->msg1, 0); 924 q->queued--; 925 926 /* completion ring can report out-of-order indexes if hw QoS 927 * is enabled and packets with different priority are queued 928 * to same DMA ring. 
Take into account possible out-of-order 929 * reports incrementing DMA ring tail pointer 930 */ 931 while (q->tail != q->head && !q->entry[q->tail].dma_addr) 932 q->tail = (q->tail + 1) % q->ndesc; 933 934 if (skb) { 935 u16 queue = skb_get_queue_mapping(skb); 936 struct netdev_queue *txq; 937 938 txq = netdev_get_tx_queue(skb->dev, queue); 939 netdev_tx_completed_queue(txq, 1, skb->len); 940 if (netif_tx_queue_stopped(txq) && 941 q->ndesc - q->queued >= q->free_thr) 942 netif_tx_wake_queue(txq); 943 944 dev_kfree_skb_any(skb); 945 } 946 unlock: 947 spin_unlock_bh(&q->lock); 948 } 949 950 if (done) { 951 int i, len = done >> 7; 952 953 for (i = 0; i < len; i++) 954 airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id), 955 IRQ_CLEAR_LEN_MASK, 0x80); 956 airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id), 957 IRQ_CLEAR_LEN_MASK, (done & 0x7f)); 958 } 959 960 if (done < budget && napi_complete(napi)) 961 airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0, 962 TX_DONE_INT_MASK(id)); 963 964 return done; 965 } 966 967 static int airoha_qdma_init_tx_queue(struct airoha_queue *q, 968 struct airoha_qdma *qdma, int size) 969 { 970 struct airoha_eth *eth = qdma->eth; 971 int i, qid = q - &qdma->q_tx[0]; 972 dma_addr_t dma_addr; 973 974 spin_lock_init(&q->lock); 975 q->ndesc = size; 976 q->qdma = qdma; 977 q->free_thr = 1 + MAX_SKB_FRAGS; 978 979 q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry), 980 GFP_KERNEL); 981 if (!q->entry) 982 return -ENOMEM; 983 984 q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc), 985 &dma_addr, GFP_KERNEL); 986 if (!q->desc) 987 return -ENOMEM; 988 989 for (i = 0; i < q->ndesc; i++) { 990 u32 val; 991 992 val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1); 993 WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val)); 994 } 995 996 /* xmit ring drop default setting */ 997 airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid), 998 TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK); 999 1000 airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr); 1001 airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK, 1002 FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head)); 1003 airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK, 1004 FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head)); 1005 1006 return 0; 1007 } 1008 1009 static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q, 1010 struct airoha_qdma *qdma, int size) 1011 { 1012 int id = irq_q - &qdma->q_tx_irq[0]; 1013 struct airoha_eth *eth = qdma->eth; 1014 dma_addr_t dma_addr; 1015 1016 netif_napi_add_tx(eth->napi_dev, &irq_q->napi, 1017 airoha_qdma_tx_napi_poll); 1018 irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32), 1019 &dma_addr, GFP_KERNEL); 1020 if (!irq_q->q) 1021 return -ENOMEM; 1022 1023 memset(irq_q->q, 0xff, size * sizeof(u32)); 1024 irq_q->size = size; 1025 irq_q->qdma = qdma; 1026 1027 airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr); 1028 airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK, 1029 FIELD_PREP(TX_IRQ_DEPTH_MASK, size)); 1030 airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK, 1031 FIELD_PREP(TX_IRQ_THR_MASK, 1)); 1032 1033 return 0; 1034 } 1035 1036 static int airoha_qdma_init_tx(struct airoha_qdma *qdma) 1037 { 1038 int i, err; 1039 1040 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) { 1041 err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma, 1042 IRQ_QUEUE_LEN(i)); 1043 if (err) 1044 return err; 1045 } 1046 1047 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { 1048 err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma, 1049 TX_DSCP_NUM); 1050 if (err) 1051 return err; 
1052 } 1053 1054 return 0; 1055 } 1056 1057 static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q) 1058 { 1059 struct airoha_eth *eth = q->qdma->eth; 1060 1061 spin_lock_bh(&q->lock); 1062 while (q->queued) { 1063 struct airoha_queue_entry *e = &q->entry[q->tail]; 1064 1065 dma_unmap_single(eth->dev, e->dma_addr, e->dma_len, 1066 DMA_TO_DEVICE); 1067 dev_kfree_skb_any(e->skb); 1068 e->skb = NULL; 1069 1070 q->tail = (q->tail + 1) % q->ndesc; 1071 q->queued--; 1072 } 1073 spin_unlock_bh(&q->lock); 1074 } 1075 1076 static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma) 1077 { 1078 struct airoha_eth *eth = qdma->eth; 1079 dma_addr_t dma_addr; 1080 u32 status; 1081 int size; 1082 1083 size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc); 1084 qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr, 1085 GFP_KERNEL); 1086 if (!qdma->hfwd.desc) 1087 return -ENOMEM; 1088 1089 airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr); 1090 1091 size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM; 1092 qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr, 1093 GFP_KERNEL); 1094 if (!qdma->hfwd.q) 1095 return -ENOMEM; 1096 1097 airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr); 1098 1099 airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG, 1100 HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 1101 FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0)); 1102 airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK, 1103 FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128)); 1104 airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG, 1105 LMGR_INIT_START | LMGR_SRAM_MODE_MASK | 1106 HW_FWD_DESC_NUM_MASK, 1107 FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) | 1108 LMGR_INIT_START); 1109 1110 return read_poll_timeout(airoha_qdma_rr, status, 1111 !(status & LMGR_INIT_START), USEC_PER_MSEC, 1112 30 * USEC_PER_MSEC, true, qdma, 1113 REG_LMGR_INIT_CFG); 1114 } 1115 1116 static void airoha_qdma_init_qos(struct airoha_qdma *qdma) 1117 { 1118 airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK); 1119 airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK); 1120 1121 airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG, 1122 PSE_BUF_ESTIMATE_EN_MASK); 1123 1124 airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG, 1125 EGRESS_RATE_METER_EN_MASK | 1126 EGRESS_RATE_METER_EQ_RATE_EN_MASK); 1127 /* 2047us x 31 = 63.457ms */ 1128 airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG, 1129 EGRESS_RATE_METER_WINDOW_SZ_MASK, 1130 FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f)); 1131 airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG, 1132 EGRESS_RATE_METER_TIMESLICE_MASK, 1133 FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff)); 1134 1135 /* ratelimit init */ 1136 airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK); 1137 /* fast-tick 25us */ 1138 airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK, 1139 FIELD_PREP(GLB_FAST_TICK_MASK, 25)); 1140 airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK, 1141 FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40)); 1142 1143 airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK); 1144 airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK, 1145 FIELD_PREP(EGRESS_FAST_TICK_MASK, 25)); 1146 airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, 1147 EGRESS_SLOW_TICK_RATIO_MASK, 1148 FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40)); 1149 1150 airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK); 1151 airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG, 1152 INGRESS_TRTCM_MODE_MASK); 1153 airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK, 1154 
			FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
	airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
			INGRESS_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));

	airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
	airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
			FIELD_PREP(SLA_FAST_TICK_MASK, 25));
	airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
}

static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
		/* Tx-cpu transferred count */
		airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
		airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
			       CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
			       CNTR_ALL_DSCP_RING_EN_MASK |
			       FIELD_PREP(CNTR_CHAN_MASK, i));
		/* Tx-fwd transferred count */
		airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0);
		airoha_qdma_wr(qdma, REG_CNTR_CFG((i << 1) + 1),
			       CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
			       CNTR_ALL_DSCP_RING_EN_MASK |
			       FIELD_PREP(CNTR_SRC_MASK, 1) |
			       FIELD_PREP(CNTR_CHAN_MASK, i));
	}
}

static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
		/* clear pending irqs */
		airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
		/* setup rx irqs */
		airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX0,
				       INT_RX0_MASK(RX_IRQ_BANK_PIN_MASK(i)));
		airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX1,
				       INT_RX1_MASK(RX_IRQ_BANK_PIN_MASK(i)));
		airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX2,
				       INT_RX2_MASK(RX_IRQ_BANK_PIN_MASK(i)));
		airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX3,
				       INT_RX3_MASK(RX_IRQ_BANK_PIN_MASK(i)));
	}
	/* setup tx irqs */
	airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
			       TX_COHERENT_LOW_INT_MASK | INT_TX_MASK);
	airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX4,
			       TX_COHERENT_HIGH_INT_MASK);

	/* setup irq binding */
	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		if (!qdma->q_tx[i].ndesc)
			continue;

		if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
			airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
					TX_RING_IRQ_BLOCKING_CFG_MASK);
		else
			airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
					  TX_RING_IRQ_BLOCKING_CFG_MASK);
	}

	airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
		       FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
		       GLOBAL_CFG_CPU_TXR_RR_MASK |
		       GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
		       GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK |
		       GLOBAL_CFG_MULTICAST_EN_MASK |
		       GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK |
		       GLOBAL_CFG_TX_WB_DONE_MASK |
		       FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));

	airoha_qdma_init_qos(qdma);

	/* disable qdma rx delay interrupt */
	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
				  RX_DELAY_INT_MASK);
	}

	airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
			TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
	airoha_qdma_init_qos_stats(qdma);

	return 0;
}

static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
{
	struct airoha_irq_bank *irq_bank = dev_instance;
	struct airoha_qdma *qdma =
irq_bank->qdma; 1255 u32 rx_intr_mask = 0, rx_intr1, rx_intr2; 1256 u32 intr[ARRAY_SIZE(irq_bank->irqmask)]; 1257 int i; 1258 1259 for (i = 0; i < ARRAY_SIZE(intr); i++) { 1260 intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i)); 1261 intr[i] &= irq_bank->irqmask[i]; 1262 airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]); 1263 } 1264 1265 if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state)) 1266 return IRQ_NONE; 1267 1268 rx_intr1 = intr[1] & RX_DONE_LOW_INT_MASK; 1269 if (rx_intr1) { 1270 airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX1, rx_intr1); 1271 rx_intr_mask |= rx_intr1; 1272 } 1273 1274 rx_intr2 = intr[2] & RX_DONE_HIGH_INT_MASK; 1275 if (rx_intr2) { 1276 airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX2, rx_intr2); 1277 rx_intr_mask |= (rx_intr2 << 16); 1278 } 1279 1280 for (i = 0; rx_intr_mask && i < ARRAY_SIZE(qdma->q_rx); i++) { 1281 if (!qdma->q_rx[i].ndesc) 1282 continue; 1283 1284 if (rx_intr_mask & BIT(i)) 1285 napi_schedule(&qdma->q_rx[i].napi); 1286 } 1287 1288 if (intr[0] & INT_TX_MASK) { 1289 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) { 1290 if (!(intr[0] & TX_DONE_INT_MASK(i))) 1291 continue; 1292 1293 airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX0, 1294 TX_DONE_INT_MASK(i)); 1295 napi_schedule(&qdma->q_tx_irq[i].napi); 1296 } 1297 } 1298 1299 return IRQ_HANDLED; 1300 } 1301 1302 static int airoha_qdma_init_irq_banks(struct platform_device *pdev, 1303 struct airoha_qdma *qdma) 1304 { 1305 struct airoha_eth *eth = qdma->eth; 1306 int i, id = qdma - ð->qdma[0]; 1307 1308 for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) { 1309 struct airoha_irq_bank *irq_bank = &qdma->irq_banks[i]; 1310 int err, irq_index = 4 * id + i; 1311 const char *name; 1312 1313 spin_lock_init(&irq_bank->irq_lock); 1314 irq_bank->qdma = qdma; 1315 1316 irq_bank->irq = platform_get_irq(pdev, irq_index); 1317 if (irq_bank->irq < 0) 1318 return irq_bank->irq; 1319 1320 name = devm_kasprintf(eth->dev, GFP_KERNEL, 1321 KBUILD_MODNAME ".%d", irq_index); 1322 if (!name) 1323 return -ENOMEM; 1324 1325 err = devm_request_irq(eth->dev, irq_bank->irq, 1326 airoha_irq_handler, IRQF_SHARED, name, 1327 irq_bank); 1328 if (err) 1329 return err; 1330 } 1331 1332 return 0; 1333 } 1334 1335 static int airoha_qdma_init(struct platform_device *pdev, 1336 struct airoha_eth *eth, 1337 struct airoha_qdma *qdma) 1338 { 1339 int err, id = qdma - ð->qdma[0]; 1340 const char *res; 1341 1342 qdma->eth = eth; 1343 res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id); 1344 if (!res) 1345 return -ENOMEM; 1346 1347 qdma->regs = devm_platform_ioremap_resource_byname(pdev, res); 1348 if (IS_ERR(qdma->regs)) 1349 return dev_err_probe(eth->dev, PTR_ERR(qdma->regs), 1350 "failed to iomap qdma%d regs\n", id); 1351 1352 err = airoha_qdma_init_irq_banks(pdev, qdma); 1353 if (err) 1354 return err; 1355 1356 err = airoha_qdma_init_rx(qdma); 1357 if (err) 1358 return err; 1359 1360 err = airoha_qdma_init_tx(qdma); 1361 if (err) 1362 return err; 1363 1364 err = airoha_qdma_init_hfwd_queues(qdma); 1365 if (err) 1366 return err; 1367 1368 return airoha_qdma_hw_init(qdma); 1369 } 1370 1371 static int airoha_hw_init(struct platform_device *pdev, 1372 struct airoha_eth *eth) 1373 { 1374 int err, i; 1375 1376 /* disable xsi */ 1377 err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), 1378 eth->xsi_rsts); 1379 if (err) 1380 return err; 1381 1382 err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts); 1383 if (err) 1384 return err; 1385 1386 msleep(20); 1387 err = 
reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts); 1388 if (err) 1389 return err; 1390 1391 msleep(20); 1392 err = airoha_fe_init(eth); 1393 if (err) 1394 return err; 1395 1396 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) { 1397 err = airoha_qdma_init(pdev, eth, ð->qdma[i]); 1398 if (err) 1399 return err; 1400 } 1401 1402 err = airoha_ppe_init(eth); 1403 if (err) 1404 return err; 1405 1406 set_bit(DEV_STATE_INITIALIZED, ð->state); 1407 1408 return 0; 1409 } 1410 1411 static void airoha_hw_cleanup(struct airoha_qdma *qdma) 1412 { 1413 int i; 1414 1415 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { 1416 if (!qdma->q_rx[i].ndesc) 1417 continue; 1418 1419 netif_napi_del(&qdma->q_rx[i].napi); 1420 airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]); 1421 if (qdma->q_rx[i].page_pool) 1422 page_pool_destroy(qdma->q_rx[i].page_pool); 1423 } 1424 1425 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) 1426 netif_napi_del(&qdma->q_tx_irq[i].napi); 1427 1428 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { 1429 if (!qdma->q_tx[i].ndesc) 1430 continue; 1431 1432 airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]); 1433 } 1434 } 1435 1436 static void airoha_qdma_start_napi(struct airoha_qdma *qdma) 1437 { 1438 int i; 1439 1440 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) 1441 napi_enable(&qdma->q_tx_irq[i].napi); 1442 1443 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { 1444 if (!qdma->q_rx[i].ndesc) 1445 continue; 1446 1447 napi_enable(&qdma->q_rx[i].napi); 1448 } 1449 } 1450 1451 static void airoha_qdma_stop_napi(struct airoha_qdma *qdma) 1452 { 1453 int i; 1454 1455 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) 1456 napi_disable(&qdma->q_tx_irq[i].napi); 1457 1458 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { 1459 if (!qdma->q_rx[i].ndesc) 1460 continue; 1461 1462 napi_disable(&qdma->q_rx[i].napi); 1463 } 1464 } 1465 1466 static void airoha_update_hw_stats(struct airoha_gdm_port *port) 1467 { 1468 struct airoha_eth *eth = port->qdma->eth; 1469 u32 val, i = 0; 1470 1471 spin_lock(&port->stats.lock); 1472 u64_stats_update_begin(&port->stats.syncp); 1473 1474 /* TX */ 1475 val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id)); 1476 port->stats.tx_ok_pkts += ((u64)val << 32); 1477 val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id)); 1478 port->stats.tx_ok_pkts += val; 1479 1480 val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id)); 1481 port->stats.tx_ok_bytes += ((u64)val << 32); 1482 val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id)); 1483 port->stats.tx_ok_bytes += val; 1484 1485 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id)); 1486 port->stats.tx_drops += val; 1487 1488 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id)); 1489 port->stats.tx_broadcast += val; 1490 1491 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id)); 1492 port->stats.tx_multicast += val; 1493 1494 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id)); 1495 port->stats.tx_len[i] += val; 1496 1497 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id)); 1498 port->stats.tx_len[i] += ((u64)val << 32); 1499 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id)); 1500 port->stats.tx_len[i++] += val; 1501 1502 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id)); 1503 port->stats.tx_len[i] += ((u64)val << 32); 1504 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id)); 1505 port->stats.tx_len[i++] += val; 1506 1507 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id)); 1508 port->stats.tx_len[i] += ((u64)val << 32); 
1509 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id)); 1510 port->stats.tx_len[i++] += val; 1511 1512 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id)); 1513 port->stats.tx_len[i] += ((u64)val << 32); 1514 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id)); 1515 port->stats.tx_len[i++] += val; 1516 1517 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id)); 1518 port->stats.tx_len[i] += ((u64)val << 32); 1519 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id)); 1520 port->stats.tx_len[i++] += val; 1521 1522 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id)); 1523 port->stats.tx_len[i] += ((u64)val << 32); 1524 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id)); 1525 port->stats.tx_len[i++] += val; 1526 1527 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id)); 1528 port->stats.tx_len[i++] += val; 1529 1530 /* RX */ 1531 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id)); 1532 port->stats.rx_ok_pkts += ((u64)val << 32); 1533 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id)); 1534 port->stats.rx_ok_pkts += val; 1535 1536 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id)); 1537 port->stats.rx_ok_bytes += ((u64)val << 32); 1538 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id)); 1539 port->stats.rx_ok_bytes += val; 1540 1541 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id)); 1542 port->stats.rx_drops += val; 1543 1544 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id)); 1545 port->stats.rx_broadcast += val; 1546 1547 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id)); 1548 port->stats.rx_multicast += val; 1549 1550 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id)); 1551 port->stats.rx_errors += val; 1552 1553 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id)); 1554 port->stats.rx_crc_error += val; 1555 1556 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id)); 1557 port->stats.rx_over_errors += val; 1558 1559 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id)); 1560 port->stats.rx_fragment += val; 1561 1562 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id)); 1563 port->stats.rx_jabber += val; 1564 1565 i = 0; 1566 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id)); 1567 port->stats.rx_len[i] += val; 1568 1569 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id)); 1570 port->stats.rx_len[i] += ((u64)val << 32); 1571 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id)); 1572 port->stats.rx_len[i++] += val; 1573 1574 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id)); 1575 port->stats.rx_len[i] += ((u64)val << 32); 1576 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id)); 1577 port->stats.rx_len[i++] += val; 1578 1579 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id)); 1580 port->stats.rx_len[i] += ((u64)val << 32); 1581 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id)); 1582 port->stats.rx_len[i++] += val; 1583 1584 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id)); 1585 port->stats.rx_len[i] += ((u64)val << 32); 1586 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id)); 1587 port->stats.rx_len[i++] += val; 1588 1589 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id)); 1590 port->stats.rx_len[i] += ((u64)val << 32); 1591 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id)); 1592 port->stats.rx_len[i++] += val; 1593 1594 val = 
airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id)); 1595 port->stats.rx_len[i] += ((u64)val << 32); 1596 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id)); 1597 port->stats.rx_len[i++] += val; 1598 1599 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id)); 1600 port->stats.rx_len[i++] += val; 1601 1602 /* reset mib counters */ 1603 airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id), 1604 FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK); 1605 1606 u64_stats_update_end(&port->stats.syncp); 1607 spin_unlock(&port->stats.lock); 1608 } 1609 1610 static int airoha_dev_open(struct net_device *dev) 1611 { 1612 int err, len = ETH_HLEN + dev->mtu + ETH_FCS_LEN; 1613 struct airoha_gdm_port *port = netdev_priv(dev); 1614 struct airoha_qdma *qdma = port->qdma; 1615 1616 netif_tx_start_all_queues(dev); 1617 err = airoha_set_vip_for_gdm_port(port, true); 1618 if (err) 1619 return err; 1620 1621 if (netdev_uses_dsa(dev)) 1622 airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id), 1623 GDM_STAG_EN_MASK); 1624 else 1625 airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id), 1626 GDM_STAG_EN_MASK); 1627 1628 airoha_fe_rmw(qdma->eth, REG_GDM_LEN_CFG(port->id), 1629 GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK, 1630 FIELD_PREP(GDM_SHORT_LEN_MASK, 60) | 1631 FIELD_PREP(GDM_LONG_LEN_MASK, len)); 1632 1633 airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG, 1634 GLOBAL_CFG_TX_DMA_EN_MASK | 1635 GLOBAL_CFG_RX_DMA_EN_MASK); 1636 atomic_inc(&qdma->users); 1637 1638 return 0; 1639 } 1640 1641 static int airoha_dev_stop(struct net_device *dev) 1642 { 1643 struct airoha_gdm_port *port = netdev_priv(dev); 1644 struct airoha_qdma *qdma = port->qdma; 1645 int i, err; 1646 1647 netif_tx_disable(dev); 1648 err = airoha_set_vip_for_gdm_port(port, false); 1649 if (err) 1650 return err; 1651 1652 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) 1653 netdev_tx_reset_subqueue(dev, i); 1654 1655 if (atomic_dec_and_test(&qdma->users)) { 1656 airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG, 1657 GLOBAL_CFG_TX_DMA_EN_MASK | 1658 GLOBAL_CFG_RX_DMA_EN_MASK); 1659 1660 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { 1661 if (!qdma->q_tx[i].ndesc) 1662 continue; 1663 1664 airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]); 1665 } 1666 } 1667 1668 return 0; 1669 } 1670 1671 static int airoha_dev_set_macaddr(struct net_device *dev, void *p) 1672 { 1673 struct airoha_gdm_port *port = netdev_priv(dev); 1674 int err; 1675 1676 err = eth_mac_addr(dev, p); 1677 if (err) 1678 return err; 1679 1680 airoha_set_macaddr(port, dev->dev_addr); 1681 1682 return 0; 1683 } 1684 1685 static void airhoha_set_gdm2_loopback(struct airoha_gdm_port *port) 1686 { 1687 u32 pse_port = port->id == 3 ? FE_PSE_PORT_GDM3 : FE_PSE_PORT_GDM4; 1688 struct airoha_eth *eth = port->qdma->eth; 1689 u32 chan = port->id == 3 ? 
4 : 0; 1690 1691 /* Forward the traffic to the proper GDM port */ 1692 airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(2), pse_port); 1693 airoha_fe_clear(eth, REG_GDM_FWD_CFG(2), GDM_STRIP_CRC); 1694 1695 /* Enable GDM2 loopback */ 1696 airoha_fe_wr(eth, REG_GDM_TXCHN_EN(2), 0xffffffff); 1697 airoha_fe_wr(eth, REG_GDM_RXCHN_EN(2), 0xffff); 1698 airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(2), 1699 LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK, 1700 FIELD_PREP(LPBK_CHAN_MASK, chan) | LPBK_EN_MASK); 1701 airoha_fe_rmw(eth, REG_GDM_LEN_CFG(2), 1702 GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK, 1703 FIELD_PREP(GDM_SHORT_LEN_MASK, 60) | 1704 FIELD_PREP(GDM_LONG_LEN_MASK, AIROHA_MAX_MTU)); 1705 1706 /* Disable VIP and IFC for GDM2 */ 1707 airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(2)); 1708 airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(2)); 1709 1710 if (port->id == 3) { 1711 /* FIXME: handle XSI_PCE1_PORT */ 1712 airoha_fe_rmw(eth, REG_FE_WAN_PORT, 1713 WAN1_EN_MASK | WAN1_MASK | WAN0_MASK, 1714 FIELD_PREP(WAN0_MASK, HSGMII_LAN_PCIE0_SRCPORT)); 1715 airoha_fe_rmw(eth, 1716 REG_SP_DFT_CPORT(HSGMII_LAN_PCIE0_SRCPORT >> 3), 1717 SP_CPORT_PCIE0_MASK, 1718 FIELD_PREP(SP_CPORT_PCIE0_MASK, 1719 FE_PSE_PORT_CDM2)); 1720 } else { 1721 /* FIXME: handle XSI_USB_PORT */ 1722 airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6, 1723 FC_ID_OF_SRC_PORT24_MASK, 1724 FIELD_PREP(FC_ID_OF_SRC_PORT24_MASK, 2)); 1725 airoha_fe_rmw(eth, REG_FE_WAN_PORT, 1726 WAN1_EN_MASK | WAN1_MASK | WAN0_MASK, 1727 FIELD_PREP(WAN0_MASK, HSGMII_LAN_ETH_SRCPORT)); 1728 airoha_fe_rmw(eth, 1729 REG_SP_DFT_CPORT(HSGMII_LAN_ETH_SRCPORT >> 3), 1730 SP_CPORT_ETH_MASK, 1731 FIELD_PREP(SP_CPORT_ETH_MASK, FE_PSE_PORT_CDM2)); 1732 } 1733 } 1734 1735 static int airoha_dev_init(struct net_device *dev) 1736 { 1737 struct airoha_gdm_port *port = netdev_priv(dev); 1738 struct airoha_eth *eth = port->qdma->eth; 1739 u32 pse_port; 1740 1741 airoha_set_macaddr(port, dev->dev_addr); 1742 1743 switch (port->id) { 1744 case 3: 1745 case 4: 1746 /* If GDM2 is active we can't enable loopback */ 1747 if (!eth->ports[1]) 1748 airhoha_set_gdm2_loopback(port); 1749 fallthrough; 1750 case 2: 1751 pse_port = FE_PSE_PORT_PPE2; 1752 break; 1753 default: 1754 pse_port = FE_PSE_PORT_PPE1; 1755 break; 1756 } 1757 1758 airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id), pse_port); 1759 1760 return 0; 1761 } 1762 1763 static void airoha_dev_get_stats64(struct net_device *dev, 1764 struct rtnl_link_stats64 *storage) 1765 { 1766 struct airoha_gdm_port *port = netdev_priv(dev); 1767 unsigned int start; 1768 1769 airoha_update_hw_stats(port); 1770 do { 1771 start = u64_stats_fetch_begin(&port->stats.syncp); 1772 storage->rx_packets = port->stats.rx_ok_pkts; 1773 storage->tx_packets = port->stats.tx_ok_pkts; 1774 storage->rx_bytes = port->stats.rx_ok_bytes; 1775 storage->tx_bytes = port->stats.tx_ok_bytes; 1776 storage->multicast = port->stats.rx_multicast; 1777 storage->rx_errors = port->stats.rx_errors; 1778 storage->rx_dropped = port->stats.rx_drops; 1779 storage->tx_dropped = port->stats.tx_drops; 1780 storage->rx_crc_errors = port->stats.rx_crc_error; 1781 storage->rx_over_errors = port->stats.rx_over_errors; 1782 } while (u64_stats_fetch_retry(&port->stats.syncp, start)); 1783 } 1784 1785 static int airoha_dev_change_mtu(struct net_device *dev, int mtu) 1786 { 1787 struct airoha_gdm_port *port = netdev_priv(dev); 1788 struct airoha_eth *eth = port->qdma->eth; 1789 u32 len = ETH_HLEN + mtu + ETH_FCS_LEN; 1790 1791 airoha_fe_rmw(eth, REG_GDM_LEN_CFG(port->id), 1792 GDM_LONG_LEN_MASK, 
1793 FIELD_PREP(GDM_LONG_LEN_MASK, len)); 1794 WRITE_ONCE(dev->mtu, mtu); 1795 1796 return 0; 1797 } 1798 1799 static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb, 1800 struct net_device *sb_dev) 1801 { 1802 struct airoha_gdm_port *port = netdev_priv(dev); 1803 int queue, channel; 1804 1805 /* For dsa device select QoS channel according to the dsa user port 1806 * index, rely on port id otherwise. Select QoS queue based on the 1807 * skb priority. 1808 */ 1809 channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id; 1810 channel = channel % AIROHA_NUM_QOS_CHANNELS; 1811 queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */ 1812 queue = channel * AIROHA_NUM_QOS_QUEUES + queue; 1813 1814 return queue < dev->num_tx_queues ? queue : 0; 1815 } 1816 1817 static u32 airoha_get_dsa_tag(struct sk_buff *skb, struct net_device *dev) 1818 { 1819 #if IS_ENABLED(CONFIG_NET_DSA) 1820 struct ethhdr *ehdr; 1821 u8 xmit_tpid; 1822 u16 tag; 1823 1824 if (!netdev_uses_dsa(dev)) 1825 return 0; 1826 1827 if (dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK) 1828 return 0; 1829 1830 if (skb_cow_head(skb, 0)) 1831 return 0; 1832 1833 ehdr = (struct ethhdr *)skb->data; 1834 tag = be16_to_cpu(ehdr->h_proto); 1835 xmit_tpid = tag >> 8; 1836 1837 switch (xmit_tpid) { 1838 case MTK_HDR_XMIT_TAGGED_TPID_8100: 1839 ehdr->h_proto = cpu_to_be16(ETH_P_8021Q); 1840 tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_8100 << 8); 1841 break; 1842 case MTK_HDR_XMIT_TAGGED_TPID_88A8: 1843 ehdr->h_proto = cpu_to_be16(ETH_P_8021AD); 1844 tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_88A8 << 8); 1845 break; 1846 default: 1847 /* PPE module requires untagged DSA packets to work properly, 1848 * so move DSA tag to DMA descriptor. 1849 */ 1850 memmove(skb->data + MTK_HDR_LEN, skb->data, 2 * ETH_ALEN); 1851 __skb_pull(skb, MTK_HDR_LEN); 1852 break; 1853 } 1854 1855 return tag; 1856 #else 1857 return 0; 1858 #endif 1859 } 1860 1861 static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, 1862 struct net_device *dev) 1863 { 1864 struct airoha_gdm_port *port = netdev_priv(dev); 1865 struct airoha_qdma *qdma = port->qdma; 1866 u32 nr_frags, tag, msg0, msg1, len; 1867 struct netdev_queue *txq; 1868 struct airoha_queue *q; 1869 void *data; 1870 int i, qid; 1871 u16 index; 1872 u8 fport; 1873 1874 qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx); 1875 tag = airoha_get_dsa_tag(skb, dev); 1876 1877 msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK, 1878 qid / AIROHA_NUM_QOS_QUEUES) | 1879 FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK, 1880 qid % AIROHA_NUM_QOS_QUEUES) | 1881 FIELD_PREP(QDMA_ETH_TXMSG_SP_TAG_MASK, tag); 1882 if (skb->ip_summed == CHECKSUM_PARTIAL) 1883 msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) | 1884 FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) | 1885 FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1); 1886 1887 /* TSO: fill MSS info in tcp checksum field */ 1888 if (skb_is_gso(skb)) { 1889 if (skb_cow_head(skb, 0)) 1890 goto error; 1891 1892 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | 1893 SKB_GSO_TCPV6)) { 1894 __be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size); 1895 1896 tcp_hdr(skb)->check = (__force __sum16)csum; 1897 msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1); 1898 } 1899 } 1900 1901 fport = port->id == 4 ? 
FE_PSE_PORT_GDM4 : port->id; 1902 msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) | 1903 FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f); 1904 1905 q = &qdma->q_tx[qid]; 1906 if (WARN_ON_ONCE(!q->ndesc)) 1907 goto error; 1908 1909 spin_lock_bh(&q->lock); 1910 1911 txq = netdev_get_tx_queue(dev, qid); 1912 nr_frags = 1 + skb_shinfo(skb)->nr_frags; 1913 1914 if (q->queued + nr_frags > q->ndesc) { 1915 /* not enough space in the queue */ 1916 netif_tx_stop_queue(txq); 1917 spin_unlock_bh(&q->lock); 1918 return NETDEV_TX_BUSY; 1919 } 1920 1921 len = skb_headlen(skb); 1922 data = skb->data; 1923 index = q->head; 1924 1925 for (i = 0; i < nr_frags; i++) { 1926 struct airoha_qdma_desc *desc = &q->desc[index]; 1927 struct airoha_queue_entry *e = &q->entry[index]; 1928 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1929 dma_addr_t addr; 1930 u32 val; 1931 1932 addr = dma_map_single(dev->dev.parent, data, len, 1933 DMA_TO_DEVICE); 1934 if (unlikely(dma_mapping_error(dev->dev.parent, addr))) 1935 goto error_unmap; 1936 1937 index = (index + 1) % q->ndesc; 1938 1939 val = FIELD_PREP(QDMA_DESC_LEN_MASK, len); 1940 if (i < nr_frags - 1) 1941 val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1); 1942 WRITE_ONCE(desc->ctrl, cpu_to_le32(val)); 1943 WRITE_ONCE(desc->addr, cpu_to_le32(addr)); 1944 val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index); 1945 WRITE_ONCE(desc->data, cpu_to_le32(val)); 1946 WRITE_ONCE(desc->msg0, cpu_to_le32(msg0)); 1947 WRITE_ONCE(desc->msg1, cpu_to_le32(msg1)); 1948 WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff)); 1949 1950 e->skb = i ? NULL : skb; 1951 e->dma_addr = addr; 1952 e->dma_len = len; 1953 1954 data = skb_frag_address(frag); 1955 len = skb_frag_size(frag); 1956 } 1957 1958 q->head = index; 1959 q->queued += i; 1960 1961 skb_tx_timestamp(skb); 1962 netdev_tx_sent_queue(txq, skb->len); 1963 1964 if (netif_xmit_stopped(txq) || !netdev_xmit_more()) 1965 airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), 1966 TX_RING_CPU_IDX_MASK, 1967 FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head)); 1968 1969 if (q->ndesc - q->queued < q->free_thr) 1970 netif_tx_stop_queue(txq); 1971 1972 spin_unlock_bh(&q->lock); 1973 1974 return NETDEV_TX_OK; 1975 1976 error_unmap: 1977 for (i--; i >= 0; i--) { 1978 index = (q->head + i) % q->ndesc; 1979 dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr, 1980 q->entry[index].dma_len, DMA_TO_DEVICE); 1981 } 1982 1983 spin_unlock_bh(&q->lock); 1984 error: 1985 dev_kfree_skb_any(skb); 1986 dev->stats.tx_dropped++; 1987 1988 return NETDEV_TX_OK; 1989 } 1990 1991 static void airoha_ethtool_get_drvinfo(struct net_device *dev, 1992 struct ethtool_drvinfo *info) 1993 { 1994 struct airoha_gdm_port *port = netdev_priv(dev); 1995 struct airoha_eth *eth = port->qdma->eth; 1996 1997 strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver)); 1998 strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info)); 1999 } 2000 2001 static void airoha_ethtool_get_mac_stats(struct net_device *dev, 2002 struct ethtool_eth_mac_stats *stats) 2003 { 2004 struct airoha_gdm_port *port = netdev_priv(dev); 2005 unsigned int start; 2006 2007 airoha_update_hw_stats(port); 2008 do { 2009 start = u64_stats_fetch_begin(&port->stats.syncp); 2010 stats->MulticastFramesXmittedOK = port->stats.tx_multicast; 2011 stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast; 2012 stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast; 2013 } while (u64_stats_fetch_retry(&port->stats.syncp, start)); 2014 } 2015 2016 static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = { 2017 { 
0, 64 }, 2018 { 65, 127 }, 2019 { 128, 255 }, 2020 { 256, 511 }, 2021 { 512, 1023 }, 2022 { 1024, 1518 }, 2023 { 1519, 10239 }, 2024 {}, 2025 }; 2026 2027 static void 2028 airoha_ethtool_get_rmon_stats(struct net_device *dev, 2029 struct ethtool_rmon_stats *stats, 2030 const struct ethtool_rmon_hist_range **ranges) 2031 { 2032 struct airoha_gdm_port *port = netdev_priv(dev); 2033 struct airoha_hw_stats *hw_stats = &port->stats; 2034 unsigned int start; 2035 2036 BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) != 2037 ARRAY_SIZE(hw_stats->tx_len) + 1); 2038 BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) != 2039 ARRAY_SIZE(hw_stats->rx_len) + 1); 2040 2041 *ranges = airoha_ethtool_rmon_ranges; 2042 airoha_update_hw_stats(port); 2043 do { 2044 int i; 2045 2046 start = u64_stats_fetch_begin(&port->stats.syncp); 2047 stats->fragments = hw_stats->rx_fragment; 2048 stats->jabbers = hw_stats->rx_jabber; 2049 for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1; 2050 i++) { 2051 stats->hist[i] = hw_stats->rx_len[i]; 2052 stats->hist_tx[i] = hw_stats->tx_len[i]; 2053 } 2054 } while (u64_stats_fetch_retry(&port->stats.syncp, start)); 2055 } 2056 2057 static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port, 2058 int channel, enum tx_sched_mode mode, 2059 const u16 *weights, u8 n_weights) 2060 { 2061 int i; 2062 2063 for (i = 0; i < AIROHA_NUM_TX_RING; i++) 2064 airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel), 2065 TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i)); 2066 2067 for (i = 0; i < n_weights; i++) { 2068 u32 status; 2069 int err; 2070 2071 airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG, 2072 TWRR_RW_CMD_MASK | 2073 FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) | 2074 FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) | 2075 FIELD_PREP(TWRR_VALUE_MASK, weights[i])); 2076 err = read_poll_timeout(airoha_qdma_rr, status, 2077 status & TWRR_RW_CMD_DONE, 2078 USEC_PER_MSEC, 10 * USEC_PER_MSEC, 2079 true, port->qdma, 2080 REG_TXWRR_WEIGHT_CFG); 2081 if (err) 2082 return err; 2083 } 2084 2085 airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3), 2086 CHAN_QOS_MODE_MASK(channel), 2087 mode << __ffs(CHAN_QOS_MODE_MASK(channel))); 2088 2089 return 0; 2090 } 2091 2092 static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port, 2093 int channel) 2094 { 2095 static const u16 w[AIROHA_NUM_QOS_QUEUES] = {}; 2096 2097 return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w, 2098 ARRAY_SIZE(w)); 2099 } 2100 2101 static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port, 2102 int channel, 2103 struct tc_ets_qopt_offload *opt) 2104 { 2105 struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params; 2106 enum tx_sched_mode mode = TC_SCH_SP; 2107 u16 w[AIROHA_NUM_QOS_QUEUES] = {}; 2108 int i, nstrict = 0; 2109 2110 if (p->bands > AIROHA_NUM_QOS_QUEUES) 2111 return -EINVAL; 2112 2113 for (i = 0; i < p->bands; i++) { 2114 if (!p->quanta[i]) 2115 nstrict++; 2116 } 2117 2118 /* this configuration is not supported by the hw */ 2119 if (nstrict == AIROHA_NUM_QOS_QUEUES - 1) 2120 return -EINVAL; 2121 2122 /* EN7581 SoC supports fixed QoS band priority where WRR queues have 2123 * lowest priorities with respect to SP ones. 
2124 * e.g: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn 2125 */ 2126 for (i = 0; i < nstrict; i++) { 2127 if (p->priomap[p->bands - i - 1] != i) 2128 return -EINVAL; 2129 } 2130 2131 for (i = 0; i < p->bands - nstrict; i++) { 2132 if (p->priomap[i] != nstrict + i) 2133 return -EINVAL; 2134 2135 w[i] = p->weights[nstrict + i]; 2136 } 2137 2138 if (!nstrict) 2139 mode = TC_SCH_WRR8; 2140 else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1) 2141 mode = nstrict + 1; 2142 2143 return airoha_qdma_set_chan_tx_sched(port, channel, mode, w, 2144 ARRAY_SIZE(w)); 2145 } 2146 2147 static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port, 2148 int channel, 2149 struct tc_ets_qopt_offload *opt) 2150 { 2151 u64 cpu_tx_packets = airoha_qdma_rr(port->qdma, 2152 REG_CNTR_VAL(channel << 1)); 2153 u64 fwd_tx_packets = airoha_qdma_rr(port->qdma, 2154 REG_CNTR_VAL((channel << 1) + 1)); 2155 u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) + 2156 (fwd_tx_packets - port->fwd_tx_packets); 2157 _bstats_update(opt->stats.bstats, 0, tx_packets); 2158 2159 port->cpu_tx_packets = cpu_tx_packets; 2160 port->fwd_tx_packets = fwd_tx_packets; 2161 2162 return 0; 2163 } 2164 2165 static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port, 2166 struct tc_ets_qopt_offload *opt) 2167 { 2168 int channel; 2169 2170 if (opt->parent == TC_H_ROOT) 2171 return -EINVAL; 2172 2173 channel = TC_H_MAJ(opt->handle) >> 16; 2174 channel = channel % AIROHA_NUM_QOS_CHANNELS; 2175 2176 switch (opt->command) { 2177 case TC_ETS_REPLACE: 2178 return airoha_qdma_set_tx_ets_sched(port, channel, opt); 2179 case TC_ETS_DESTROY: 2180 /* PRIO is default qdisc scheduler */ 2181 return airoha_qdma_set_tx_prio_sched(port, channel); 2182 case TC_ETS_STATS: 2183 return airoha_qdma_get_tx_ets_stats(port, channel, opt); 2184 default: 2185 return -EOPNOTSUPP; 2186 } 2187 } 2188 2189 static int airoha_qdma_get_rl_param(struct airoha_qdma *qdma, int queue_id, 2190 u32 addr, enum trtcm_param_type param, 2191 u32 *val_low, u32 *val_high) 2192 { 2193 u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id); 2194 u32 val, config = FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) | 2195 FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) | 2196 FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx); 2197 2198 airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config); 2199 if (read_poll_timeout(airoha_qdma_rr, val, 2200 val & RATE_LIMIT_PARAM_RW_DONE_MASK, 2201 USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, qdma, 2202 REG_TRTCM_CFG_PARAM(addr))) 2203 return -ETIMEDOUT; 2204 2205 *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr)); 2206 if (val_high) 2207 *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr)); 2208 2209 return 0; 2210 } 2211 2212 static int airoha_qdma_set_rl_param(struct airoha_qdma *qdma, int queue_id, 2213 u32 addr, enum trtcm_param_type param, 2214 u32 val) 2215 { 2216 u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id); 2217 u32 config = RATE_LIMIT_PARAM_RW_MASK | 2218 FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) | 2219 FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) | 2220 FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx); 2221 2222 airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val); 2223 airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config); 2224 2225 return read_poll_timeout(airoha_qdma_rr, val, 2226 val & RATE_LIMIT_PARAM_RW_DONE_MASK, 2227 USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, 2228 qdma, REG_TRTCM_CFG_PARAM(addr)); 2229 } 2230 2231 static int airoha_qdma_set_rl_config(struct airoha_qdma *qdma, int queue_id, 
2232 u32 addr, bool enable, u32 enable_mask) 2233 { 2234 u32 val; 2235 int err; 2236 2237 err = airoha_qdma_get_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE, 2238 &val, NULL); 2239 if (err) 2240 return err; 2241 2242 val = enable ? val | enable_mask : val & ~enable_mask; 2243 2244 return airoha_qdma_set_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE, 2245 val); 2246 } 2247 2248 static int airoha_qdma_set_rl_token_bucket(struct airoha_qdma *qdma, 2249 int queue_id, u32 rate_val, 2250 u32 bucket_size) 2251 { 2252 u32 val, config, tick, unit, rate, rate_frac; 2253 int err; 2254 2255 err = airoha_qdma_get_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG, 2256 TRTCM_MISC_MODE, &config, NULL); 2257 if (err) 2258 return err; 2259 2260 val = airoha_qdma_rr(qdma, REG_INGRESS_TRTCM_CFG); 2261 tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val); 2262 if (config & TRTCM_TICK_SEL) 2263 tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val); 2264 if (!tick) 2265 return -EINVAL; 2266 2267 unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick; 2268 if (!unit) 2269 return -EINVAL; 2270 2271 rate = rate_val / unit; 2272 rate_frac = rate_val % unit; 2273 rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit; 2274 rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) | 2275 FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac); 2276 2277 err = airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG, 2278 TRTCM_TOKEN_RATE_MODE, rate); 2279 if (err) 2280 return err; 2281 2282 val = bucket_size; 2283 if (!(config & TRTCM_PKT_MODE)) 2284 val = max_t(u32, val, MIN_TOKEN_SIZE); 2285 val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET); 2286 2287 return airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG, 2288 TRTCM_BUCKETSIZE_SHIFT_MODE, val); 2289 } 2290 2291 static int airoha_qdma_init_rl_config(struct airoha_qdma *qdma, int queue_id, 2292 bool enable, enum trtcm_unit_type unit) 2293 { 2294 bool tick_sel = queue_id == 0 || queue_id == 2 || queue_id == 8; 2295 enum trtcm_param mode = TRTCM_METER_MODE; 2296 int err; 2297 2298 mode |= unit == TRTCM_PACKET_UNIT ? 
TRTCM_PKT_MODE : 0; 2299 err = airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG, 2300 enable, mode); 2301 if (err) 2302 return err; 2303 2304 return airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG, 2305 tick_sel, TRTCM_TICK_SEL); 2306 } 2307 2308 static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel, 2309 u32 addr, enum trtcm_param_type param, 2310 enum trtcm_mode_type mode, 2311 u32 *val_low, u32 *val_high) 2312 { 2313 u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel); 2314 u32 val, config = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) | 2315 FIELD_PREP(TRTCM_METER_GROUP_MASK, group) | 2316 FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) | 2317 FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode); 2318 2319 airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config); 2320 if (read_poll_timeout(airoha_qdma_rr, val, 2321 val & TRTCM_PARAM_RW_DONE_MASK, 2322 USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, 2323 qdma, REG_TRTCM_CFG_PARAM(addr))) 2324 return -ETIMEDOUT; 2325 2326 *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr)); 2327 if (val_high) 2328 *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr)); 2329 2330 return 0; 2331 } 2332 2333 static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel, 2334 u32 addr, enum trtcm_param_type param, 2335 enum trtcm_mode_type mode, u32 val) 2336 { 2337 u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel); 2338 u32 config = TRTCM_PARAM_RW_MASK | 2339 FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) | 2340 FIELD_PREP(TRTCM_METER_GROUP_MASK, group) | 2341 FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) | 2342 FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode); 2343 2344 airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val); 2345 airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config); 2346 2347 return read_poll_timeout(airoha_qdma_rr, val, 2348 val & TRTCM_PARAM_RW_DONE_MASK, 2349 USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, 2350 qdma, REG_TRTCM_CFG_PARAM(addr)); 2351 } 2352 2353 static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel, 2354 u32 addr, enum trtcm_mode_type mode, 2355 bool enable, u32 enable_mask) 2356 { 2357 u32 val; 2358 2359 if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE, 2360 mode, &val, NULL)) 2361 return -EINVAL; 2362 2363 val = enable ? val | enable_mask : val & ~enable_mask; 2364 2365 return airoha_qdma_set_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE, 2366 mode, val); 2367 } 2368 2369 static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma, 2370 int channel, u32 addr, 2371 enum trtcm_mode_type mode, 2372 u32 rate_val, u32 bucket_size) 2373 { 2374 u32 val, config, tick, unit, rate, rate_frac; 2375 int err; 2376 2377 if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE, 2378 mode, &config, NULL)) 2379 return -EINVAL; 2380 2381 val = airoha_qdma_rr(qdma, addr); 2382 tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val); 2383 if (config & TRTCM_TICK_SEL) 2384 tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val); 2385 if (!tick) 2386 return -EINVAL; 2387 2388 unit = (config & TRTCM_PKT_MODE) ? 
1000000 / tick : 8000 / tick;
	if (!unit)
		return -EINVAL;

	rate = rate_val / unit;
	rate_frac = rate_val % unit;
	rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
	rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
	       FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);

	err = airoha_qdma_set_trtcm_param(qdma, channel, addr,
					  TRTCM_TOKEN_RATE_MODE, mode, rate);
	if (err)
		return err;

	val = max_t(u32, bucket_size, MIN_TOKEN_SIZE);
	val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);

	return airoha_qdma_set_trtcm_param(qdma, channel, addr,
					   TRTCM_BUCKETSIZE_SHIFT_MODE,
					   mode, val);
}

static int airoha_qdma_set_tx_rate_limit(struct airoha_gdm_port *port,
					 int channel, u32 rate,
					 u32 bucket_size)
{
	int i, err;

	for (i = 0; i <= TRTCM_PEAK_MODE; i++) {
		err = airoha_qdma_set_trtcm_config(port->qdma, channel,
						   REG_EGRESS_TRTCM_CFG, i,
						   !!rate, TRTCM_METER_MODE);
		if (err)
			return err;

		err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel,
							 REG_EGRESS_TRTCM_CFG,
							 i, rate, bucket_size);
		if (err)
			return err;
	}

	return 0;
}

static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
					  struct tc_htb_qopt_offload *opt)
{
	u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
	u32 rate = div_u64(opt->rate, 1000) << 3; /* kbps */
	struct net_device *dev = port->dev;
	int num_tx_queues = dev->real_num_tx_queues;
	int err;

	if (opt->parent_classid != TC_HTB_CLASSID_ROOT) {
		NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid");
		return -EINVAL;
	}

	err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum);
	if (err) {
		NL_SET_ERR_MSG_MOD(opt->extack,
				   "failed configuring htb offload");
		return err;
	}

	if (opt->command == TC_HTB_NODE_MODIFY)
		return 0;

	err = netif_set_real_num_tx_queues(dev, num_tx_queues + 1);
	if (err) {
		airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum);
		NL_SET_ERR_MSG_MOD(opt->extack,
				   "failed setting real_num_tx_queues");
		return err;
	}

	set_bit(channel, port->qos_sq_bmap);
	opt->qid = AIROHA_NUM_TX_RING + channel;

	return 0;
}

/* Apply the policer configuration to all active hw rx queues */
static int airoha_qdma_set_rx_meter(struct airoha_gdm_port *port,
				    u32 rate, u32 bucket_size,
				    enum trtcm_unit_type unit_type)
{
	struct airoha_qdma *qdma = port->qdma;
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		int err;

		if (!qdma->q_rx[i].ndesc)
			continue;

		err = airoha_qdma_init_rl_config(qdma, i, !!rate, unit_type);
		if (err)
			return err;

		err = airoha_qdma_set_rl_token_bucket(qdma, i, rate,
						      bucket_size);
		if (err)
			return err;
	}

	return 0;
}

static int airoha_tc_matchall_act_validate(struct tc_cls_matchall_offload *f)
{
	const struct flow_action *actions = &f->rule->action;
	const struct flow_action_entry *act;

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "filter run with no actions");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "only one action per filter is supported");
		return -EOPNOTSUPP;
	}

	act = &actions->entries[0];
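	/* Only a single police action can be offloaded: the checks below
	 * require traffic exceeding the configured rate to be dropped,
	 * conforming traffic to be accepted, and no peakrate/avrate/
	 * overhead/mtu parameters to be set.
	 */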
if (act->id != FLOW_ACTION_POLICE) { 2517 NL_SET_ERR_MSG_MOD(f->common.extack, "unsupported action"); 2518 return -EOPNOTSUPP; 2519 } 2520 2521 if (act->police.exceed.act_id != FLOW_ACTION_DROP) { 2522 NL_SET_ERR_MSG_MOD(f->common.extack, 2523 "invalid exceed action id"); 2524 return -EOPNOTSUPP; 2525 } 2526 2527 if (act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { 2528 NL_SET_ERR_MSG_MOD(f->common.extack, 2529 "invalid notexceed action id"); 2530 return -EOPNOTSUPP; 2531 } 2532 2533 if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && 2534 !flow_action_is_last_entry(actions, act)) { 2535 NL_SET_ERR_MSG_MOD(f->common.extack, 2536 "action accept must be last"); 2537 return -EOPNOTSUPP; 2538 } 2539 2540 if (act->police.peakrate_bytes_ps || act->police.avrate || 2541 act->police.overhead || act->police.mtu) { 2542 NL_SET_ERR_MSG_MOD(f->common.extack, 2543 "peakrate/avrate/overhead/mtu unsupported"); 2544 return -EOPNOTSUPP; 2545 } 2546 2547 return 0; 2548 } 2549 2550 static int airoha_dev_tc_matchall(struct net_device *dev, 2551 struct tc_cls_matchall_offload *f) 2552 { 2553 enum trtcm_unit_type unit_type = TRTCM_BYTE_UNIT; 2554 struct airoha_gdm_port *port = netdev_priv(dev); 2555 u32 rate = 0, bucket_size = 0; 2556 2557 switch (f->command) { 2558 case TC_CLSMATCHALL_REPLACE: { 2559 const struct flow_action_entry *act; 2560 int err; 2561 2562 err = airoha_tc_matchall_act_validate(f); 2563 if (err) 2564 return err; 2565 2566 act = &f->rule->action.entries[0]; 2567 if (act->police.rate_pkt_ps) { 2568 rate = act->police.rate_pkt_ps; 2569 bucket_size = act->police.burst_pkt; 2570 unit_type = TRTCM_PACKET_UNIT; 2571 } else { 2572 rate = div_u64(act->police.rate_bytes_ps, 1000); 2573 rate = rate << 3; /* Kbps */ 2574 bucket_size = act->police.burst; 2575 } 2576 fallthrough; 2577 } 2578 case TC_CLSMATCHALL_DESTROY: 2579 return airoha_qdma_set_rx_meter(port, rate, bucket_size, 2580 unit_type); 2581 default: 2582 return -EOPNOTSUPP; 2583 } 2584 } 2585 2586 static int airoha_dev_setup_tc_block_cb(enum tc_setup_type type, 2587 void *type_data, void *cb_priv) 2588 { 2589 struct net_device *dev = cb_priv; 2590 2591 if (!tc_can_offload(dev)) 2592 return -EOPNOTSUPP; 2593 2594 switch (type) { 2595 case TC_SETUP_CLSFLOWER: 2596 return airoha_ppe_setup_tc_block_cb(dev, type_data); 2597 case TC_SETUP_CLSMATCHALL: 2598 return airoha_dev_tc_matchall(dev, type_data); 2599 default: 2600 return -EOPNOTSUPP; 2601 } 2602 } 2603 2604 static int airoha_dev_setup_tc_block(struct airoha_gdm_port *port, 2605 struct flow_block_offload *f) 2606 { 2607 flow_setup_cb_t *cb = airoha_dev_setup_tc_block_cb; 2608 static LIST_HEAD(block_cb_list); 2609 struct flow_block_cb *block_cb; 2610 2611 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 2612 return -EOPNOTSUPP; 2613 2614 f->driver_block_list = &block_cb_list; 2615 switch (f->command) { 2616 case FLOW_BLOCK_BIND: 2617 block_cb = flow_block_cb_lookup(f->block, cb, port->dev); 2618 if (block_cb) { 2619 flow_block_cb_incref(block_cb); 2620 return 0; 2621 } 2622 block_cb = flow_block_cb_alloc(cb, port->dev, port->dev, NULL); 2623 if (IS_ERR(block_cb)) 2624 return PTR_ERR(block_cb); 2625 2626 flow_block_cb_incref(block_cb); 2627 flow_block_cb_add(block_cb, f); 2628 list_add_tail(&block_cb->driver_list, &block_cb_list); 2629 return 0; 2630 case FLOW_BLOCK_UNBIND: 2631 block_cb = flow_block_cb_lookup(f->block, cb, port->dev); 2632 if (!block_cb) 2633 return -ENOENT; 2634 2635 if (!flow_block_cb_decref(block_cb)) { 2636 flow_block_cb_remove(block_cb, f); 2637 
list_del(&block_cb->driver_list); 2638 } 2639 return 0; 2640 default: 2641 return -EOPNOTSUPP; 2642 } 2643 } 2644 2645 static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue) 2646 { 2647 struct net_device *dev = port->dev; 2648 2649 netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1); 2650 airoha_qdma_set_tx_rate_limit(port, queue + 1, 0, 0); 2651 clear_bit(queue, port->qos_sq_bmap); 2652 } 2653 2654 static int airoha_tc_htb_delete_leaf_queue(struct airoha_gdm_port *port, 2655 struct tc_htb_qopt_offload *opt) 2656 { 2657 u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS; 2658 2659 if (!test_bit(channel, port->qos_sq_bmap)) { 2660 NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id"); 2661 return -EINVAL; 2662 } 2663 2664 airoha_tc_remove_htb_queue(port, channel); 2665 2666 return 0; 2667 } 2668 2669 static int airoha_tc_htb_destroy(struct airoha_gdm_port *port) 2670 { 2671 int q; 2672 2673 for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS) 2674 airoha_tc_remove_htb_queue(port, q); 2675 2676 return 0; 2677 } 2678 2679 static int airoha_tc_get_htb_get_leaf_queue(struct airoha_gdm_port *port, 2680 struct tc_htb_qopt_offload *opt) 2681 { 2682 u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS; 2683 2684 if (!test_bit(channel, port->qos_sq_bmap)) { 2685 NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id"); 2686 return -EINVAL; 2687 } 2688 2689 opt->qid = AIROHA_NUM_TX_RING + channel; 2690 2691 return 0; 2692 } 2693 2694 static int airoha_tc_setup_qdisc_htb(struct airoha_gdm_port *port, 2695 struct tc_htb_qopt_offload *opt) 2696 { 2697 switch (opt->command) { 2698 case TC_HTB_CREATE: 2699 break; 2700 case TC_HTB_DESTROY: 2701 return airoha_tc_htb_destroy(port); 2702 case TC_HTB_NODE_MODIFY: 2703 case TC_HTB_LEAF_ALLOC_QUEUE: 2704 return airoha_tc_htb_alloc_leaf_queue(port, opt); 2705 case TC_HTB_LEAF_DEL: 2706 case TC_HTB_LEAF_DEL_LAST: 2707 case TC_HTB_LEAF_DEL_LAST_FORCE: 2708 return airoha_tc_htb_delete_leaf_queue(port, opt); 2709 case TC_HTB_LEAF_QUERY_QUEUE: 2710 return airoha_tc_get_htb_get_leaf_queue(port, opt); 2711 default: 2712 return -EOPNOTSUPP; 2713 } 2714 2715 return 0; 2716 } 2717 2718 static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type, 2719 void *type_data) 2720 { 2721 struct airoha_gdm_port *port = netdev_priv(dev); 2722 2723 switch (type) { 2724 case TC_SETUP_QDISC_ETS: 2725 return airoha_tc_setup_qdisc_ets(port, type_data); 2726 case TC_SETUP_QDISC_HTB: 2727 return airoha_tc_setup_qdisc_htb(port, type_data); 2728 case TC_SETUP_BLOCK: 2729 case TC_SETUP_FT: 2730 return airoha_dev_setup_tc_block(port, type_data); 2731 default: 2732 return -EOPNOTSUPP; 2733 } 2734 } 2735 2736 static const struct net_device_ops airoha_netdev_ops = { 2737 .ndo_init = airoha_dev_init, 2738 .ndo_open = airoha_dev_open, 2739 .ndo_stop = airoha_dev_stop, 2740 .ndo_change_mtu = airoha_dev_change_mtu, 2741 .ndo_select_queue = airoha_dev_select_queue, 2742 .ndo_start_xmit = airoha_dev_xmit, 2743 .ndo_get_stats64 = airoha_dev_get_stats64, 2744 .ndo_set_mac_address = airoha_dev_set_macaddr, 2745 .ndo_setup_tc = airoha_dev_tc_setup, 2746 }; 2747 2748 static const struct ethtool_ops airoha_ethtool_ops = { 2749 .get_drvinfo = airoha_ethtool_get_drvinfo, 2750 .get_eth_mac_stats = airoha_ethtool_get_mac_stats, 2751 .get_rmon_stats = airoha_ethtool_get_rmon_stats, 2752 }; 2753 2754 static int airoha_metadata_dst_alloc(struct airoha_gdm_port *port) 2755 { 2756 int i; 2757 2758 for (i = 0; i < 
ARRAY_SIZE(port->dsa_meta); i++) { 2759 struct metadata_dst *md_dst; 2760 2761 md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, 2762 GFP_KERNEL); 2763 if (!md_dst) 2764 return -ENOMEM; 2765 2766 md_dst->u.port_info.port_id = i; 2767 port->dsa_meta[i] = md_dst; 2768 } 2769 2770 return 0; 2771 } 2772 2773 static void airoha_metadata_dst_free(struct airoha_gdm_port *port) 2774 { 2775 int i; 2776 2777 for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) { 2778 if (!port->dsa_meta[i]) 2779 continue; 2780 2781 metadata_dst_free(port->dsa_meta[i]); 2782 } 2783 } 2784 2785 bool airoha_is_valid_gdm_port(struct airoha_eth *eth, 2786 struct airoha_gdm_port *port) 2787 { 2788 int i; 2789 2790 for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { 2791 if (eth->ports[i] == port) 2792 return true; 2793 } 2794 2795 return false; 2796 } 2797 2798 static int airoha_alloc_gdm_port(struct airoha_eth *eth, 2799 struct device_node *np, int index) 2800 { 2801 const __be32 *id_ptr = of_get_property(np, "reg", NULL); 2802 struct airoha_gdm_port *port; 2803 struct airoha_qdma *qdma; 2804 struct net_device *dev; 2805 int err, p; 2806 u32 id; 2807 2808 if (!id_ptr) { 2809 dev_err(eth->dev, "missing gdm port id\n"); 2810 return -EINVAL; 2811 } 2812 2813 id = be32_to_cpup(id_ptr); 2814 p = id - 1; 2815 2816 if (!id || id > ARRAY_SIZE(eth->ports)) { 2817 dev_err(eth->dev, "invalid gdm port id: %d\n", id); 2818 return -EINVAL; 2819 } 2820 2821 if (eth->ports[p]) { 2822 dev_err(eth->dev, "duplicate gdm port id: %d\n", id); 2823 return -EINVAL; 2824 } 2825 2826 dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port), 2827 AIROHA_NUM_NETDEV_TX_RINGS, 2828 AIROHA_NUM_RX_RING); 2829 if (!dev) { 2830 dev_err(eth->dev, "alloc_etherdev failed\n"); 2831 return -ENOMEM; 2832 } 2833 2834 qdma = ð->qdma[index % AIROHA_MAX_NUM_QDMA]; 2835 dev->netdev_ops = &airoha_netdev_ops; 2836 dev->ethtool_ops = &airoha_ethtool_ops; 2837 dev->max_mtu = AIROHA_MAX_MTU; 2838 dev->watchdog_timeo = 5 * HZ; 2839 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | 2840 NETIF_F_TSO6 | NETIF_F_IPV6_CSUM | 2841 NETIF_F_SG | NETIF_F_TSO | 2842 NETIF_F_HW_TC; 2843 dev->features |= dev->hw_features; 2844 dev->vlan_features = dev->hw_features; 2845 dev->dev.of_node = np; 2846 dev->irq = qdma->irq_banks[0].irq; 2847 SET_NETDEV_DEV(dev, eth->dev); 2848 2849 /* reserve hw queues for HTB offloading */ 2850 err = netif_set_real_num_tx_queues(dev, AIROHA_NUM_TX_RING); 2851 if (err) 2852 return err; 2853 2854 err = of_get_ethdev_address(np, dev); 2855 if (err) { 2856 if (err == -EPROBE_DEFER) 2857 return err; 2858 2859 eth_hw_addr_random(dev); 2860 dev_info(eth->dev, "generated random MAC address %pM\n", 2861 dev->dev_addr); 2862 } 2863 2864 port = netdev_priv(dev); 2865 u64_stats_init(&port->stats.syncp); 2866 spin_lock_init(&port->stats.lock); 2867 port->qdma = qdma; 2868 port->dev = dev; 2869 port->id = id; 2870 eth->ports[p] = port; 2871 2872 err = airoha_metadata_dst_alloc(port); 2873 if (err) 2874 return err; 2875 2876 return register_netdev(dev); 2877 } 2878 2879 static int airoha_probe(struct platform_device *pdev) 2880 { 2881 struct device_node *np; 2882 struct airoha_eth *eth; 2883 int i, err; 2884 2885 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); 2886 if (!eth) 2887 return -ENOMEM; 2888 2889 eth->dev = &pdev->dev; 2890 2891 err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32)); 2892 if (err) { 2893 dev_err(eth->dev, "failed configuring DMA mask\n"); 2894 return err; 2895 } 2896 2897 eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, 
"fe"); 2898 if (IS_ERR(eth->fe_regs)) 2899 return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs), 2900 "failed to iomap fe regs\n"); 2901 2902 eth->rsts[0].id = "fe"; 2903 eth->rsts[1].id = "pdma"; 2904 eth->rsts[2].id = "qdma"; 2905 err = devm_reset_control_bulk_get_exclusive(eth->dev, 2906 ARRAY_SIZE(eth->rsts), 2907 eth->rsts); 2908 if (err) { 2909 dev_err(eth->dev, "failed to get bulk reset lines\n"); 2910 return err; 2911 } 2912 2913 eth->xsi_rsts[0].id = "xsi-mac"; 2914 eth->xsi_rsts[1].id = "hsi0-mac"; 2915 eth->xsi_rsts[2].id = "hsi1-mac"; 2916 eth->xsi_rsts[3].id = "hsi-mac"; 2917 eth->xsi_rsts[4].id = "xfp-mac"; 2918 err = devm_reset_control_bulk_get_exclusive(eth->dev, 2919 ARRAY_SIZE(eth->xsi_rsts), 2920 eth->xsi_rsts); 2921 if (err) { 2922 dev_err(eth->dev, "failed to get bulk xsi reset lines\n"); 2923 return err; 2924 } 2925 2926 eth->napi_dev = alloc_netdev_dummy(0); 2927 if (!eth->napi_dev) 2928 return -ENOMEM; 2929 2930 /* Enable threaded NAPI by default */ 2931 eth->napi_dev->threaded = true; 2932 strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name)); 2933 platform_set_drvdata(pdev, eth); 2934 2935 err = airoha_hw_init(pdev, eth); 2936 if (err) 2937 goto error_hw_cleanup; 2938 2939 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) 2940 airoha_qdma_start_napi(ð->qdma[i]); 2941 2942 i = 0; 2943 for_each_child_of_node(pdev->dev.of_node, np) { 2944 if (!of_device_is_compatible(np, "airoha,eth-mac")) 2945 continue; 2946 2947 if (!of_device_is_available(np)) 2948 continue; 2949 2950 err = airoha_alloc_gdm_port(eth, np, i++); 2951 if (err) { 2952 of_node_put(np); 2953 goto error_napi_stop; 2954 } 2955 } 2956 2957 return 0; 2958 2959 error_napi_stop: 2960 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) 2961 airoha_qdma_stop_napi(ð->qdma[i]); 2962 error_hw_cleanup: 2963 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) 2964 airoha_hw_cleanup(ð->qdma[i]); 2965 2966 for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { 2967 struct airoha_gdm_port *port = eth->ports[i]; 2968 2969 if (port && port->dev->reg_state == NETREG_REGISTERED) { 2970 unregister_netdev(port->dev); 2971 airoha_metadata_dst_free(port); 2972 } 2973 } 2974 free_netdev(eth->napi_dev); 2975 platform_set_drvdata(pdev, NULL); 2976 2977 return err; 2978 } 2979 2980 static void airoha_remove(struct platform_device *pdev) 2981 { 2982 struct airoha_eth *eth = platform_get_drvdata(pdev); 2983 int i; 2984 2985 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) { 2986 airoha_qdma_stop_napi(ð->qdma[i]); 2987 airoha_hw_cleanup(ð->qdma[i]); 2988 } 2989 2990 for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { 2991 struct airoha_gdm_port *port = eth->ports[i]; 2992 2993 if (!port) 2994 continue; 2995 2996 airoha_dev_stop(port->dev); 2997 unregister_netdev(port->dev); 2998 airoha_metadata_dst_free(port); 2999 } 3000 free_netdev(eth->napi_dev); 3001 3002 airoha_ppe_deinit(eth); 3003 platform_set_drvdata(pdev, NULL); 3004 } 3005 3006 static const struct of_device_id of_airoha_match[] = { 3007 { .compatible = "airoha,en7581-eth" }, 3008 { /* sentinel */ } 3009 }; 3010 MODULE_DEVICE_TABLE(of, of_airoha_match); 3011 3012 static struct platform_driver airoha_driver = { 3013 .probe = airoha_probe, 3014 .remove = airoha_remove, 3015 .driver = { 3016 .name = KBUILD_MODNAME, 3017 .of_match_table = of_airoha_match, 3018 }, 3019 }; 3020 module_platform_driver(airoha_driver); 3021 3022 MODULE_LICENSE("GPL"); 3023 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); 3024 MODULE_DESCRIPTION("Ethernet driver for Airoha SoC"); 3025