1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Mediatek MT7530 DSA Switch driver 4 * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com> 5 */ 6 #include <linux/etherdevice.h> 7 #include <linux/if_bridge.h> 8 #include <linux/iopoll.h> 9 #include <linux/mdio.h> 10 #include <linux/mfd/syscon.h> 11 #include <linux/module.h> 12 #include <linux/netdevice.h> 13 #include <linux/of_irq.h> 14 #include <linux/of_mdio.h> 15 #include <linux/of_net.h> 16 #include <linux/of_platform.h> 17 #include <linux/phylink.h> 18 #include <linux/regmap.h> 19 #include <linux/regulator/consumer.h> 20 #include <linux/reset.h> 21 #include <linux/gpio/consumer.h> 22 #include <linux/gpio/driver.h> 23 #include <net/dsa.h> 24 25 #include "mt7530.h" 26 27 static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs) 28 { 29 return container_of(pcs, struct mt753x_pcs, pcs); 30 } 31 32 /* String, offset, and register size in bytes if different from 4 bytes */ 33 static const struct mt7530_mib_desc mt7530_mib[] = { 34 MIB_DESC(1, 0x00, "TxDrop"), 35 MIB_DESC(1, 0x04, "TxCrcErr"), 36 MIB_DESC(1, 0x08, "TxUnicast"), 37 MIB_DESC(1, 0x0c, "TxMulticast"), 38 MIB_DESC(1, 0x10, "TxBroadcast"), 39 MIB_DESC(1, 0x14, "TxCollision"), 40 MIB_DESC(1, 0x18, "TxSingleCollision"), 41 MIB_DESC(1, 0x1c, "TxMultipleCollision"), 42 MIB_DESC(1, 0x20, "TxDeferred"), 43 MIB_DESC(1, 0x24, "TxLateCollision"), 44 MIB_DESC(1, 0x28, "TxExcessiveCollistion"), 45 MIB_DESC(1, 0x2c, "TxPause"), 46 MIB_DESC(1, 0x30, "TxPktSz64"), 47 MIB_DESC(1, 0x34, "TxPktSz65To127"), 48 MIB_DESC(1, 0x38, "TxPktSz128To255"), 49 MIB_DESC(1, 0x3c, "TxPktSz256To511"), 50 MIB_DESC(1, 0x40, "TxPktSz512To1023"), 51 MIB_DESC(1, 0x44, "Tx1024ToMax"), 52 MIB_DESC(2, 0x48, "TxBytes"), 53 MIB_DESC(1, 0x60, "RxDrop"), 54 MIB_DESC(1, 0x64, "RxFiltering"), 55 MIB_DESC(1, 0x68, "RxUnicast"), 56 MIB_DESC(1, 0x6c, "RxMulticast"), 57 MIB_DESC(1, 0x70, "RxBroadcast"), 58 MIB_DESC(1, 0x74, "RxAlignErr"), 59 MIB_DESC(1, 0x78, "RxCrcErr"), 60 MIB_DESC(1, 0x7c, "RxUnderSizeErr"), 61 MIB_DESC(1, 0x80, "RxFragErr"), 62 MIB_DESC(1, 0x84, "RxOverSzErr"), 63 MIB_DESC(1, 0x88, "RxJabberErr"), 64 MIB_DESC(1, 0x8c, "RxPause"), 65 MIB_DESC(1, 0x90, "RxPktSz64"), 66 MIB_DESC(1, 0x94, "RxPktSz65To127"), 67 MIB_DESC(1, 0x98, "RxPktSz128To255"), 68 MIB_DESC(1, 0x9c, "RxPktSz256To511"), 69 MIB_DESC(1, 0xa0, "RxPktSz512To1023"), 70 MIB_DESC(1, 0xa4, "RxPktSz1024ToMax"), 71 MIB_DESC(2, 0xa8, "RxBytes"), 72 MIB_DESC(1, 0xb0, "RxCtrlDrop"), 73 MIB_DESC(1, 0xb4, "RxIngressDrop"), 74 MIB_DESC(1, 0xb8, "RxArlDrop"), 75 }; 76 77 /* Since phy_device has not yet been created and 78 * phy_{read,write}_mmd_indirect is not available, we provide our own 79 * core_{read,write}_mmd_indirect with core_{clear,write,set} wrappers 80 * to complete this function. 
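 *
 * The helpers below follow the standard indirect MMD access sequence over
 * Clause 22: write the MMD device address to MII_MMD_CTRL, write the target
 * register number to MII_MMD_DATA, switch MII_MMD_CTRL to the "data, no post
 * increment" function, and finally read or write the data through
 * MII_MMD_DATA.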
81 */ 82 static int 83 core_read_mmd_indirect(struct mt7530_priv *priv, int prtad, int devad) 84 { 85 struct mii_bus *bus = priv->bus; 86 int value, ret; 87 88 /* Write the desired MMD Devad */ 89 ret = bus->write(bus, 0, MII_MMD_CTRL, devad); 90 if (ret < 0) 91 goto err; 92 93 /* Write the desired MMD register address */ 94 ret = bus->write(bus, 0, MII_MMD_DATA, prtad); 95 if (ret < 0) 96 goto err; 97 98 /* Select the Function : DATA with no post increment */ 99 ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR)); 100 if (ret < 0) 101 goto err; 102 103 /* Read the content of the MMD's selected register */ 104 value = bus->read(bus, 0, MII_MMD_DATA); 105 106 return value; 107 err: 108 dev_err(&bus->dev, "failed to read mmd register\n"); 109 110 return ret; 111 } 112 113 static int 114 core_write_mmd_indirect(struct mt7530_priv *priv, int prtad, 115 int devad, u32 data) 116 { 117 struct mii_bus *bus = priv->bus; 118 int ret; 119 120 /* Write the desired MMD Devad */ 121 ret = bus->write(bus, 0, MII_MMD_CTRL, devad); 122 if (ret < 0) 123 goto err; 124 125 /* Write the desired MMD register address */ 126 ret = bus->write(bus, 0, MII_MMD_DATA, prtad); 127 if (ret < 0) 128 goto err; 129 130 /* Select the Function : DATA with no post increment */ 131 ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR)); 132 if (ret < 0) 133 goto err; 134 135 /* Write the data into MMD's selected register */ 136 ret = bus->write(bus, 0, MII_MMD_DATA, data); 137 err: 138 if (ret < 0) 139 dev_err(&bus->dev, 140 "failed to write mmd register\n"); 141 return ret; 142 } 143 144 static void 145 mt7530_mutex_lock(struct mt7530_priv *priv) 146 { 147 if (priv->bus) 148 mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED); 149 } 150 151 static void 152 mt7530_mutex_unlock(struct mt7530_priv *priv) 153 { 154 if (priv->bus) 155 mutex_unlock(&priv->bus->mdio_lock); 156 } 157 158 static void 159 core_write(struct mt7530_priv *priv, u32 reg, u32 val) 160 { 161 mt7530_mutex_lock(priv); 162 163 core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val); 164 165 mt7530_mutex_unlock(priv); 166 } 167 168 static void 169 core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set) 170 { 171 u32 val; 172 173 mt7530_mutex_lock(priv); 174 175 val = core_read_mmd_indirect(priv, reg, MDIO_MMD_VEND2); 176 val &= ~mask; 177 val |= set; 178 core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val); 179 180 mt7530_mutex_unlock(priv); 181 } 182 183 static void 184 core_set(struct mt7530_priv *priv, u32 reg, u32 val) 185 { 186 core_rmw(priv, reg, 0, val); 187 } 188 189 static void 190 core_clear(struct mt7530_priv *priv, u32 reg, u32 val) 191 { 192 core_rmw(priv, reg, val, 0); 193 } 194 195 static int 196 mt7530_mii_write(struct mt7530_priv *priv, u32 reg, u32 val) 197 { 198 int ret; 199 200 ret = regmap_write(priv->regmap, reg, val); 201 202 if (ret < 0) 203 dev_err(priv->dev, 204 "failed to write mt7530 register\n"); 205 206 return ret; 207 } 208 209 static u32 210 mt7530_mii_read(struct mt7530_priv *priv, u32 reg) 211 { 212 int ret; 213 u32 val; 214 215 ret = regmap_read(priv->regmap, reg, &val); 216 if (ret) { 217 WARN_ON_ONCE(1); 218 dev_err(priv->dev, 219 "failed to read mt7530 register\n"); 220 return 0; 221 } 222 223 return val; 224 } 225 226 static void 227 mt7530_write(struct mt7530_priv *priv, u32 reg, u32 val) 228 { 229 mt7530_mutex_lock(priv); 230 231 mt7530_mii_write(priv, reg, val); 232 233 mt7530_mutex_unlock(priv); 234 } 235 236 static u32 237 _mt7530_unlocked_read(struct mt7530_dummy_poll 
*p) 238 { 239 return mt7530_mii_read(p->priv, p->reg); 240 } 241 242 static u32 243 _mt7530_read(struct mt7530_dummy_poll *p) 244 { 245 u32 val; 246 247 mt7530_mutex_lock(p->priv); 248 249 val = mt7530_mii_read(p->priv, p->reg); 250 251 mt7530_mutex_unlock(p->priv); 252 253 return val; 254 } 255 256 static u32 257 mt7530_read(struct mt7530_priv *priv, u32 reg) 258 { 259 struct mt7530_dummy_poll p; 260 261 INIT_MT7530_DUMMY_POLL(&p, priv, reg); 262 return _mt7530_read(&p); 263 } 264 265 static void 266 mt7530_rmw(struct mt7530_priv *priv, u32 reg, 267 u32 mask, u32 set) 268 { 269 mt7530_mutex_lock(priv); 270 271 regmap_update_bits(priv->regmap, reg, mask, set); 272 273 mt7530_mutex_unlock(priv); 274 } 275 276 static void 277 mt7530_set(struct mt7530_priv *priv, u32 reg, u32 val) 278 { 279 mt7530_rmw(priv, reg, val, val); 280 } 281 282 static void 283 mt7530_clear(struct mt7530_priv *priv, u32 reg, u32 val) 284 { 285 mt7530_rmw(priv, reg, val, 0); 286 } 287 288 static int 289 mt7530_fdb_cmd(struct mt7530_priv *priv, enum mt7530_fdb_cmd cmd, u32 *rsp) 290 { 291 u32 val; 292 int ret; 293 struct mt7530_dummy_poll p; 294 295 /* Set the command operating upon the MAC address entries */ 296 val = ATC_BUSY | ATC_MAT(0) | cmd; 297 mt7530_write(priv, MT7530_ATC, val); 298 299 INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_ATC); 300 ret = readx_poll_timeout(_mt7530_read, &p, val, 301 !(val & ATC_BUSY), 20, 20000); 302 if (ret < 0) { 303 dev_err(priv->dev, "reset timeout\n"); 304 return ret; 305 } 306 307 /* Additional sanity for read command if the specified 308 * entry is invalid 309 */ 310 val = mt7530_read(priv, MT7530_ATC); 311 if ((cmd == MT7530_FDB_READ) && (val & ATC_INVALID)) 312 return -EINVAL; 313 314 if (rsp) 315 *rsp = val; 316 317 return 0; 318 } 319 320 static void 321 mt7530_fdb_read(struct mt7530_priv *priv, struct mt7530_fdb *fdb) 322 { 323 u32 reg[3]; 324 int i; 325 326 /* Read from ARL table into an array */ 327 for (i = 0; i < 3; i++) { 328 reg[i] = mt7530_read(priv, MT7530_TSRA1 + (i * 4)); 329 330 dev_dbg(priv->dev, "%s(%d) reg[%d]=0x%x\n", 331 __func__, __LINE__, i, reg[i]); 332 } 333 334 fdb->vid = (reg[1] >> CVID) & CVID_MASK; 335 fdb->aging = (reg[2] >> AGE_TIMER) & AGE_TIMER_MASK; 336 fdb->port_mask = (reg[2] >> PORT_MAP) & PORT_MAP_MASK; 337 fdb->mac[0] = (reg[0] >> MAC_BYTE_0) & MAC_BYTE_MASK; 338 fdb->mac[1] = (reg[0] >> MAC_BYTE_1) & MAC_BYTE_MASK; 339 fdb->mac[2] = (reg[0] >> MAC_BYTE_2) & MAC_BYTE_MASK; 340 fdb->mac[3] = (reg[0] >> MAC_BYTE_3) & MAC_BYTE_MASK; 341 fdb->mac[4] = (reg[1] >> MAC_BYTE_4) & MAC_BYTE_MASK; 342 fdb->mac[5] = (reg[1] >> MAC_BYTE_5) & MAC_BYTE_MASK; 343 fdb->noarp = ((reg[2] >> ENT_STATUS) & ENT_STATUS_MASK) == STATIC_ENT; 344 } 345 346 static void 347 mt7530_fdb_write(struct mt7530_priv *priv, u16 vid, 348 u8 port_mask, const u8 *mac, 349 u8 aging, u8 type) 350 { 351 u32 reg[3] = { 0 }; 352 int i; 353 354 reg[1] |= vid & CVID_MASK; 355 reg[1] |= ATA2_IVL; 356 reg[1] |= ATA2_FID(FID_BRIDGED); 357 reg[2] |= (aging & AGE_TIMER_MASK) << AGE_TIMER; 358 reg[2] |= (port_mask & PORT_MAP_MASK) << PORT_MAP; 359 /* STATIC_ENT indicate that entry is static wouldn't 360 * be aged out and STATIC_EMP specified as erasing an 361 * entry 362 */ 363 reg[2] |= (type & ENT_STATUS_MASK) << ENT_STATUS; 364 reg[1] |= mac[5] << MAC_BYTE_5; 365 reg[1] |= mac[4] << MAC_BYTE_4; 366 reg[0] |= mac[3] << MAC_BYTE_3; 367 reg[0] |= mac[2] << MAC_BYTE_2; 368 reg[0] |= mac[1] << MAC_BYTE_1; 369 reg[0] |= mac[0] << MAC_BYTE_0; 370 371 /* Write array into the ARL table */ 372 for (i = 
0; i < 3; i++) 373 mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]); 374 } 375 376 /* Set up switch core clock for MT7530 */ 377 static void mt7530_pll_setup(struct mt7530_priv *priv) 378 { 379 /* Disable core clock */ 380 core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); 381 382 /* Disable PLL */ 383 core_write(priv, CORE_GSWPLL_GRP1, 0); 384 385 /* Set core clock into 500Mhz */ 386 core_write(priv, CORE_GSWPLL_GRP2, 387 RG_GSWPLL_POSDIV_500M(1) | 388 RG_GSWPLL_FBKDIV_500M(25)); 389 390 /* Enable PLL */ 391 core_write(priv, CORE_GSWPLL_GRP1, 392 RG_GSWPLL_EN_PRE | 393 RG_GSWPLL_POSDIV_200M(2) | 394 RG_GSWPLL_FBKDIV_200M(32)); 395 396 udelay(20); 397 398 /* Enable core clock */ 399 core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); 400 } 401 402 /* If port 6 is available as a CPU port, always prefer that as the default, 403 * otherwise don't care. 404 */ 405 static struct dsa_port * 406 mt753x_preferred_default_local_cpu_port(struct dsa_switch *ds) 407 { 408 struct dsa_port *cpu_dp = dsa_to_port(ds, 6); 409 410 if (dsa_port_is_cpu(cpu_dp)) 411 return cpu_dp; 412 413 return NULL; 414 } 415 416 /* Setup port 6 interface mode and TRGMII TX circuit */ 417 static void 418 mt7530_setup_port6(struct dsa_switch *ds, phy_interface_t interface) 419 { 420 struct mt7530_priv *priv = ds->priv; 421 u32 ncpo1, ssc_delta, xtal; 422 423 /* Disable the MT7530 TRGMII clocks */ 424 core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN); 425 426 if (interface == PHY_INTERFACE_MODE_RGMII) { 427 mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK, 428 P6_INTF_MODE(0)); 429 return; 430 } 431 432 mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK, P6_INTF_MODE(1)); 433 434 xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK; 435 436 if (xtal == HWTRAP_XTAL_25MHZ) 437 ssc_delta = 0x57; 438 else 439 ssc_delta = 0x87; 440 441 if (priv->id == ID_MT7621) { 442 /* PLL frequency: 125MHz: 1.0GBit */ 443 if (xtal == HWTRAP_XTAL_40MHZ) 444 ncpo1 = 0x0640; 445 if (xtal == HWTRAP_XTAL_25MHZ) 446 ncpo1 = 0x0a00; 447 } else { /* PLL frequency: 250MHz: 2.0Gbit */ 448 if (xtal == HWTRAP_XTAL_40MHZ) 449 ncpo1 = 0x0c80; 450 if (xtal == HWTRAP_XTAL_25MHZ) 451 ncpo1 = 0x1400; 452 } 453 454 /* Setup the MT7530 TRGMII Tx Clock */ 455 core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1)); 456 core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0)); 457 core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta)); 458 core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta)); 459 core_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN | 460 RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN); 461 core_write(priv, CORE_PLL_GROUP2, RG_SYSPLL_EN_NORMAL | 462 RG_SYSPLL_VODEN | RG_SYSPLL_POSDIV(1)); 463 core_write(priv, CORE_PLL_GROUP7, RG_LCDDS_PCW_NCPO_CHG | 464 RG_LCCDS_C(3) | RG_LCDDS_PWDB | RG_LCDDS_ISO_EN); 465 466 /* Enable the MT7530 TRGMII clocks */ 467 core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN); 468 } 469 470 static void 471 mt7531_pll_setup(struct mt7530_priv *priv) 472 { 473 u32 top_sig; 474 u32 hwstrap; 475 u32 xtal; 476 u32 val; 477 478 val = mt7530_read(priv, MT7531_CREV); 479 top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR); 480 hwstrap = mt7530_read(priv, MT7531_HWTRAP); 481 if ((val & CHIP_REV_M) > 0) 482 xtal = (top_sig & PAD_MCM_SMI_EN) ? 
HWTRAP_XTAL_FSEL_40MHZ : 483 HWTRAP_XTAL_FSEL_25MHZ; 484 else 485 xtal = hwstrap & HWTRAP_XTAL_FSEL_MASK; 486 487 /* Step 1 : Disable MT7531 COREPLL */ 488 val = mt7530_read(priv, MT7531_PLLGP_EN); 489 val &= ~EN_COREPLL; 490 mt7530_write(priv, MT7531_PLLGP_EN, val); 491 492 /* Step 2: switch to XTAL output */ 493 val = mt7530_read(priv, MT7531_PLLGP_EN); 494 val |= SW_CLKSW; 495 mt7530_write(priv, MT7531_PLLGP_EN, val); 496 497 val = mt7530_read(priv, MT7531_PLLGP_CR0); 498 val &= ~RG_COREPLL_EN; 499 mt7530_write(priv, MT7531_PLLGP_CR0, val); 500 501 /* Step 3: disable PLLGP and enable program PLLGP */ 502 val = mt7530_read(priv, MT7531_PLLGP_EN); 503 val |= SW_PLLGP; 504 mt7530_write(priv, MT7531_PLLGP_EN, val); 505 506 /* Step 4: program COREPLL output frequency to 500MHz */ 507 val = mt7530_read(priv, MT7531_PLLGP_CR0); 508 val &= ~RG_COREPLL_POSDIV_M; 509 val |= 2 << RG_COREPLL_POSDIV_S; 510 mt7530_write(priv, MT7531_PLLGP_CR0, val); 511 usleep_range(25, 35); 512 513 switch (xtal) { 514 case HWTRAP_XTAL_FSEL_25MHZ: 515 val = mt7530_read(priv, MT7531_PLLGP_CR0); 516 val &= ~RG_COREPLL_SDM_PCW_M; 517 val |= 0x140000 << RG_COREPLL_SDM_PCW_S; 518 mt7530_write(priv, MT7531_PLLGP_CR0, val); 519 break; 520 case HWTRAP_XTAL_FSEL_40MHZ: 521 val = mt7530_read(priv, MT7531_PLLGP_CR0); 522 val &= ~RG_COREPLL_SDM_PCW_M; 523 val |= 0x190000 << RG_COREPLL_SDM_PCW_S; 524 mt7530_write(priv, MT7531_PLLGP_CR0, val); 525 break; 526 } 527 528 /* Set feedback divide ratio update signal to high */ 529 val = mt7530_read(priv, MT7531_PLLGP_CR0); 530 val |= RG_COREPLL_SDM_PCW_CHG; 531 mt7530_write(priv, MT7531_PLLGP_CR0, val); 532 /* Wait for at least 16 XTAL clocks */ 533 usleep_range(10, 20); 534 535 /* Step 5: set feedback divide ratio update signal to low */ 536 val = mt7530_read(priv, MT7531_PLLGP_CR0); 537 val &= ~RG_COREPLL_SDM_PCW_CHG; 538 mt7530_write(priv, MT7531_PLLGP_CR0, val); 539 540 /* Enable 325M clock for SGMII */ 541 mt7530_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000); 542 543 /* Enable 250SSC clock for RGMII */ 544 mt7530_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000); 545 546 /* Step 6: Enable MT7531 PLL */ 547 val = mt7530_read(priv, MT7531_PLLGP_CR0); 548 val |= RG_COREPLL_EN; 549 mt7530_write(priv, MT7531_PLLGP_CR0, val); 550 551 val = mt7530_read(priv, MT7531_PLLGP_EN); 552 val |= EN_COREPLL; 553 mt7530_write(priv, MT7531_PLLGP_EN, val); 554 usleep_range(25, 35); 555 } 556 557 static void 558 mt7530_mib_reset(struct dsa_switch *ds) 559 { 560 struct mt7530_priv *priv = ds->priv; 561 562 mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_FLUSH); 563 mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE); 564 } 565 566 static int mt7530_phy_read_c22(struct mt7530_priv *priv, int port, int regnum) 567 { 568 return mdiobus_read_nested(priv->bus, port, regnum); 569 } 570 571 static int mt7530_phy_write_c22(struct mt7530_priv *priv, int port, int regnum, 572 u16 val) 573 { 574 return mdiobus_write_nested(priv->bus, port, regnum, val); 575 } 576 577 static int mt7530_phy_read_c45(struct mt7530_priv *priv, int port, 578 int devad, int regnum) 579 { 580 return mdiobus_c45_read_nested(priv->bus, port, devad, regnum); 581 } 582 583 static int mt7530_phy_write_c45(struct mt7530_priv *priv, int port, int devad, 584 int regnum, u16 val) 585 { 586 return mdiobus_c45_write_nested(priv->bus, port, devad, regnum, val); 587 } 588 589 static int 590 mt7531_ind_c45_phy_read(struct mt7530_priv *priv, int port, int devad, 591 int regnum) 592 { 593 struct mt7530_dummy_poll p; 594 u32 reg, val; 595 int ret; 596 597 
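	/* Clause 45 access is done indirectly through the PHY Indirect
	 * Access Control register (MT7531_PHY_IAC): wait for any pending
	 * command to finish, issue an address cycle carrying the device and
	 * register number, then issue the read command and collect the data
	 * once the busy bit clears again.
	 */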
INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC); 598 599 mt7530_mutex_lock(priv); 600 601 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 602 !(val & MT7531_PHY_ACS_ST), 20, 100000); 603 if (ret < 0) { 604 dev_err(priv->dev, "poll timeout\n"); 605 goto out; 606 } 607 608 reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) | 609 MT7531_MDIO_DEV_ADDR(devad) | regnum; 610 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST); 611 612 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 613 !(val & MT7531_PHY_ACS_ST), 20, 100000); 614 if (ret < 0) { 615 dev_err(priv->dev, "poll timeout\n"); 616 goto out; 617 } 618 619 reg = MT7531_MDIO_CL45_READ | MT7531_MDIO_PHY_ADDR(port) | 620 MT7531_MDIO_DEV_ADDR(devad); 621 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST); 622 623 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 624 !(val & MT7531_PHY_ACS_ST), 20, 100000); 625 if (ret < 0) { 626 dev_err(priv->dev, "poll timeout\n"); 627 goto out; 628 } 629 630 ret = val & MT7531_MDIO_RW_DATA_MASK; 631 out: 632 mt7530_mutex_unlock(priv); 633 634 return ret; 635 } 636 637 static int 638 mt7531_ind_c45_phy_write(struct mt7530_priv *priv, int port, int devad, 639 int regnum, u16 data) 640 { 641 struct mt7530_dummy_poll p; 642 u32 val, reg; 643 int ret; 644 645 INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC); 646 647 mt7530_mutex_lock(priv); 648 649 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 650 !(val & MT7531_PHY_ACS_ST), 20, 100000); 651 if (ret < 0) { 652 dev_err(priv->dev, "poll timeout\n"); 653 goto out; 654 } 655 656 reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) | 657 MT7531_MDIO_DEV_ADDR(devad) | regnum; 658 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST); 659 660 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 661 !(val & MT7531_PHY_ACS_ST), 20, 100000); 662 if (ret < 0) { 663 dev_err(priv->dev, "poll timeout\n"); 664 goto out; 665 } 666 667 reg = MT7531_MDIO_CL45_WRITE | MT7531_MDIO_PHY_ADDR(port) | 668 MT7531_MDIO_DEV_ADDR(devad) | data; 669 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST); 670 671 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 672 !(val & MT7531_PHY_ACS_ST), 20, 100000); 673 if (ret < 0) { 674 dev_err(priv->dev, "poll timeout\n"); 675 goto out; 676 } 677 678 out: 679 mt7530_mutex_unlock(priv); 680 681 return ret; 682 } 683 684 static int 685 mt7531_ind_c22_phy_read(struct mt7530_priv *priv, int port, int regnum) 686 { 687 struct mt7530_dummy_poll p; 688 int ret; 689 u32 val; 690 691 INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC); 692 693 mt7530_mutex_lock(priv); 694 695 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 696 !(val & MT7531_PHY_ACS_ST), 20, 100000); 697 if (ret < 0) { 698 dev_err(priv->dev, "poll timeout\n"); 699 goto out; 700 } 701 702 val = MT7531_MDIO_CL22_READ | MT7531_MDIO_PHY_ADDR(port) | 703 MT7531_MDIO_REG_ADDR(regnum); 704 705 mt7530_mii_write(priv, MT7531_PHY_IAC, val | MT7531_PHY_ACS_ST); 706 707 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 708 !(val & MT7531_PHY_ACS_ST), 20, 100000); 709 if (ret < 0) { 710 dev_err(priv->dev, "poll timeout\n"); 711 goto out; 712 } 713 714 ret = val & MT7531_MDIO_RW_DATA_MASK; 715 out: 716 mt7530_mutex_unlock(priv); 717 718 return ret; 719 } 720 721 static int 722 mt7531_ind_c22_phy_write(struct mt7530_priv *priv, int port, int regnum, 723 u16 data) 724 { 725 struct mt7530_dummy_poll p; 726 int ret; 727 u32 reg; 728 729 INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC); 730 731 
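	/* Unlike the Clause 45 flow above, a Clause 22 write is a single IAC
	 * command: wait until the controller is idle, then issue the write
	 * opcode together with the PHY address, register number and data,
	 * and wait for the busy bit to clear.
	 */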
mt7530_mutex_lock(priv); 732 733 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg, 734 !(reg & MT7531_PHY_ACS_ST), 20, 100000); 735 if (ret < 0) { 736 dev_err(priv->dev, "poll timeout\n"); 737 goto out; 738 } 739 740 reg = MT7531_MDIO_CL22_WRITE | MT7531_MDIO_PHY_ADDR(port) | 741 MT7531_MDIO_REG_ADDR(regnum) | data; 742 743 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST); 744 745 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg, 746 !(reg & MT7531_PHY_ACS_ST), 20, 100000); 747 if (ret < 0) { 748 dev_err(priv->dev, "poll timeout\n"); 749 goto out; 750 } 751 752 out: 753 mt7530_mutex_unlock(priv); 754 755 return ret; 756 } 757 758 static int 759 mt753x_phy_read_c22(struct mii_bus *bus, int port, int regnum) 760 { 761 struct mt7530_priv *priv = bus->priv; 762 763 return priv->info->phy_read_c22(priv, port, regnum); 764 } 765 766 static int 767 mt753x_phy_read_c45(struct mii_bus *bus, int port, int devad, int regnum) 768 { 769 struct mt7530_priv *priv = bus->priv; 770 771 return priv->info->phy_read_c45(priv, port, devad, regnum); 772 } 773 774 static int 775 mt753x_phy_write_c22(struct mii_bus *bus, int port, int regnum, u16 val) 776 { 777 struct mt7530_priv *priv = bus->priv; 778 779 return priv->info->phy_write_c22(priv, port, regnum, val); 780 } 781 782 static int 783 mt753x_phy_write_c45(struct mii_bus *bus, int port, int devad, int regnum, 784 u16 val) 785 { 786 struct mt7530_priv *priv = bus->priv; 787 788 return priv->info->phy_write_c45(priv, port, devad, regnum, val); 789 } 790 791 static void 792 mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset, 793 uint8_t *data) 794 { 795 int i; 796 797 if (stringset != ETH_SS_STATS) 798 return; 799 800 for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) 801 ethtool_puts(&data, mt7530_mib[i].name); 802 } 803 804 static void 805 mt7530_get_ethtool_stats(struct dsa_switch *ds, int port, 806 uint64_t *data) 807 { 808 struct mt7530_priv *priv = ds->priv; 809 const struct mt7530_mib_desc *mib; 810 u32 reg, i; 811 u64 hi; 812 813 for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) { 814 mib = &mt7530_mib[i]; 815 reg = MT7530_PORT_MIB_COUNTER(port) + mib->offset; 816 817 data[i] = mt7530_read(priv, reg); 818 if (mib->size == 2) { 819 hi = mt7530_read(priv, reg + 4); 820 data[i] |= hi << 32; 821 } 822 } 823 } 824 825 static int 826 mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset) 827 { 828 if (sset != ETH_SS_STATS) 829 return 0; 830 831 return ARRAY_SIZE(mt7530_mib); 832 } 833 834 static int 835 mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) 836 { 837 struct mt7530_priv *priv = ds->priv; 838 unsigned int secs = msecs / 1000; 839 unsigned int tmp_age_count; 840 unsigned int error = -1; 841 unsigned int age_count; 842 unsigned int age_unit; 843 844 /* Applied timer is (AGE_CNT + 1) * (AGE_UNIT + 1) seconds */ 845 if (secs < 1 || secs > (AGE_CNT_MAX + 1) * (AGE_UNIT_MAX + 1)) 846 return -ERANGE; 847 848 /* iterate through all possible age_count to find the closest pair */ 849 for (tmp_age_count = 0; tmp_age_count <= AGE_CNT_MAX; ++tmp_age_count) { 850 unsigned int tmp_age_unit = secs / (tmp_age_count + 1) - 1; 851 852 if (tmp_age_unit <= AGE_UNIT_MAX) { 853 unsigned int tmp_error = secs - 854 (tmp_age_count + 1) * (tmp_age_unit + 1); 855 856 /* found a closer pair */ 857 if (error > tmp_error) { 858 error = tmp_error; 859 age_count = tmp_age_count; 860 age_unit = tmp_age_unit; 861 } 862 863 /* found the exact match, so break the loop */ 864 if (!error) 865 break; 866 } 867 } 868 869 
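	/* Program the closest pair found above. The effective ageing period
	 * is (AGE_CNT + 1) * (AGE_UNIT + 1) seconds, so for example a
	 * requested 300 s resolves to AGE_CNT = 0 and AGE_UNIT = 299
	 * (1 * 300 = 300 s), while inexact requests keep the pair with the
	 * smallest remaining error found by the search.
	 */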
mt7530_write(priv, MT7530_AAC, AGE_CNT(age_count) | AGE_UNIT(age_unit)); 870 871 return 0; 872 } 873 874 static const char *p5_intf_modes(unsigned int p5_interface) 875 { 876 switch (p5_interface) { 877 case P5_DISABLED: 878 return "DISABLED"; 879 case P5_INTF_SEL_PHY_P0: 880 return "PHY P0"; 881 case P5_INTF_SEL_PHY_P4: 882 return "PHY P4"; 883 case P5_INTF_SEL_GMAC5: 884 return "GMAC5"; 885 default: 886 return "unknown"; 887 } 888 } 889 890 static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface) 891 { 892 struct mt7530_priv *priv = ds->priv; 893 u8 tx_delay = 0; 894 int val; 895 896 mutex_lock(&priv->reg_mutex); 897 898 val = mt7530_read(priv, MT7530_MHWTRAP); 899 900 val |= MHWTRAP_MANUAL | MHWTRAP_P5_MAC_SEL | MHWTRAP_P5_DIS; 901 val &= ~MHWTRAP_P5_RGMII_MODE & ~MHWTRAP_PHY0_SEL; 902 903 switch (priv->p5_intf_sel) { 904 case P5_INTF_SEL_PHY_P0: 905 /* MT7530_P5_MODE_GPHY_P0: 2nd GMAC -> P5 -> P0 */ 906 val |= MHWTRAP_PHY0_SEL; 907 fallthrough; 908 case P5_INTF_SEL_PHY_P4: 909 /* MT7530_P5_MODE_GPHY_P4: 2nd GMAC -> P5 -> P4 */ 910 val &= ~MHWTRAP_P5_MAC_SEL & ~MHWTRAP_P5_DIS; 911 912 /* Setup the MAC by default for the cpu port */ 913 mt7530_write(priv, MT7530_PMCR_P(5), 0x56300); 914 break; 915 case P5_INTF_SEL_GMAC5: 916 /* MT7530_P5_MODE_GMAC: P5 -> External phy or 2nd GMAC */ 917 val &= ~MHWTRAP_P5_DIS; 918 break; 919 default: 920 break; 921 } 922 923 /* Setup RGMII settings */ 924 if (phy_interface_mode_is_rgmii(interface)) { 925 val |= MHWTRAP_P5_RGMII_MODE; 926 927 /* P5 RGMII RX Clock Control: delay setting for 1000M */ 928 mt7530_write(priv, MT7530_P5RGMIIRXCR, CSR_RGMII_EDGE_ALIGN); 929 930 /* Don't set delay in DSA mode */ 931 if (!dsa_is_dsa_port(priv->ds, 5) && 932 (interface == PHY_INTERFACE_MODE_RGMII_TXID || 933 interface == PHY_INTERFACE_MODE_RGMII_ID)) 934 tx_delay = 4; /* n * 0.5 ns */ 935 936 /* P5 RGMII TX Clock Control: delay x */ 937 mt7530_write(priv, MT7530_P5RGMIITXCR, 938 CSR_RGMII_TXC_CFG(0x10 + tx_delay)); 939 940 /* reduce P5 RGMII Tx driving, 8mA */ 941 mt7530_write(priv, MT7530_IO_DRV_CR, 942 P5_IO_CLK_DRV(1) | P5_IO_DATA_DRV(1)); 943 } 944 945 mt7530_write(priv, MT7530_MHWTRAP, val); 946 947 dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, intf_sel=%s, phy-mode=%s\n", 948 val, p5_intf_modes(priv->p5_intf_sel), phy_modes(interface)); 949 950 mutex_unlock(&priv->reg_mutex); 951 } 952 953 /* In Clause 5 of IEEE Std 802-2014, two sublayers of the data link layer (DLL) 954 * of the Open Systems Interconnection basic reference model (OSI/RM) are 955 * described; the medium access control (MAC) and logical link control (LLC) 956 * sublayers. The MAC sublayer is the one facing the physical layer. 957 * 958 * In 8.2 of IEEE Std 802.1Q-2022, the Bridge architecture is described. A 959 * Bridge component comprises a MAC Relay Entity for interconnecting the Ports 960 * of the Bridge, at least two Ports, and higher layer entities with at least a 961 * Spanning Tree Protocol Entity included. 962 * 963 * Each Bridge Port also functions as an end station and shall provide the MAC 964 * Service to an LLC Entity. Each instance of the MAC Service is provided to a 965 * distinct LLC Entity that supports protocol identification, multiplexing, and 966 * demultiplexing, for protocol data unit (PDU) transmission and reception by 967 * one or more higher layer entities. 
968 * 969 * It is described in 8.13.9 of IEEE Std 802.1Q-2022 that in a Bridge, the LLC 970 * Entity associated with each Bridge Port is modeled as being directly 971 * connected to the attached Local Area Network (LAN). 972 * 973 * On the switch with CPU port architecture, CPU port functions as Management 974 * Port, and the Management Port functionality is provided by software which 975 * functions as an end station. Software is connected to an IEEE 802 LAN that is 976 * wholly contained within the system that incorporates the Bridge. Software 977 * provides access to the LLC Entity associated with each Bridge Port by the 978 * value of the source port field on the special tag on the frame received by 979 * software. 980 * 981 * We call frames that carry control information to determine the active 982 * topology and current extent of each Virtual Local Area Network (VLAN), i.e., 983 * spanning tree or Shortest Path Bridging (SPB) and Multiple VLAN Registration 984 * Protocol Data Units (MVRPDUs), and frames from other link constrained 985 * protocols, such as Extensible Authentication Protocol over LAN (EAPOL) and 986 * Link Layer Discovery Protocol (LLDP), link-local frames. They are not 987 * forwarded by a Bridge. Permanently configured entries in the filtering 988 * database (FDB) ensure that such frames are discarded by the Forwarding 989 * Process. In 8.6.3 of IEEE Std 802.1Q-2022, this is described in detail: 990 * 991 * Each of the reserved MAC addresses specified in Table 8-1 992 * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]) shall be 993 * permanently configured in the FDB in C-VLAN components and ERs. 994 * 995 * Each of the reserved MAC addresses specified in Table 8-2 996 * (01-80-C2-00-00-[01,02,03,04,05,06,07,08,09,0A,0E]) shall be permanently 997 * configured in the FDB in S-VLAN components. 998 * 999 * Each of the reserved MAC addresses specified in Table 8-3 1000 * (01-80-C2-00-00-[01,02,04,0E]) shall be permanently configured in the FDB in 1001 * TPMR components. 1002 * 1003 * The FDB entries for reserved MAC addresses shall specify filtering for all 1004 * Bridge Ports and all VIDs. Management shall not provide the capability to 1005 * modify or remove entries for reserved MAC addresses. 1006 * 1007 * The addresses in Table 8-1, Table 8-2, and Table 8-3 determine the scope of 1008 * propagation of PDUs within a Bridged Network, as follows: 1009 * 1010 * The Nearest Bridge group address (01-80-C2-00-00-0E) is an address that no 1011 * conformant Two-Port MAC Relay (TPMR) component, Service VLAN (S-VLAN) 1012 * component, Customer VLAN (C-VLAN) component, or MAC Bridge can forward. 1013 * PDUs transmitted using this destination address, or any other addresses 1014 * that appear in Table 8-1, Table 8-2, and Table 8-3 1015 * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]), can 1016 * therefore travel no further than those stations that can be reached via a 1017 * single individual LAN from the originating station. 1018 * 1019 * The Nearest non-TPMR Bridge group address (01-80-C2-00-00-03), is an 1020 * address that no conformant S-VLAN component, C-VLAN component, or MAC 1021 * Bridge can forward; however, this address is relayed by a TPMR component. 
 * PDUs using this destination address, or any of the other addresses that
 * appear in both Table 8-1 and Table 8-2 but not in Table 8-3
 * (01-80-C2-00-00-[00,03,05,06,07,08,09,0A,0B,0C,0D,0F]), will be relayed by
 * any TPMRs but will propagate no further than the nearest S-VLAN component,
 * C-VLAN component, or MAC Bridge.
 *
 * The Nearest Customer Bridge group address (01-80-C2-00-00-00) is an address
 * that no conformant C-VLAN component or MAC Bridge can forward; however, it
 * is relayed by TPMR components and S-VLAN components. PDUs using this
 * destination address, or any of the other addresses that appear in Table 8-1
 * but not in either Table 8-2 or Table 8-3 (01-80-C2-00-00-[00,0B,0C,0D,0F]),
 * will be relayed by TPMR components and S-VLAN components but will propagate
 * no further than the nearest C-VLAN component or MAC Bridge.
 *
 * Because the LLC Entity associated with each Bridge Port is provided via the
 * CPU port, we must not filter these frames but forward them to the CPU port.
 *
 * In a Bridge, the transmission Port is mainly decided by the ingress and
 * egress rules, the FDB, and the spanning tree Port State functions of the
 * Forwarding Process. For link-local frames, only the CPU port should be
 * designated as the destination port in the FDB, and the other functions of
 * the Forwarding Process must not interfere with the decision of the
 * transmission Port. We call this process trapping frames to the CPU port.
 *
 * Therefore, on the switch with CPU port architecture, link-local frames must
 * be trapped to the CPU port, and certain link-local frames received by a
 * Port of a Bridge comprising a TPMR component or an S-VLAN component must be
 * excluded from it.
 *
 * A Bridge of the switch with CPU port architecture cannot comprise a
 * Two-Port MAC Relay (TPMR) component as a TPMR component supports only a
 * subset of the functionality of a MAC Bridge. A Bridge comprising two Ports
 * (Management Port doesn't count) of this architecture will either function
 * as a standard MAC Bridge or a standard VLAN Bridge.
 *
 * Therefore, a Bridge of this architecture can only comprise S-VLAN
 * components, C-VLAN components, or MAC Bridge components. Since there's no
 * TPMR component, we don't need to relay PDUs using the destination addresses
 * specified in the Nearest non-TPMR section, nor the portion of the Nearest
 * Customer Bridge section where they must be relayed by TPMR components.
 *
 * One option to trap link-local frames to the CPU port is to add static FDB
 * entries with the CPU port designated as the destination port. However,
 * because Independent VLAN Learning (IVL) is used on every VID, each entry
 * only applies to a single VLAN Identifier (VID). For a Bridge comprising a
 * MAC Bridge component or a C-VLAN component, there would have to be 16 times
 * 4096 entries. This switch intellectual property can only hold a maximum of
 * 2048 entries. Using this option, there also isn't a mechanism to prevent
 * link-local frames from being discarded when the spanning tree Port State of
 * the reception Port is discarding.
 *
 * The remaining option is to utilise the BPC, RGAC1, RGAC2, RGAC3, and RGAC4
 * registers.
Whilst this applies to every VID, it doesn't contain all of the 1075 * reserved MAC addresses without affecting the remaining Standard Group MAC 1076 * Addresses. The REV_UN frame tag utilised using the RGAC4 register covers the 1077 * remaining 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F] destination 1078 * addresses. It also includes the 01-80-C2-00-00-22 to 01-80-C2-00-00-FF 1079 * destination addresses which may be relayed by MAC Bridges or VLAN Bridges. 1080 * The latter option provides better but not complete conformance. 1081 * 1082 * This switch intellectual property also does not provide a mechanism to trap 1083 * link-local frames with specific destination addresses to CPU port by Bridge, 1084 * to conform to the filtering rules for the distinct Bridge components. 1085 * 1086 * Therefore, regardless of the type of the Bridge component, link-local frames 1087 * with these destination addresses will be trapped to CPU port: 1088 * 1089 * 01-80-C2-00-00-[00,01,02,03,0E] 1090 * 1091 * In a Bridge comprising a MAC Bridge component or a C-VLAN component: 1092 * 1093 * Link-local frames with these destination addresses won't be trapped to CPU 1094 * port which won't conform to IEEE Std 802.1Q-2022: 1095 * 1096 * 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F] 1097 * 1098 * In a Bridge comprising an S-VLAN component: 1099 * 1100 * Link-local frames with these destination addresses will be trapped to CPU 1101 * port which won't conform to IEEE Std 802.1Q-2022: 1102 * 1103 * 01-80-C2-00-00-00 1104 * 1105 * Link-local frames with these destination addresses won't be trapped to CPU 1106 * port which won't conform to IEEE Std 802.1Q-2022: 1107 * 1108 * 01-80-C2-00-00-[04,05,06,07,08,09,0A] 1109 * 1110 * To trap link-local frames to CPU port as conformant as this switch 1111 * intellectual property can allow, link-local frames are made to be regarded as 1112 * Bridge Protocol Data Units (BPDUs). This is because this switch intellectual 1113 * property only lets the frames regarded as BPDUs bypass the spanning tree Port 1114 * State function of the Forwarding Process. 1115 * 1116 * The only remaining interference is the ingress rules. When the reception Port 1117 * has no PVID assigned on software, VLAN-untagged frames won't be allowed in. 1118 * There doesn't seem to be a mechanism on the switch intellectual property to 1119 * have link-local frames bypass this function of the Forwarding Process. 1120 */ 1121 static void 1122 mt753x_trap_frames(struct mt7530_priv *priv) 1123 { 1124 /* Trap 802.1X PAE frames and BPDUs to the CPU port(s) and egress them 1125 * VLAN-untagged. 1126 */ 1127 mt7530_rmw(priv, MT753X_BPC, 1128 MT753X_PAE_BPDU_FR | MT753X_PAE_EG_TAG_MASK | 1129 MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK | 1130 MT753X_BPDU_PORT_FW_MASK, 1131 MT753X_PAE_BPDU_FR | 1132 MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | 1133 MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) | 1134 MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | 1135 MT753X_BPDU_CPU_ONLY); 1136 1137 /* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress 1138 * them VLAN-untagged. 
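	 * 01-80-C2-00-00-01 is the IEEE 802.3 MAC Control (PAUSE) address
	 * and 01-80-C2-00-00-02 is the Slow Protocols address used by, for
	 * example, LACP.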
1139 */ 1140 mt7530_rmw(priv, MT753X_RGAC1, 1141 MT753X_R02_BPDU_FR | MT753X_R02_EG_TAG_MASK | 1142 MT753X_R02_PORT_FW_MASK | MT753X_R01_BPDU_FR | 1143 MT753X_R01_EG_TAG_MASK | MT753X_R01_PORT_FW_MASK, 1144 MT753X_R02_BPDU_FR | 1145 MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | 1146 MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) | 1147 MT753X_R01_BPDU_FR | 1148 MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | 1149 MT753X_BPDU_CPU_ONLY); 1150 1151 /* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress 1152 * them VLAN-untagged. 1153 */ 1154 mt7530_rmw(priv, MT753X_RGAC2, 1155 MT753X_R0E_BPDU_FR | MT753X_R0E_EG_TAG_MASK | 1156 MT753X_R0E_PORT_FW_MASK | MT753X_R03_BPDU_FR | 1157 MT753X_R03_EG_TAG_MASK | MT753X_R03_PORT_FW_MASK, 1158 MT753X_R0E_BPDU_FR | 1159 MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | 1160 MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) | 1161 MT753X_R03_BPDU_FR | 1162 MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | 1163 MT753X_BPDU_CPU_ONLY); 1164 } 1165 1166 static void 1167 mt753x_cpu_port_enable(struct dsa_switch *ds, int port) 1168 { 1169 struct mt7530_priv *priv = ds->priv; 1170 1171 /* Enable Mediatek header mode on the cpu port */ 1172 mt7530_write(priv, MT7530_PVC_P(port), 1173 PORT_SPEC_TAG); 1174 1175 /* Enable flooding on the CPU port */ 1176 mt7530_set(priv, MT7530_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) | 1177 UNU_FFP(BIT(port))); 1178 1179 /* Add the CPU port to the CPU port bitmap for MT7531 and the switch on 1180 * the MT7988 SoC. Trapped frames will be forwarded to the CPU port that 1181 * is affine to the inbound user port. 1182 */ 1183 if (priv->id == ID_MT7531 || priv->id == ID_MT7988) 1184 mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port))); 1185 1186 /* CPU port gets connected to all user ports of 1187 * the switch. 1188 */ 1189 mt7530_write(priv, MT7530_PCR_P(port), 1190 PCR_MATRIX(dsa_user_ports(priv->ds))); 1191 1192 /* Set to fallback mode for independent VLAN learning */ 1193 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, 1194 MT7530_PORT_FALLBACK_MODE); 1195 } 1196 1197 static int 1198 mt7530_port_enable(struct dsa_switch *ds, int port, 1199 struct phy_device *phy) 1200 { 1201 struct dsa_port *dp = dsa_to_port(ds, port); 1202 struct mt7530_priv *priv = ds->priv; 1203 1204 mutex_lock(&priv->reg_mutex); 1205 1206 /* Allow the user port gets connected to the cpu port and also 1207 * restore the port matrix if the port is the member of a certain 1208 * bridge. 1209 */ 1210 if (dsa_port_is_user(dp)) { 1211 struct dsa_port *cpu_dp = dp->cpu_dp; 1212 1213 priv->ports[port].pm |= PCR_MATRIX(BIT(cpu_dp->index)); 1214 } 1215 priv->ports[port].enable = true; 1216 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, 1217 priv->ports[port].pm); 1218 1219 mutex_unlock(&priv->reg_mutex); 1220 1221 return 0; 1222 } 1223 1224 static void 1225 mt7530_port_disable(struct dsa_switch *ds, int port) 1226 { 1227 struct mt7530_priv *priv = ds->priv; 1228 1229 mutex_lock(&priv->reg_mutex); 1230 1231 /* Clear up all port matrix which could be restored in the next 1232 * enablement for the port. 1233 */ 1234 priv->ports[port].enable = false; 1235 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, 1236 PCR_MATRIX_CLR); 1237 1238 mutex_unlock(&priv->reg_mutex); 1239 } 1240 1241 static int 1242 mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) 1243 { 1244 struct mt7530_priv *priv = ds->priv; 1245 int length; 1246 u32 val; 1247 1248 /* When a new MTU is set, DSA always set the CPU port's MTU to the 1249 * largest MTU of the user ports. 
Because the switch only has a global 1250 * RX length register, only allowing CPU port here is enough. 1251 */ 1252 if (!dsa_is_cpu_port(ds, port)) 1253 return 0; 1254 1255 mt7530_mutex_lock(priv); 1256 1257 val = mt7530_mii_read(priv, MT7530_GMACCR); 1258 val &= ~MAX_RX_PKT_LEN_MASK; 1259 1260 /* RX length also includes Ethernet header, MTK tag, and FCS length */ 1261 length = new_mtu + ETH_HLEN + MTK_HDR_LEN + ETH_FCS_LEN; 1262 if (length <= 1522) { 1263 val |= MAX_RX_PKT_LEN_1522; 1264 } else if (length <= 1536) { 1265 val |= MAX_RX_PKT_LEN_1536; 1266 } else if (length <= 1552) { 1267 val |= MAX_RX_PKT_LEN_1552; 1268 } else { 1269 val &= ~MAX_RX_JUMBO_MASK; 1270 val |= MAX_RX_JUMBO(DIV_ROUND_UP(length, 1024)); 1271 val |= MAX_RX_PKT_LEN_JUMBO; 1272 } 1273 1274 mt7530_mii_write(priv, MT7530_GMACCR, val); 1275 1276 mt7530_mutex_unlock(priv); 1277 1278 return 0; 1279 } 1280 1281 static int 1282 mt7530_port_max_mtu(struct dsa_switch *ds, int port) 1283 { 1284 return MT7530_MAX_MTU; 1285 } 1286 1287 static void 1288 mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state) 1289 { 1290 struct mt7530_priv *priv = ds->priv; 1291 u32 stp_state; 1292 1293 switch (state) { 1294 case BR_STATE_DISABLED: 1295 stp_state = MT7530_STP_DISABLED; 1296 break; 1297 case BR_STATE_BLOCKING: 1298 stp_state = MT7530_STP_BLOCKING; 1299 break; 1300 case BR_STATE_LISTENING: 1301 stp_state = MT7530_STP_LISTENING; 1302 break; 1303 case BR_STATE_LEARNING: 1304 stp_state = MT7530_STP_LEARNING; 1305 break; 1306 case BR_STATE_FORWARDING: 1307 default: 1308 stp_state = MT7530_STP_FORWARDING; 1309 break; 1310 } 1311 1312 mt7530_rmw(priv, MT7530_SSP_P(port), FID_PST_MASK(FID_BRIDGED), 1313 FID_PST(FID_BRIDGED, stp_state)); 1314 } 1315 1316 static int 1317 mt7530_port_pre_bridge_flags(struct dsa_switch *ds, int port, 1318 struct switchdev_brport_flags flags, 1319 struct netlink_ext_ack *extack) 1320 { 1321 if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | 1322 BR_BCAST_FLOOD)) 1323 return -EINVAL; 1324 1325 return 0; 1326 } 1327 1328 static int 1329 mt7530_port_bridge_flags(struct dsa_switch *ds, int port, 1330 struct switchdev_brport_flags flags, 1331 struct netlink_ext_ack *extack) 1332 { 1333 struct mt7530_priv *priv = ds->priv; 1334 1335 if (flags.mask & BR_LEARNING) 1336 mt7530_rmw(priv, MT7530_PSC_P(port), SA_DIS, 1337 flags.val & BR_LEARNING ? 0 : SA_DIS); 1338 1339 if (flags.mask & BR_FLOOD) 1340 mt7530_rmw(priv, MT7530_MFC, UNU_FFP(BIT(port)), 1341 flags.val & BR_FLOOD ? UNU_FFP(BIT(port)) : 0); 1342 1343 if (flags.mask & BR_MCAST_FLOOD) 1344 mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)), 1345 flags.val & BR_MCAST_FLOOD ? UNM_FFP(BIT(port)) : 0); 1346 1347 if (flags.mask & BR_BCAST_FLOOD) 1348 mt7530_rmw(priv, MT7530_MFC, BC_FFP(BIT(port)), 1349 flags.val & BR_BCAST_FLOOD ? BC_FFP(BIT(port)) : 0); 1350 1351 return 0; 1352 } 1353 1354 static int 1355 mt7530_port_bridge_join(struct dsa_switch *ds, int port, 1356 struct dsa_bridge bridge, bool *tx_fwd_offload, 1357 struct netlink_ext_ack *extack) 1358 { 1359 struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; 1360 struct dsa_port *cpu_dp = dp->cpu_dp; 1361 u32 port_bitmap = BIT(cpu_dp->index); 1362 struct mt7530_priv *priv = ds->priv; 1363 1364 mutex_lock(&priv->reg_mutex); 1365 1366 dsa_switch_for_each_user_port(other_dp, ds) { 1367 int other_port = other_dp->index; 1368 1369 if (dp == other_dp) 1370 continue; 1371 1372 /* Add this port to the port matrix of the other ports in the 1373 * same bridge. 
If the port is disabled, port matrix is kept 1374 * and not being setup until the port becomes enabled. 1375 */ 1376 if (!dsa_port_offloads_bridge(other_dp, &bridge)) 1377 continue; 1378 1379 if (priv->ports[other_port].enable) 1380 mt7530_set(priv, MT7530_PCR_P(other_port), 1381 PCR_MATRIX(BIT(port))); 1382 priv->ports[other_port].pm |= PCR_MATRIX(BIT(port)); 1383 1384 port_bitmap |= BIT(other_port); 1385 } 1386 1387 /* Add the all other ports to this port matrix. */ 1388 if (priv->ports[port].enable) 1389 mt7530_rmw(priv, MT7530_PCR_P(port), 1390 PCR_MATRIX_MASK, PCR_MATRIX(port_bitmap)); 1391 priv->ports[port].pm |= PCR_MATRIX(port_bitmap); 1392 1393 /* Set to fallback mode for independent VLAN learning */ 1394 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, 1395 MT7530_PORT_FALLBACK_MODE); 1396 1397 mutex_unlock(&priv->reg_mutex); 1398 1399 return 0; 1400 } 1401 1402 static void 1403 mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port) 1404 { 1405 struct mt7530_priv *priv = ds->priv; 1406 bool all_user_ports_removed = true; 1407 int i; 1408 1409 /* This is called after .port_bridge_leave when leaving a VLAN-aware 1410 * bridge. Don't set standalone ports to fallback mode. 1411 */ 1412 if (dsa_port_bridge_dev_get(dsa_to_port(ds, port))) 1413 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, 1414 MT7530_PORT_FALLBACK_MODE); 1415 1416 mt7530_rmw(priv, MT7530_PVC_P(port), 1417 VLAN_ATTR_MASK | PVC_EG_TAG_MASK | ACC_FRM_MASK, 1418 VLAN_ATTR(MT7530_VLAN_TRANSPARENT) | 1419 PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT) | 1420 MT7530_VLAN_ACC_ALL); 1421 1422 /* Set PVID to 0 */ 1423 mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, 1424 G0_PORT_VID_DEF); 1425 1426 for (i = 0; i < MT7530_NUM_PORTS; i++) { 1427 if (dsa_is_user_port(ds, i) && 1428 dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) { 1429 all_user_ports_removed = false; 1430 break; 1431 } 1432 } 1433 1434 /* CPU port also does the same thing until all user ports belonging to 1435 * the CPU port get out of VLAN filtering mode. 1436 */ 1437 if (all_user_ports_removed) { 1438 struct dsa_port *dp = dsa_to_port(ds, port); 1439 struct dsa_port *cpu_dp = dp->cpu_dp; 1440 1441 mt7530_write(priv, MT7530_PCR_P(cpu_dp->index), 1442 PCR_MATRIX(dsa_user_ports(priv->ds))); 1443 mt7530_write(priv, MT7530_PVC_P(cpu_dp->index), PORT_SPEC_TAG 1444 | PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); 1445 } 1446 } 1447 1448 static void 1449 mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port) 1450 { 1451 struct mt7530_priv *priv = ds->priv; 1452 1453 /* Trapped into security mode allows packet forwarding through VLAN 1454 * table lookup. 1455 */ 1456 if (dsa_is_user_port(ds, port)) { 1457 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, 1458 MT7530_PORT_SECURITY_MODE); 1459 mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, 1460 G0_PORT_VID(priv->ports[port].pvid)); 1461 1462 /* Only accept tagged frames if PVID is not set */ 1463 if (!priv->ports[port].pvid) 1464 mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK, 1465 MT7530_VLAN_ACC_TAGGED); 1466 1467 /* Set the port as a user port which is to be able to recognize 1468 * VID from incoming packets before fetching entry within the 1469 * VLAN table. 
1470 */ 1471 mt7530_rmw(priv, MT7530_PVC_P(port), 1472 VLAN_ATTR_MASK | PVC_EG_TAG_MASK, 1473 VLAN_ATTR(MT7530_VLAN_USER) | 1474 PVC_EG_TAG(MT7530_VLAN_EG_DISABLED)); 1475 } else { 1476 /* Also set CPU ports to the "user" VLAN port attribute, to 1477 * allow VLAN classification, but keep the EG_TAG attribute as 1478 * "consistent" (i.o.w. don't change its value) for packets 1479 * received by the switch from the CPU, so that tagged packets 1480 * are forwarded to user ports as tagged, and untagged as 1481 * untagged. 1482 */ 1483 mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK, 1484 VLAN_ATTR(MT7530_VLAN_USER)); 1485 } 1486 } 1487 1488 static void 1489 mt7530_port_bridge_leave(struct dsa_switch *ds, int port, 1490 struct dsa_bridge bridge) 1491 { 1492 struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; 1493 struct dsa_port *cpu_dp = dp->cpu_dp; 1494 struct mt7530_priv *priv = ds->priv; 1495 1496 mutex_lock(&priv->reg_mutex); 1497 1498 dsa_switch_for_each_user_port(other_dp, ds) { 1499 int other_port = other_dp->index; 1500 1501 if (dp == other_dp) 1502 continue; 1503 1504 /* Remove this port from the port matrix of the other ports 1505 * in the same bridge. If the port is disabled, port matrix 1506 * is kept and not being setup until the port becomes enabled. 1507 */ 1508 if (!dsa_port_offloads_bridge(other_dp, &bridge)) 1509 continue; 1510 1511 if (priv->ports[other_port].enable) 1512 mt7530_clear(priv, MT7530_PCR_P(other_port), 1513 PCR_MATRIX(BIT(port))); 1514 priv->ports[other_port].pm &= ~PCR_MATRIX(BIT(port)); 1515 } 1516 1517 /* Set the cpu port to be the only one in the port matrix of 1518 * this port. 1519 */ 1520 if (priv->ports[port].enable) 1521 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, 1522 PCR_MATRIX(BIT(cpu_dp->index))); 1523 priv->ports[port].pm = PCR_MATRIX(BIT(cpu_dp->index)); 1524 1525 /* When a port is removed from the bridge, the port would be set up 1526 * back to the default as is at initial boot which is a VLAN-unaware 1527 * port. 
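	 * Matrix mode means the forwarding decision is taken purely from the
	 * per-port matrix (PCR_MATRIX), i.e. port-based VLAN, without
	 * consulting the VLAN table.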
1528 */ 1529 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, 1530 MT7530_PORT_MATRIX_MODE); 1531 1532 mutex_unlock(&priv->reg_mutex); 1533 } 1534 1535 static int 1536 mt7530_port_fdb_add(struct dsa_switch *ds, int port, 1537 const unsigned char *addr, u16 vid, 1538 struct dsa_db db) 1539 { 1540 struct mt7530_priv *priv = ds->priv; 1541 int ret; 1542 u8 port_mask = BIT(port); 1543 1544 mutex_lock(&priv->reg_mutex); 1545 mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT); 1546 ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); 1547 mutex_unlock(&priv->reg_mutex); 1548 1549 return ret; 1550 } 1551 1552 static int 1553 mt7530_port_fdb_del(struct dsa_switch *ds, int port, 1554 const unsigned char *addr, u16 vid, 1555 struct dsa_db db) 1556 { 1557 struct mt7530_priv *priv = ds->priv; 1558 int ret; 1559 u8 port_mask = BIT(port); 1560 1561 mutex_lock(&priv->reg_mutex); 1562 mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_EMP); 1563 ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); 1564 mutex_unlock(&priv->reg_mutex); 1565 1566 return ret; 1567 } 1568 1569 static int 1570 mt7530_port_fdb_dump(struct dsa_switch *ds, int port, 1571 dsa_fdb_dump_cb_t *cb, void *data) 1572 { 1573 struct mt7530_priv *priv = ds->priv; 1574 struct mt7530_fdb _fdb = { 0 }; 1575 int cnt = MT7530_NUM_FDB_RECORDS; 1576 int ret = 0; 1577 u32 rsp = 0; 1578 1579 mutex_lock(&priv->reg_mutex); 1580 1581 ret = mt7530_fdb_cmd(priv, MT7530_FDB_START, &rsp); 1582 if (ret < 0) 1583 goto err; 1584 1585 do { 1586 if (rsp & ATC_SRCH_HIT) { 1587 mt7530_fdb_read(priv, &_fdb); 1588 if (_fdb.port_mask & BIT(port)) { 1589 ret = cb(_fdb.mac, _fdb.vid, _fdb.noarp, 1590 data); 1591 if (ret < 0) 1592 break; 1593 } 1594 } 1595 } while (--cnt && 1596 !(rsp & ATC_SRCH_END) && 1597 !mt7530_fdb_cmd(priv, MT7530_FDB_NEXT, &rsp)); 1598 err: 1599 mutex_unlock(&priv->reg_mutex); 1600 1601 return 0; 1602 } 1603 1604 static int 1605 mt7530_port_mdb_add(struct dsa_switch *ds, int port, 1606 const struct switchdev_obj_port_mdb *mdb, 1607 struct dsa_db db) 1608 { 1609 struct mt7530_priv *priv = ds->priv; 1610 const u8 *addr = mdb->addr; 1611 u16 vid = mdb->vid; 1612 u8 port_mask = 0; 1613 int ret; 1614 1615 mutex_lock(&priv->reg_mutex); 1616 1617 mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP); 1618 if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL)) 1619 port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP) 1620 & PORT_MAP_MASK; 1621 1622 port_mask |= BIT(port); 1623 mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT); 1624 ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); 1625 1626 mutex_unlock(&priv->reg_mutex); 1627 1628 return ret; 1629 } 1630 1631 static int 1632 mt7530_port_mdb_del(struct dsa_switch *ds, int port, 1633 const struct switchdev_obj_port_mdb *mdb, 1634 struct dsa_db db) 1635 { 1636 struct mt7530_priv *priv = ds->priv; 1637 const u8 *addr = mdb->addr; 1638 u16 vid = mdb->vid; 1639 u8 port_mask = 0; 1640 int ret; 1641 1642 mutex_lock(&priv->reg_mutex); 1643 1644 mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP); 1645 if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL)) 1646 port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP) 1647 & PORT_MAP_MASK; 1648 1649 port_mask &= ~BIT(port); 1650 mt7530_fdb_write(priv, vid, port_mask, addr, -1, 1651 port_mask ? 
STATIC_ENT : STATIC_EMP); 1652 ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); 1653 1654 mutex_unlock(&priv->reg_mutex); 1655 1656 return ret; 1657 } 1658 1659 static int 1660 mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid) 1661 { 1662 struct mt7530_dummy_poll p; 1663 u32 val; 1664 int ret; 1665 1666 val = VTCR_BUSY | VTCR_FUNC(cmd) | vid; 1667 mt7530_write(priv, MT7530_VTCR, val); 1668 1669 INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_VTCR); 1670 ret = readx_poll_timeout(_mt7530_read, &p, val, 1671 !(val & VTCR_BUSY), 20, 20000); 1672 if (ret < 0) { 1673 dev_err(priv->dev, "poll timeout\n"); 1674 return ret; 1675 } 1676 1677 val = mt7530_read(priv, MT7530_VTCR); 1678 if (val & VTCR_INVALID) { 1679 dev_err(priv->dev, "read VTCR invalid\n"); 1680 return -EINVAL; 1681 } 1682 1683 return 0; 1684 } 1685 1686 static int 1687 mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, 1688 struct netlink_ext_ack *extack) 1689 { 1690 struct dsa_port *dp = dsa_to_port(ds, port); 1691 struct dsa_port *cpu_dp = dp->cpu_dp; 1692 1693 if (vlan_filtering) { 1694 /* The port is being kept as VLAN-unaware port when bridge is 1695 * set up with vlan_filtering not being set, Otherwise, the 1696 * port and the corresponding CPU port is required the setup 1697 * for becoming a VLAN-aware port. 1698 */ 1699 mt7530_port_set_vlan_aware(ds, port); 1700 mt7530_port_set_vlan_aware(ds, cpu_dp->index); 1701 } else { 1702 mt7530_port_set_vlan_unaware(ds, port); 1703 } 1704 1705 return 0; 1706 } 1707 1708 static void 1709 mt7530_hw_vlan_add(struct mt7530_priv *priv, 1710 struct mt7530_hw_vlan_entry *entry) 1711 { 1712 struct dsa_port *dp = dsa_to_port(priv->ds, entry->port); 1713 u8 new_members; 1714 u32 val; 1715 1716 new_members = entry->old_members | BIT(entry->port); 1717 1718 /* Validate the entry with independent learning, create egress tag per 1719 * VLAN and joining the port as one of the port members. 1720 */ 1721 val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | FID(FID_BRIDGED) | 1722 VLAN_VALID; 1723 mt7530_write(priv, MT7530_VAWD1, val); 1724 1725 /* Decide whether adding tag or not for those outgoing packets from the 1726 * port inside the VLAN. 1727 * CPU port is always taken as a tagged port for serving more than one 1728 * VLANs across and also being applied with egress type stack mode for 1729 * that VLAN tags would be appended after hardware special tag used as 1730 * DSA tag. 
1731 */ 1732 if (dsa_port_is_cpu(dp)) 1733 val = MT7530_VLAN_EGRESS_STACK; 1734 else if (entry->untagged) 1735 val = MT7530_VLAN_EGRESS_UNTAG; 1736 else 1737 val = MT7530_VLAN_EGRESS_TAG; 1738 mt7530_rmw(priv, MT7530_VAWD2, 1739 ETAG_CTRL_P_MASK(entry->port), 1740 ETAG_CTRL_P(entry->port, val)); 1741 } 1742 1743 static void 1744 mt7530_hw_vlan_del(struct mt7530_priv *priv, 1745 struct mt7530_hw_vlan_entry *entry) 1746 { 1747 u8 new_members; 1748 u32 val; 1749 1750 new_members = entry->old_members & ~BIT(entry->port); 1751 1752 val = mt7530_read(priv, MT7530_VAWD1); 1753 if (!(val & VLAN_VALID)) { 1754 dev_err(priv->dev, 1755 "Cannot be deleted due to invalid entry\n"); 1756 return; 1757 } 1758 1759 if (new_members) { 1760 val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | 1761 VLAN_VALID; 1762 mt7530_write(priv, MT7530_VAWD1, val); 1763 } else { 1764 mt7530_write(priv, MT7530_VAWD1, 0); 1765 mt7530_write(priv, MT7530_VAWD2, 0); 1766 } 1767 } 1768 1769 static void 1770 mt7530_hw_vlan_update(struct mt7530_priv *priv, u16 vid, 1771 struct mt7530_hw_vlan_entry *entry, 1772 mt7530_vlan_op vlan_op) 1773 { 1774 u32 val; 1775 1776 /* Fetch entry */ 1777 mt7530_vlan_cmd(priv, MT7530_VTCR_RD_VID, vid); 1778 1779 val = mt7530_read(priv, MT7530_VAWD1); 1780 1781 entry->old_members = (val >> PORT_MEM_SHFT) & PORT_MEM_MASK; 1782 1783 /* Manipulate entry */ 1784 vlan_op(priv, entry); 1785 1786 /* Flush result to hardware */ 1787 mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, vid); 1788 } 1789 1790 static int 1791 mt7530_setup_vlan0(struct mt7530_priv *priv) 1792 { 1793 u32 val; 1794 1795 /* Validate the entry with independent learning, keep the original 1796 * ingress tag attribute. 1797 */ 1798 val = IVL_MAC | EG_CON | PORT_MEM(MT7530_ALL_MEMBERS) | FID(FID_BRIDGED) | 1799 VLAN_VALID; 1800 mt7530_write(priv, MT7530_VAWD1, val); 1801 1802 return mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, 0); 1803 } 1804 1805 static int 1806 mt7530_port_vlan_add(struct dsa_switch *ds, int port, 1807 const struct switchdev_obj_port_vlan *vlan, 1808 struct netlink_ext_ack *extack) 1809 { 1810 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 1811 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; 1812 struct mt7530_hw_vlan_entry new_entry; 1813 struct mt7530_priv *priv = ds->priv; 1814 1815 mutex_lock(&priv->reg_mutex); 1816 1817 mt7530_hw_vlan_entry_init(&new_entry, port, untagged); 1818 mt7530_hw_vlan_update(priv, vlan->vid, &new_entry, mt7530_hw_vlan_add); 1819 1820 if (pvid) { 1821 priv->ports[port].pvid = vlan->vid; 1822 1823 /* Accept all frames if PVID is set */ 1824 mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK, 1825 MT7530_VLAN_ACC_ALL); 1826 1827 /* Only configure PVID if VLAN filtering is enabled */ 1828 if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) 1829 mt7530_rmw(priv, MT7530_PPBV1_P(port), 1830 G0_PORT_VID_MASK, 1831 G0_PORT_VID(vlan->vid)); 1832 } else if (vlan->vid && priv->ports[port].pvid == vlan->vid) { 1833 /* This VLAN is overwritten without PVID, so unset it */ 1834 priv->ports[port].pvid = G0_PORT_VID_DEF; 1835 1836 /* Only accept tagged frames if the port is VLAN-aware */ 1837 if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) 1838 mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK, 1839 MT7530_VLAN_ACC_TAGGED); 1840 1841 mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, 1842 G0_PORT_VID_DEF); 1843 } 1844 1845 mutex_unlock(&priv->reg_mutex); 1846 1847 return 0; 1848 } 1849 1850 static int 1851 mt7530_port_vlan_del(struct dsa_switch *ds, int port, 1852 const struct 
switchdev_obj_port_vlan *vlan) 1853 { 1854 struct mt7530_hw_vlan_entry target_entry; 1855 struct mt7530_priv *priv = ds->priv; 1856 1857 mutex_lock(&priv->reg_mutex); 1858 1859 mt7530_hw_vlan_entry_init(&target_entry, port, 0); 1860 mt7530_hw_vlan_update(priv, vlan->vid, &target_entry, 1861 mt7530_hw_vlan_del); 1862 1863 /* PVID is being restored to the default whenever the PVID port 1864 * is being removed from the VLAN. 1865 */ 1866 if (priv->ports[port].pvid == vlan->vid) { 1867 priv->ports[port].pvid = G0_PORT_VID_DEF; 1868 1869 /* Only accept tagged frames if the port is VLAN-aware */ 1870 if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) 1871 mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK, 1872 MT7530_VLAN_ACC_TAGGED); 1873 1874 mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, 1875 G0_PORT_VID_DEF); 1876 } 1877 1878 1879 mutex_unlock(&priv->reg_mutex); 1880 1881 return 0; 1882 } 1883 1884 static int mt753x_mirror_port_get(unsigned int id, u32 val) 1885 { 1886 return (id == ID_MT7531) ? MT7531_MIRROR_PORT_GET(val) : 1887 MIRROR_PORT(val); 1888 } 1889 1890 static int mt753x_mirror_port_set(unsigned int id, u32 val) 1891 { 1892 return (id == ID_MT7531) ? MT7531_MIRROR_PORT_SET(val) : 1893 MIRROR_PORT(val); 1894 } 1895 1896 static int mt753x_port_mirror_add(struct dsa_switch *ds, int port, 1897 struct dsa_mall_mirror_tc_entry *mirror, 1898 bool ingress, struct netlink_ext_ack *extack) 1899 { 1900 struct mt7530_priv *priv = ds->priv; 1901 int monitor_port; 1902 u32 val; 1903 1904 /* Check for existent entry */ 1905 if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port)) 1906 return -EEXIST; 1907 1908 val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id)); 1909 1910 /* MT7530 only supports one monitor port */ 1911 monitor_port = mt753x_mirror_port_get(priv->id, val); 1912 if (val & MT753X_MIRROR_EN(priv->id) && 1913 monitor_port != mirror->to_local_port) 1914 return -EEXIST; 1915 1916 val |= MT753X_MIRROR_EN(priv->id); 1917 val &= ~MT753X_MIRROR_MASK(priv->id); 1918 val |= mt753x_mirror_port_set(priv->id, mirror->to_local_port); 1919 mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val); 1920 1921 val = mt7530_read(priv, MT7530_PCR_P(port)); 1922 if (ingress) { 1923 val |= PORT_RX_MIR; 1924 priv->mirror_rx |= BIT(port); 1925 } else { 1926 val |= PORT_TX_MIR; 1927 priv->mirror_tx |= BIT(port); 1928 } 1929 mt7530_write(priv, MT7530_PCR_P(port), val); 1930 1931 return 0; 1932 } 1933 1934 static void mt753x_port_mirror_del(struct dsa_switch *ds, int port, 1935 struct dsa_mall_mirror_tc_entry *mirror) 1936 { 1937 struct mt7530_priv *priv = ds->priv; 1938 u32 val; 1939 1940 val = mt7530_read(priv, MT7530_PCR_P(port)); 1941 if (mirror->ingress) { 1942 val &= ~PORT_RX_MIR; 1943 priv->mirror_rx &= ~BIT(port); 1944 } else { 1945 val &= ~PORT_TX_MIR; 1946 priv->mirror_tx &= ~BIT(port); 1947 } 1948 mt7530_write(priv, MT7530_PCR_P(port), val); 1949 1950 if (!priv->mirror_rx && !priv->mirror_tx) { 1951 val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id)); 1952 val &= ~MT753X_MIRROR_EN(priv->id); 1953 mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val); 1954 } 1955 } 1956 1957 static enum dsa_tag_protocol 1958 mtk_get_tag_protocol(struct dsa_switch *ds, int port, 1959 enum dsa_tag_protocol mp) 1960 { 1961 return DSA_TAG_PROTO_MTK; 1962 } 1963 1964 #ifdef CONFIG_GPIOLIB 1965 static inline u32 1966 mt7530_gpio_to_bit(unsigned int offset) 1967 { 1968 /* Map GPIO offset to register bit 1969 * [ 2: 0] port 0 LED 0..2 as GPIO 0..2 1970 * [ 6: 4] port 1 LED 0..2 as GPIO 3..5 1971 * [10: 8] 
port 2 LED 0..2 as GPIO 6..8 1972 * [14:12] port 3 LED 0..2 as GPIO 9..11 1973 * [18:16] port 4 LED 0..2 as GPIO 12..14 1974 */ 1975 return BIT(offset + offset / 3); 1976 } 1977 1978 static int 1979 mt7530_gpio_get(struct gpio_chip *gc, unsigned int offset) 1980 { 1981 struct mt7530_priv *priv = gpiochip_get_data(gc); 1982 u32 bit = mt7530_gpio_to_bit(offset); 1983 1984 return !!(mt7530_read(priv, MT7530_LED_GPIO_DATA) & bit); 1985 } 1986 1987 static void 1988 mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) 1989 { 1990 struct mt7530_priv *priv = gpiochip_get_data(gc); 1991 u32 bit = mt7530_gpio_to_bit(offset); 1992 1993 if (value) 1994 mt7530_set(priv, MT7530_LED_GPIO_DATA, bit); 1995 else 1996 mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit); 1997 } 1998 1999 static int 2000 mt7530_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) 2001 { 2002 struct mt7530_priv *priv = gpiochip_get_data(gc); 2003 u32 bit = mt7530_gpio_to_bit(offset); 2004 2005 return (mt7530_read(priv, MT7530_LED_GPIO_DIR) & bit) ? 2006 GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN; 2007 } 2008 2009 static int 2010 mt7530_gpio_direction_input(struct gpio_chip *gc, unsigned int offset) 2011 { 2012 struct mt7530_priv *priv = gpiochip_get_data(gc); 2013 u32 bit = mt7530_gpio_to_bit(offset); 2014 2015 mt7530_clear(priv, MT7530_LED_GPIO_OE, bit); 2016 mt7530_clear(priv, MT7530_LED_GPIO_DIR, bit); 2017 2018 return 0; 2019 } 2020 2021 static int 2022 mt7530_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value) 2023 { 2024 struct mt7530_priv *priv = gpiochip_get_data(gc); 2025 u32 bit = mt7530_gpio_to_bit(offset); 2026 2027 mt7530_set(priv, MT7530_LED_GPIO_DIR, bit); 2028 2029 if (value) 2030 mt7530_set(priv, MT7530_LED_GPIO_DATA, bit); 2031 else 2032 mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit); 2033 2034 mt7530_set(priv, MT7530_LED_GPIO_OE, bit); 2035 2036 return 0; 2037 } 2038 2039 static int 2040 mt7530_setup_gpio(struct mt7530_priv *priv) 2041 { 2042 struct device *dev = priv->dev; 2043 struct gpio_chip *gc; 2044 2045 gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL); 2046 if (!gc) 2047 return -ENOMEM; 2048 2049 mt7530_write(priv, MT7530_LED_GPIO_OE, 0); 2050 mt7530_write(priv, MT7530_LED_GPIO_DIR, 0); 2051 mt7530_write(priv, MT7530_LED_IO_MODE, 0); 2052 2053 gc->label = "mt7530"; 2054 gc->parent = dev; 2055 gc->owner = THIS_MODULE; 2056 gc->get_direction = mt7530_gpio_get_direction; 2057 gc->direction_input = mt7530_gpio_direction_input; 2058 gc->direction_output = mt7530_gpio_direction_output; 2059 gc->get = mt7530_gpio_get; 2060 gc->set = mt7530_gpio_set; 2061 gc->base = -1; 2062 gc->ngpio = 15; 2063 gc->can_sleep = true; 2064 2065 return devm_gpiochip_add_data(dev, gc, priv); 2066 } 2067 #endif /* CONFIG_GPIOLIB */ 2068 2069 static irqreturn_t 2070 mt7530_irq_thread_fn(int irq, void *dev_id) 2071 { 2072 struct mt7530_priv *priv = dev_id; 2073 bool handled = false; 2074 u32 val; 2075 int p; 2076 2077 mt7530_mutex_lock(priv); 2078 val = mt7530_mii_read(priv, MT7530_SYS_INT_STS); 2079 mt7530_mii_write(priv, MT7530_SYS_INT_STS, val); 2080 mt7530_mutex_unlock(priv); 2081 2082 for (p = 0; p < MT7530_NUM_PHYS; p++) { 2083 if (BIT(p) & val) { 2084 unsigned int irq; 2085 2086 irq = irq_find_mapping(priv->irq_domain, p); 2087 handle_nested_irq(irq); 2088 handled = true; 2089 } 2090 } 2091 2092 return IRQ_RETVAL(handled); 2093 } 2094 2095 static void 2096 mt7530_irq_mask(struct irq_data *d) 2097 { 2098 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); 2099 2100 
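/* Only the cached irq_enable copy is updated here; the hardware mask
 * register (MT7530_SYS_INT_EN) is written back from
 * mt7530_irq_bus_sync_unlock(), since register access may sleep.
 */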
priv->irq_enable &= ~BIT(d->hwirq); 2101 } 2102 2103 static void 2104 mt7530_irq_unmask(struct irq_data *d) 2105 { 2106 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); 2107 2108 priv->irq_enable |= BIT(d->hwirq); 2109 } 2110 2111 static void 2112 mt7530_irq_bus_lock(struct irq_data *d) 2113 { 2114 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); 2115 2116 mt7530_mutex_lock(priv); 2117 } 2118 2119 static void 2120 mt7530_irq_bus_sync_unlock(struct irq_data *d) 2121 { 2122 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); 2123 2124 mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable); 2125 mt7530_mutex_unlock(priv); 2126 } 2127 2128 static struct irq_chip mt7530_irq_chip = { 2129 .name = KBUILD_MODNAME, 2130 .irq_mask = mt7530_irq_mask, 2131 .irq_unmask = mt7530_irq_unmask, 2132 .irq_bus_lock = mt7530_irq_bus_lock, 2133 .irq_bus_sync_unlock = mt7530_irq_bus_sync_unlock, 2134 }; 2135 2136 static int 2137 mt7530_irq_map(struct irq_domain *domain, unsigned int irq, 2138 irq_hw_number_t hwirq) 2139 { 2140 irq_set_chip_data(irq, domain->host_data); 2141 irq_set_chip_and_handler(irq, &mt7530_irq_chip, handle_simple_irq); 2142 irq_set_nested_thread(irq, true); 2143 irq_set_noprobe(irq); 2144 2145 return 0; 2146 } 2147 2148 static const struct irq_domain_ops mt7530_irq_domain_ops = { 2149 .map = mt7530_irq_map, 2150 .xlate = irq_domain_xlate_onecell, 2151 }; 2152 2153 static void 2154 mt7988_irq_mask(struct irq_data *d) 2155 { 2156 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); 2157 2158 priv->irq_enable &= ~BIT(d->hwirq); 2159 mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable); 2160 } 2161 2162 static void 2163 mt7988_irq_unmask(struct irq_data *d) 2164 { 2165 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); 2166 2167 priv->irq_enable |= BIT(d->hwirq); 2168 mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable); 2169 } 2170 2171 static struct irq_chip mt7988_irq_chip = { 2172 .name = KBUILD_MODNAME, 2173 .irq_mask = mt7988_irq_mask, 2174 .irq_unmask = mt7988_irq_unmask, 2175 }; 2176 2177 static int 2178 mt7988_irq_map(struct irq_domain *domain, unsigned int irq, 2179 irq_hw_number_t hwirq) 2180 { 2181 irq_set_chip_data(irq, domain->host_data); 2182 irq_set_chip_and_handler(irq, &mt7988_irq_chip, handle_simple_irq); 2183 irq_set_nested_thread(irq, true); 2184 irq_set_noprobe(irq); 2185 2186 return 0; 2187 } 2188 2189 static const struct irq_domain_ops mt7988_irq_domain_ops = { 2190 .map = mt7988_irq_map, 2191 .xlate = irq_domain_xlate_onecell, 2192 }; 2193 2194 static void 2195 mt7530_setup_mdio_irq(struct mt7530_priv *priv) 2196 { 2197 struct dsa_switch *ds = priv->ds; 2198 int p; 2199 2200 for (p = 0; p < MT7530_NUM_PHYS; p++) { 2201 if (BIT(p) & ds->phys_mii_mask) { 2202 unsigned int irq; 2203 2204 irq = irq_create_mapping(priv->irq_domain, p); 2205 ds->user_mii_bus->irq[p] = irq; 2206 } 2207 } 2208 } 2209 2210 static int 2211 mt7530_setup_irq(struct mt7530_priv *priv) 2212 { 2213 struct device *dev = priv->dev; 2214 struct device_node *np = dev->of_node; 2215 int ret; 2216 2217 if (!of_property_read_bool(np, "interrupt-controller")) { 2218 dev_info(dev, "no interrupt support\n"); 2219 return 0; 2220 } 2221 2222 priv->irq = of_irq_get(np, 0); 2223 if (priv->irq <= 0) { 2224 dev_err(dev, "failed to get parent IRQ: %d\n", priv->irq); 2225 return priv->irq ? 
: -EINVAL; 2226 } 2227 2228 if (priv->id == ID_MT7988) 2229 priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS, 2230 &mt7988_irq_domain_ops, 2231 priv); 2232 else 2233 priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS, 2234 &mt7530_irq_domain_ops, 2235 priv); 2236 2237 if (!priv->irq_domain) { 2238 dev_err(dev, "failed to create IRQ domain\n"); 2239 return -ENOMEM; 2240 } 2241 2242 /* This register must be set for MT7530 to properly fire interrupts */ 2243 if (priv->id == ID_MT7530 || priv->id == ID_MT7621) 2244 mt7530_set(priv, MT7530_TOP_SIG_CTRL, TOP_SIG_CTRL_NORMAL); 2245 2246 ret = request_threaded_irq(priv->irq, NULL, mt7530_irq_thread_fn, 2247 IRQF_ONESHOT, KBUILD_MODNAME, priv); 2248 if (ret) { 2249 irq_domain_remove(priv->irq_domain); 2250 dev_err(dev, "failed to request IRQ: %d\n", ret); 2251 return ret; 2252 } 2253 2254 return 0; 2255 } 2256 2257 static void 2258 mt7530_free_mdio_irq(struct mt7530_priv *priv) 2259 { 2260 int p; 2261 2262 for (p = 0; p < MT7530_NUM_PHYS; p++) { 2263 if (BIT(p) & priv->ds->phys_mii_mask) { 2264 unsigned int irq; 2265 2266 irq = irq_find_mapping(priv->irq_domain, p); 2267 irq_dispose_mapping(irq); 2268 } 2269 } 2270 } 2271 2272 static void 2273 mt7530_free_irq_common(struct mt7530_priv *priv) 2274 { 2275 free_irq(priv->irq, priv); 2276 irq_domain_remove(priv->irq_domain); 2277 } 2278 2279 static void 2280 mt7530_free_irq(struct mt7530_priv *priv) 2281 { 2282 struct device_node *mnp, *np = priv->dev->of_node; 2283 2284 mnp = of_get_child_by_name(np, "mdio"); 2285 if (!mnp) 2286 mt7530_free_mdio_irq(priv); 2287 of_node_put(mnp); 2288 2289 mt7530_free_irq_common(priv); 2290 } 2291 2292 static int 2293 mt7530_setup_mdio(struct mt7530_priv *priv) 2294 { 2295 struct device_node *mnp, *np = priv->dev->of_node; 2296 struct dsa_switch *ds = priv->ds; 2297 struct device *dev = priv->dev; 2298 struct mii_bus *bus; 2299 static int idx; 2300 int ret = 0; 2301 2302 mnp = of_get_child_by_name(np, "mdio"); 2303 2304 if (mnp && !of_device_is_available(mnp)) 2305 goto out; 2306 2307 bus = devm_mdiobus_alloc(dev); 2308 if (!bus) { 2309 ret = -ENOMEM; 2310 goto out; 2311 } 2312 2313 if (!mnp) 2314 ds->user_mii_bus = bus; 2315 2316 bus->priv = priv; 2317 bus->name = KBUILD_MODNAME "-mii"; 2318 snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++); 2319 bus->read = mt753x_phy_read_c22; 2320 bus->write = mt753x_phy_write_c22; 2321 bus->read_c45 = mt753x_phy_read_c45; 2322 bus->write_c45 = mt753x_phy_write_c45; 2323 bus->parent = dev; 2324 bus->phy_mask = ~ds->phys_mii_mask; 2325 2326 if (priv->irq && !mnp) 2327 mt7530_setup_mdio_irq(priv); 2328 2329 ret = devm_of_mdiobus_register(dev, bus, mnp); 2330 if (ret) { 2331 dev_err(dev, "failed to register MDIO bus: %d\n", ret); 2332 if (priv->irq && !mnp) 2333 mt7530_free_mdio_irq(priv); 2334 } 2335 2336 out: 2337 of_node_put(mnp); 2338 return ret; 2339 } 2340 2341 static int 2342 mt7530_setup(struct dsa_switch *ds) 2343 { 2344 struct mt7530_priv *priv = ds->priv; 2345 struct device_node *dn = NULL; 2346 struct device_node *phy_node; 2347 struct device_node *mac_np; 2348 struct mt7530_dummy_poll p; 2349 phy_interface_t interface; 2350 struct dsa_port *cpu_dp; 2351 u32 id, val; 2352 int ret, i; 2353 2354 /* The parent node of conduit netdev which holds the common system 2355 * controller also is the container for two GMACs nodes representing 2356 * as two netdev instances. 
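 * That parent node is also what the port 5 PHY muxing scan further down
 * in this function walks when looking for the "mediatek,eth-mac" GMAC
 * children.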
2357 */ 2358 dsa_switch_for_each_cpu_port(cpu_dp, ds) { 2359 dn = cpu_dp->conduit->dev.of_node->parent; 2360 /* It doesn't matter which CPU port is found first, 2361 * their conduits should share the same parent OF node 2362 */ 2363 break; 2364 } 2365 2366 if (!dn) { 2367 dev_err(ds->dev, "parent OF node of DSA conduit not found"); 2368 return -EINVAL; 2369 } 2370 2371 ds->assisted_learning_on_cpu_port = true; 2372 ds->mtu_enforcement_ingress = true; 2373 2374 if (priv->id == ID_MT7530) { 2375 regulator_set_voltage(priv->core_pwr, 1000000, 1000000); 2376 ret = regulator_enable(priv->core_pwr); 2377 if (ret < 0) { 2378 dev_err(priv->dev, 2379 "Failed to enable core power: %d\n", ret); 2380 return ret; 2381 } 2382 2383 regulator_set_voltage(priv->io_pwr, 3300000, 3300000); 2384 ret = regulator_enable(priv->io_pwr); 2385 if (ret < 0) { 2386 dev_err(priv->dev, "Failed to enable io pwr: %d\n", 2387 ret); 2388 return ret; 2389 } 2390 } 2391 2392 /* Reset the whole chip through the GPIO pin or memory-mapped registers, 2393 * depending on the type of hardware. 2394 */ 2395 if (priv->mcm) { 2396 reset_control_assert(priv->rstc); 2397 usleep_range(5000, 5100); 2398 reset_control_deassert(priv->rstc); 2399 } else { 2400 gpiod_set_value_cansleep(priv->reset, 0); 2401 usleep_range(5000, 5100); 2402 gpiod_set_value_cansleep(priv->reset, 1); 2403 } 2404 2405 /* Wait for the MT7530 to come out of reset and stabilize */ 2406 INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP); 2407 ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0, 2408 20, 1000000); 2409 if (ret < 0) { 2410 dev_err(priv->dev, "reset timeout\n"); 2411 return ret; 2412 } 2413 2414 id = mt7530_read(priv, MT7530_CREV); 2415 id >>= CHIP_NAME_SHIFT; 2416 if (id != MT7530_ID) { 2417 dev_err(priv->dev, "chip %x can't be supported\n", id); 2418 return -ENODEV; 2419 } 2420 2421 if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_20MHZ) { 2422 dev_err(priv->dev, 2423 "MT7530 with a 20MHz XTAL is not supported!\n"); 2424 return -EINVAL; 2425 } 2426 2427 /* Reset the switch through internal reset */ 2428 mt7530_write(priv, MT7530_SYS_CTRL, 2429 SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST | 2430 SYS_CTRL_REG_RST); 2431 2432 /* Lower Tx driving for TRGMII path */ 2433 for (i = 0; i < NUM_TRGMII_CTRL; i++) 2434 mt7530_write(priv, MT7530_TRGMII_TD_ODT(i), 2435 TD_DM_DRVP(8) | TD_DM_DRVN(8)); 2436 2437 for (i = 0; i < NUM_TRGMII_CTRL; i++) 2438 mt7530_rmw(priv, MT7530_TRGMII_RD(i), 2439 RD_TAP_MASK, RD_TAP(16)); 2440 2441 /* Enable port 6 */ 2442 val = mt7530_read(priv, MT7530_MHWTRAP); 2443 val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS; 2444 val |= MHWTRAP_MANUAL; 2445 mt7530_write(priv, MT7530_MHWTRAP, val); 2446 2447 if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_40MHZ) 2448 mt7530_pll_setup(priv); 2449 2450 mt753x_trap_frames(priv); 2451 2452 /* Enable and reset MIB counters */ 2453 mt7530_mib_reset(ds); 2454 2455 for (i = 0; i < MT7530_NUM_PORTS; i++) { 2456 /* Clear link settings and enable force mode to force link down 2457 * on all ports until they're enabled later.
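 * Leaving PMCR_FORCE_MODE set while the link settings are cleared keeps
 * the MAC in a forced link-down state; mt753x_phylink_mac_link_up()
 * reprograms the negotiated speed, duplex and pause bits later.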
2458 */ 2459 mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK | 2460 PMCR_FORCE_MODE, PMCR_FORCE_MODE); 2461 2462 /* Disable forwarding by default on all ports */ 2463 mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK, 2464 PCR_MATRIX_CLR); 2465 2466 /* Disable learning by default on all ports */ 2467 mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); 2468 2469 if (dsa_is_cpu_port(ds, i)) { 2470 mt753x_cpu_port_enable(ds, i); 2471 } else { 2472 mt7530_port_disable(ds, i); 2473 2474 /* Set default PVID to 0 on all user ports */ 2475 mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK, 2476 G0_PORT_VID_DEF); 2477 } 2478 /* Enable consistent egress tag */ 2479 mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK, 2480 PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); 2481 } 2482 2483 /* Setup VLAN ID 0 for VLAN-unaware bridges */ 2484 ret = mt7530_setup_vlan0(priv); 2485 if (ret) 2486 return ret; 2487 2488 /* Setup port 5 */ 2489 if (!dsa_is_unused_port(ds, 5)) { 2490 priv->p5_intf_sel = P5_INTF_SEL_GMAC5; 2491 } else { 2492 /* Scan the ethernet nodes. Look for GMAC1, lookup the used PHY. 2493 * Set priv->p5_intf_sel to the appropriate value if PHY muxing 2494 * is detected. 2495 */ 2496 for_each_child_of_node(dn, mac_np) { 2497 if (!of_device_is_compatible(mac_np, 2498 "mediatek,eth-mac")) 2499 continue; 2500 2501 ret = of_property_read_u32(mac_np, "reg", &id); 2502 if (ret < 0 || id != 1) 2503 continue; 2504 2505 phy_node = of_parse_phandle(mac_np, "phy-handle", 0); 2506 if (!phy_node) 2507 continue; 2508 2509 if (phy_node->parent == priv->dev->of_node->parent) { 2510 ret = of_get_phy_mode(mac_np, &interface); 2511 if (ret && ret != -ENODEV) { 2512 of_node_put(mac_np); 2513 of_node_put(phy_node); 2514 return ret; 2515 } 2516 id = of_mdio_parse_addr(ds->dev, phy_node); 2517 if (id == 0) 2518 priv->p5_intf_sel = P5_INTF_SEL_PHY_P0; 2519 if (id == 4) 2520 priv->p5_intf_sel = P5_INTF_SEL_PHY_P4; 2521 } 2522 of_node_put(mac_np); 2523 of_node_put(phy_node); 2524 break; 2525 } 2526 2527 if (priv->p5_intf_sel == P5_INTF_SEL_PHY_P0 || 2528 priv->p5_intf_sel == P5_INTF_SEL_PHY_P4) 2529 mt7530_setup_port5(ds, interface); 2530 } 2531 2532 #ifdef CONFIG_GPIOLIB 2533 if (of_property_read_bool(priv->dev->of_node, "gpio-controller")) { 2534 ret = mt7530_setup_gpio(priv); 2535 if (ret) 2536 return ret; 2537 } 2538 #endif /* CONFIG_GPIOLIB */ 2539 2540 /* Flush the FDB table */ 2541 ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL); 2542 if (ret < 0) 2543 return ret; 2544 2545 return 0; 2546 } 2547 2548 static int 2549 mt7531_setup_common(struct dsa_switch *ds) 2550 { 2551 struct mt7530_priv *priv = ds->priv; 2552 int ret, i; 2553 2554 mt753x_trap_frames(priv); 2555 2556 /* Enable and reset MIB counters */ 2557 mt7530_mib_reset(ds); 2558 2559 /* Disable flooding on all ports */ 2560 mt7530_clear(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK | 2561 UNU_FFP_MASK); 2562 2563 for (i = 0; i < MT7530_NUM_PORTS; i++) { 2564 /* Clear link settings and enable force mode to force link down 2565 * on all ports until they're enabled later. 
2566 */ 2567 mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK | 2568 MT7531_FORCE_MODE, MT7531_FORCE_MODE); 2569 2570 /* Disable forwarding by default on all ports */ 2571 mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK, 2572 PCR_MATRIX_CLR); 2573 2574 /* Disable learning by default on all ports */ 2575 mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); 2576 2577 mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR); 2578 2579 if (dsa_is_cpu_port(ds, i)) { 2580 mt753x_cpu_port_enable(ds, i); 2581 } else { 2582 mt7530_port_disable(ds, i); 2583 2584 /* Set default PVID to 0 on all user ports */ 2585 mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK, 2586 G0_PORT_VID_DEF); 2587 } 2588 2589 /* Enable consistent egress tag */ 2590 mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK, 2591 PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); 2592 } 2593 2594 /* Flush the FDB table */ 2595 ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL); 2596 if (ret < 0) 2597 return ret; 2598 2599 return 0; 2600 } 2601 2602 static int 2603 mt7531_setup(struct dsa_switch *ds) 2604 { 2605 struct mt7530_priv *priv = ds->priv; 2606 struct mt7530_dummy_poll p; 2607 u32 val, id; 2608 int ret, i; 2609 2610 /* Reset the whole chip through the GPIO pin or memory-mapped registers, 2611 * depending on the type of hardware. 2612 */ 2613 if (priv->mcm) { 2614 reset_control_assert(priv->rstc); 2615 usleep_range(5000, 5100); 2616 reset_control_deassert(priv->rstc); 2617 } else { 2618 gpiod_set_value_cansleep(priv->reset, 0); 2619 usleep_range(5000, 5100); 2620 gpiod_set_value_cansleep(priv->reset, 1); 2621 } 2622 2623 /* Wait for the switch to come out of reset and stabilize */ 2624 INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP); 2625 ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0, 2626 20, 1000000); 2627 if (ret < 0) { 2628 dev_err(priv->dev, "reset timeout\n"); 2629 return ret; 2630 } 2631 2632 id = mt7530_read(priv, MT7531_CREV); 2633 id >>= CHIP_NAME_SHIFT; 2634 2635 if (id != MT7531_ID) { 2636 dev_err(priv->dev, "chip %x can't be supported\n", id); 2637 return -ENODEV; 2638 } 2639 2640 /* MT7531AE has got two SGMII units. One for port 5, one for port 6. 2641 * MT7531BE has got only one SGMII unit which is for port 6. 2642 */ 2643 val = mt7530_read(priv, MT7531_TOP_SIG_SR); 2644 priv->p5_sgmii = !!(val & PAD_DUAL_SGMII_EN); 2645 2646 /* Force link down on all ports before internal reset */ 2647 for (i = 0; i < MT7530_NUM_PORTS; i++) 2648 mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK); 2649 2650 /* Reset the switch through internal reset */ 2651 mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_SW_RST | SYS_CTRL_REG_RST); 2652 2653 if (!priv->p5_sgmii) { 2654 mt7531_pll_setup(priv); 2655 } else { 2656 /* Let ds->user_mii_bus access the external PHY. */ 2657 mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK, 2658 MT7531_EXT_P_MDC_11); 2659 mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK, 2660 MT7531_EXT_P_MDIO_12); 2661 } 2662 2663 if (!dsa_is_unused_port(ds, 5)) 2664 priv->p5_intf_sel = P5_INTF_SEL_GMAC5; 2665 2666 mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK, 2667 MT7531_GPIO0_INTERRUPT); 2668 2669 /* Enable Energy-Efficient Ethernet (EEE) and the PHY core PLL. Since no 2670 * phy_device has been created for the internal PHYs yet, the phylib MMD 2671 * accessors are not available, so the indirect 2672 * mt7531_ind_c45_phy_{read,write}() helpers are used instead.
2673 */ 2674 val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR, 2675 MDIO_MMD_VEND2, CORE_PLL_GROUP4); 2676 val |= MT7531_RG_SYSPLL_DMY2 | MT7531_PHY_PLL_BYPASS_MODE; 2677 val &= ~MT7531_PHY_PLL_OFF; 2678 mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2, 2679 CORE_PLL_GROUP4, val); 2680 2681 /* Disable EEE advertisement on the switch PHYs. */ 2682 for (i = MT753X_CTRL_PHY_ADDR; 2683 i < MT753X_CTRL_PHY_ADDR + MT7530_NUM_PHYS; i++) { 2684 mt7531_ind_c45_phy_write(priv, i, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 2685 0); 2686 } 2687 2688 mt7531_setup_common(ds); 2689 2690 /* Setup VLAN ID 0 for VLAN-unaware bridges */ 2691 ret = mt7530_setup_vlan0(priv); 2692 if (ret) 2693 return ret; 2694 2695 ds->assisted_learning_on_cpu_port = true; 2696 ds->mtu_enforcement_ingress = true; 2697 2698 return 0; 2699 } 2700 2701 static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port, 2702 struct phylink_config *config) 2703 { 2704 switch (port) { 2705 /* Ports which are connected to switch PHYs. There is no MII pinout. */ 2706 case 0 ... 4: 2707 __set_bit(PHY_INTERFACE_MODE_GMII, 2708 config->supported_interfaces); 2709 break; 2710 2711 /* Port 5 supports rgmii with delays, mii, and gmii. */ 2712 case 5: 2713 phy_interface_set_rgmii(config->supported_interfaces); 2714 __set_bit(PHY_INTERFACE_MODE_MII, 2715 config->supported_interfaces); 2716 __set_bit(PHY_INTERFACE_MODE_GMII, 2717 config->supported_interfaces); 2718 break; 2719 2720 /* Port 6 supports rgmii and trgmii. */ 2721 case 6: 2722 __set_bit(PHY_INTERFACE_MODE_RGMII, 2723 config->supported_interfaces); 2724 __set_bit(PHY_INTERFACE_MODE_TRGMII, 2725 config->supported_interfaces); 2726 break; 2727 } 2728 } 2729 2730 static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port, 2731 struct phylink_config *config) 2732 { 2733 struct mt7530_priv *priv = ds->priv; 2734 2735 switch (port) { 2736 /* Ports which are connected to switch PHYs. There is no MII pinout. */ 2737 case 0 ... 4: 2738 __set_bit(PHY_INTERFACE_MODE_GMII, 2739 config->supported_interfaces); 2740 break; 2741 2742 /* Port 5 supports rgmii with delays on MT7531BE, sgmii/802.3z on 2743 * MT7531AE. 2744 */ 2745 case 5: 2746 if (!priv->p5_sgmii) { 2747 phy_interface_set_rgmii(config->supported_interfaces); 2748 break; 2749 } 2750 fallthrough; 2751 2752 /* Port 6 supports sgmii/802.3z. */ 2753 case 6: 2754 __set_bit(PHY_INTERFACE_MODE_SGMII, 2755 config->supported_interfaces); 2756 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 2757 config->supported_interfaces); 2758 __set_bit(PHY_INTERFACE_MODE_2500BASEX, 2759 config->supported_interfaces); 2760 2761 config->mac_capabilities |= MAC_2500FD; 2762 break; 2763 } 2764 } 2765 2766 static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port, 2767 struct phylink_config *config) 2768 { 2769 switch (port) { 2770 /* Ports which are connected to switch PHYs. There is no MII pinout. */ 2771 case 0 ... 3: 2772 __set_bit(PHY_INTERFACE_MODE_INTERNAL, 2773 config->supported_interfaces); 2774 break; 2775 2776 /* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. 
*/ 2777 case 6: 2778 __set_bit(PHY_INTERFACE_MODE_INTERNAL, 2779 config->supported_interfaces); 2780 config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | 2781 MAC_10000FD; 2782 } 2783 } 2784 2785 static void 2786 mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode, 2787 phy_interface_t interface) 2788 { 2789 struct mt7530_priv *priv = ds->priv; 2790 2791 if (port == 5) 2792 mt7530_setup_port5(priv->ds, interface); 2793 else if (port == 6) 2794 mt7530_setup_port6(priv->ds, interface); 2795 } 2796 2797 static void mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port, 2798 phy_interface_t interface, 2799 struct phy_device *phydev) 2800 { 2801 u32 val; 2802 2803 val = mt7530_read(priv, MT7531_CLKGEN_CTRL); 2804 val |= GP_CLK_EN; 2805 val &= ~GP_MODE_MASK; 2806 val |= GP_MODE(MT7531_GP_MODE_RGMII); 2807 val &= ~CLK_SKEW_IN_MASK; 2808 val |= CLK_SKEW_IN(MT7531_CLK_SKEW_NO_CHG); 2809 val &= ~CLK_SKEW_OUT_MASK; 2810 val |= CLK_SKEW_OUT(MT7531_CLK_SKEW_NO_CHG); 2811 val |= TXCLK_NO_REVERSE | RXCLK_NO_DELAY; 2812 2813 /* Do not adjust rgmii delay when vendor phy driver presents. */ 2814 if (!phydev || phy_driver_is_genphy(phydev)) { 2815 val &= ~(TXCLK_NO_REVERSE | RXCLK_NO_DELAY); 2816 switch (interface) { 2817 case PHY_INTERFACE_MODE_RGMII: 2818 val |= TXCLK_NO_REVERSE; 2819 val |= RXCLK_NO_DELAY; 2820 break; 2821 case PHY_INTERFACE_MODE_RGMII_RXID: 2822 val |= TXCLK_NO_REVERSE; 2823 break; 2824 case PHY_INTERFACE_MODE_RGMII_TXID: 2825 val |= RXCLK_NO_DELAY; 2826 break; 2827 case PHY_INTERFACE_MODE_RGMII_ID: 2828 break; 2829 default: 2830 break; 2831 } 2832 } 2833 2834 mt7530_write(priv, MT7531_CLKGEN_CTRL, val); 2835 } 2836 2837 static void 2838 mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode, 2839 phy_interface_t interface) 2840 { 2841 struct mt7530_priv *priv = ds->priv; 2842 struct phy_device *phydev; 2843 struct dsa_port *dp; 2844 2845 if (phy_interface_mode_is_rgmii(interface)) { 2846 dp = dsa_to_port(ds, port); 2847 phydev = dp->user->phydev; 2848 mt7531_rgmii_setup(priv, port, interface, phydev); 2849 } 2850 } 2851 2852 static struct phylink_pcs * 2853 mt753x_phylink_mac_select_pcs(struct phylink_config *config, 2854 phy_interface_t interface) 2855 { 2856 struct dsa_port *dp = dsa_phylink_to_port(config); 2857 struct mt7530_priv *priv = dp->ds->priv; 2858 2859 switch (interface) { 2860 case PHY_INTERFACE_MODE_TRGMII: 2861 return &priv->pcs[dp->index].pcs; 2862 case PHY_INTERFACE_MODE_SGMII: 2863 case PHY_INTERFACE_MODE_1000BASEX: 2864 case PHY_INTERFACE_MODE_2500BASEX: 2865 return priv->ports[dp->index].sgmii_pcs; 2866 default: 2867 return NULL; 2868 } 2869 } 2870 2871 static void 2872 mt753x_phylink_mac_config(struct phylink_config *config, unsigned int mode, 2873 const struct phylink_link_state *state) 2874 { 2875 struct dsa_port *dp = dsa_phylink_to_port(config); 2876 struct dsa_switch *ds = dp->ds; 2877 struct mt7530_priv *priv; 2878 int port = dp->index; 2879 2880 priv = ds->priv; 2881 2882 if ((port == 5 || port == 6) && priv->info->mac_port_config) 2883 priv->info->mac_port_config(ds, port, mode, state->interface); 2884 2885 /* Are we connected to external phy */ 2886 if (port == 5 && dsa_is_user_port(ds, 5)) 2887 mt7530_set(priv, MT7530_PMCR_P(port), PMCR_EXT_PHY); 2888 } 2889 2890 static void mt753x_phylink_mac_link_down(struct phylink_config *config, 2891 unsigned int mode, 2892 phy_interface_t interface) 2893 { 2894 struct dsa_port *dp = dsa_phylink_to_port(config); 2895 struct mt7530_priv *priv = dp->ds->priv; 2896 2897 
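/* With the force mode bit still set from setup, clearing the link
 * settings bits forces the link down until the next mac_link_up().
 */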
mt7530_clear(priv, MT7530_PMCR_P(dp->index), PMCR_LINK_SETTINGS_MASK); 2898 } 2899 2900 static void mt753x_phylink_mac_link_up(struct phylink_config *config, 2901 struct phy_device *phydev, 2902 unsigned int mode, 2903 phy_interface_t interface, 2904 int speed, int duplex, 2905 bool tx_pause, bool rx_pause) 2906 { 2907 struct dsa_port *dp = dsa_phylink_to_port(config); 2908 struct mt7530_priv *priv = dp->ds->priv; 2909 u32 mcr; 2910 2911 mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK; 2912 2913 switch (speed) { 2914 case SPEED_1000: 2915 case SPEED_2500: 2916 case SPEED_10000: 2917 mcr |= PMCR_FORCE_SPEED_1000; 2918 break; 2919 case SPEED_100: 2920 mcr |= PMCR_FORCE_SPEED_100; 2921 break; 2922 } 2923 if (duplex == DUPLEX_FULL) { 2924 mcr |= PMCR_FORCE_FDX; 2925 if (tx_pause) 2926 mcr |= PMCR_TX_FC_EN; 2927 if (rx_pause) 2928 mcr |= PMCR_RX_FC_EN; 2929 } 2930 2931 if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) { 2932 switch (speed) { 2933 case SPEED_1000: 2934 case SPEED_2500: 2935 mcr |= PMCR_FORCE_EEE1G; 2936 break; 2937 case SPEED_100: 2938 mcr |= PMCR_FORCE_EEE100; 2939 break; 2940 } 2941 } 2942 2943 mt7530_set(priv, MT7530_PMCR_P(dp->index), mcr); 2944 } 2945 2946 static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port, 2947 struct phylink_config *config) 2948 { 2949 struct mt7530_priv *priv = ds->priv; 2950 2951 /* This switch only supports full-duplex at 1Gbps */ 2952 config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | 2953 MAC_10 | MAC_100 | MAC_1000FD; 2954 2955 priv->info->mac_port_get_caps(ds, port, config); 2956 } 2957 2958 static int mt753x_pcs_validate(struct phylink_pcs *pcs, 2959 unsigned long *supported, 2960 const struct phylink_link_state *state) 2961 { 2962 /* Autonegotiation is not supported in TRGMII nor 802.3z modes */ 2963 if (state->interface == PHY_INTERFACE_MODE_TRGMII || 2964 phy_interface_mode_is_8023z(state->interface)) 2965 phylink_clear(supported, Autoneg); 2966 2967 return 0; 2968 } 2969 2970 static void mt7530_pcs_get_state(struct phylink_pcs *pcs, 2971 struct phylink_link_state *state) 2972 { 2973 struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv; 2974 int port = pcs_to_mt753x_pcs(pcs)->port; 2975 u32 pmsr; 2976 2977 pmsr = mt7530_read(priv, MT7530_PMSR_P(port)); 2978 2979 state->link = (pmsr & PMSR_LINK); 2980 state->an_complete = state->link; 2981 state->duplex = !!(pmsr & PMSR_DPX); 2982 2983 switch (pmsr & PMSR_SPEED_MASK) { 2984 case PMSR_SPEED_10: 2985 state->speed = SPEED_10; 2986 break; 2987 case PMSR_SPEED_100: 2988 state->speed = SPEED_100; 2989 break; 2990 case PMSR_SPEED_1000: 2991 state->speed = SPEED_1000; 2992 break; 2993 default: 2994 state->speed = SPEED_UNKNOWN; 2995 break; 2996 } 2997 2998 state->pause &= ~(MLO_PAUSE_RX | MLO_PAUSE_TX); 2999 if (pmsr & PMSR_RX_FC) 3000 state->pause |= MLO_PAUSE_RX; 3001 if (pmsr & PMSR_TX_FC) 3002 state->pause |= MLO_PAUSE_TX; 3003 } 3004 3005 static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, 3006 phy_interface_t interface, 3007 const unsigned long *advertising, 3008 bool permit_pause_to_mac) 3009 { 3010 return 0; 3011 } 3012 3013 static void mt7530_pcs_an_restart(struct phylink_pcs *pcs) 3014 { 3015 } 3016 3017 static const struct phylink_pcs_ops mt7530_pcs_ops = { 3018 .pcs_validate = mt753x_pcs_validate, 3019 .pcs_get_state = mt7530_pcs_get_state, 3020 .pcs_config = mt753x_pcs_config, 3021 .pcs_an_restart = mt7530_pcs_an_restart, 3022 }; 3023 3024 static int 3025 mt753x_setup(struct dsa_switch *ds) 3026 { 3027 struct 
mt7530_priv *priv = ds->priv; 3028 int ret = priv->info->sw_setup(ds); 3029 int i; 3030 3031 if (ret) 3032 return ret; 3033 3034 ret = mt7530_setup_irq(priv); 3035 if (ret) 3036 return ret; 3037 3038 ret = mt7530_setup_mdio(priv); 3039 if (ret && priv->irq) 3040 mt7530_free_irq_common(priv); 3041 3042 /* Initialise the PCS devices */ 3043 for (i = 0; i < priv->ds->num_ports; i++) { 3044 priv->pcs[i].pcs.ops = priv->info->pcs_ops; 3045 priv->pcs[i].pcs.neg_mode = true; 3046 priv->pcs[i].priv = priv; 3047 priv->pcs[i].port = i; 3048 } 3049 3050 if (priv->create_sgmii) { 3051 ret = priv->create_sgmii(priv); 3052 if (ret && priv->irq) 3053 mt7530_free_irq(priv); 3054 } 3055 3056 return ret; 3057 } 3058 3059 static int mt753x_get_mac_eee(struct dsa_switch *ds, int port, 3060 struct ethtool_keee *e) 3061 { 3062 struct mt7530_priv *priv = ds->priv; 3063 u32 eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port)); 3064 3065 e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN); 3066 e->tx_lpi_timer = GET_LPI_THRESH(eeecr); 3067 3068 return 0; 3069 } 3070 3071 static int mt753x_set_mac_eee(struct dsa_switch *ds, int port, 3072 struct ethtool_keee *e) 3073 { 3074 struct mt7530_priv *priv = ds->priv; 3075 u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN; 3076 3077 if (e->tx_lpi_timer > 0xFFF) 3078 return -EINVAL; 3079 3080 set = SET_LPI_THRESH(e->tx_lpi_timer); 3081 if (!e->tx_lpi_enabled) 3082 /* Force LPI Mode without a delay */ 3083 set |= LPI_MODE_EN; 3084 mt7530_rmw(priv, MT7530_PMEEECR_P(port), mask, set); 3085 3086 return 0; 3087 } 3088 3089 static void 3090 mt753x_conduit_state_change(struct dsa_switch *ds, 3091 const struct net_device *conduit, 3092 bool operational) 3093 { 3094 struct dsa_port *cpu_dp = conduit->dsa_ptr; 3095 struct mt7530_priv *priv = ds->priv; 3096 int val = 0; 3097 u8 mask; 3098 3099 /* Set the CPU port to trap frames to for MT7530. Trapped frames will be 3100 * forwarded to the numerically smallest CPU port whose conduit 3101 * interface is up. 
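 * For example, if the conduits of CPU ports 5 and 6 are both
 * operational, __ffs() below selects port 5; if port 5's conduit then
 * goes down, the CPU_PORT field is rewritten to point at port 6.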
3102 */ 3103 if (priv->id != ID_MT7530 && priv->id != ID_MT7621) 3104 return; 3105 3106 mask = BIT(cpu_dp->index); 3107 3108 if (operational) 3109 priv->active_cpu_ports |= mask; 3110 else 3111 priv->active_cpu_ports &= ~mask; 3112 3113 if (priv->active_cpu_ports) 3114 val = CPU_EN | CPU_PORT(__ffs(priv->active_cpu_ports)); 3115 3116 mt7530_rmw(priv, MT7530_MFC, CPU_EN | CPU_PORT_MASK, val); 3117 } 3118 3119 static int mt7988_setup(struct dsa_switch *ds) 3120 { 3121 struct mt7530_priv *priv = ds->priv; 3122 3123 /* Reset the switch */ 3124 reset_control_assert(priv->rstc); 3125 usleep_range(20, 50); 3126 reset_control_deassert(priv->rstc); 3127 usleep_range(20, 50); 3128 3129 /* Reset the switch PHYs */ 3130 mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_PHY_RST); 3131 3132 return mt7531_setup_common(ds); 3133 } 3134 3135 const struct dsa_switch_ops mt7530_switch_ops = { 3136 .get_tag_protocol = mtk_get_tag_protocol, 3137 .setup = mt753x_setup, 3138 .preferred_default_local_cpu_port = mt753x_preferred_default_local_cpu_port, 3139 .get_strings = mt7530_get_strings, 3140 .get_ethtool_stats = mt7530_get_ethtool_stats, 3141 .get_sset_count = mt7530_get_sset_count, 3142 .set_ageing_time = mt7530_set_ageing_time, 3143 .port_enable = mt7530_port_enable, 3144 .port_disable = mt7530_port_disable, 3145 .port_change_mtu = mt7530_port_change_mtu, 3146 .port_max_mtu = mt7530_port_max_mtu, 3147 .port_stp_state_set = mt7530_stp_state_set, 3148 .port_pre_bridge_flags = mt7530_port_pre_bridge_flags, 3149 .port_bridge_flags = mt7530_port_bridge_flags, 3150 .port_bridge_join = mt7530_port_bridge_join, 3151 .port_bridge_leave = mt7530_port_bridge_leave, 3152 .port_fdb_add = mt7530_port_fdb_add, 3153 .port_fdb_del = mt7530_port_fdb_del, 3154 .port_fdb_dump = mt7530_port_fdb_dump, 3155 .port_mdb_add = mt7530_port_mdb_add, 3156 .port_mdb_del = mt7530_port_mdb_del, 3157 .port_vlan_filtering = mt7530_port_vlan_filtering, 3158 .port_vlan_add = mt7530_port_vlan_add, 3159 .port_vlan_del = mt7530_port_vlan_del, 3160 .port_mirror_add = mt753x_port_mirror_add, 3161 .port_mirror_del = mt753x_port_mirror_del, 3162 .phylink_get_caps = mt753x_phylink_get_caps, 3163 .get_mac_eee = mt753x_get_mac_eee, 3164 .set_mac_eee = mt753x_set_mac_eee, 3165 .conduit_state_change = mt753x_conduit_state_change, 3166 }; 3167 EXPORT_SYMBOL_GPL(mt7530_switch_ops); 3168 3169 static const struct phylink_mac_ops mt753x_phylink_mac_ops = { 3170 .mac_select_pcs = mt753x_phylink_mac_select_pcs, 3171 .mac_config = mt753x_phylink_mac_config, 3172 .mac_link_down = mt753x_phylink_mac_link_down, 3173 .mac_link_up = mt753x_phylink_mac_link_up, 3174 }; 3175 3176 const struct mt753x_info mt753x_table[] = { 3177 [ID_MT7621] = { 3178 .id = ID_MT7621, 3179 .pcs_ops = &mt7530_pcs_ops, 3180 .sw_setup = mt7530_setup, 3181 .phy_read_c22 = mt7530_phy_read_c22, 3182 .phy_write_c22 = mt7530_phy_write_c22, 3183 .phy_read_c45 = mt7530_phy_read_c45, 3184 .phy_write_c45 = mt7530_phy_write_c45, 3185 .mac_port_get_caps = mt7530_mac_port_get_caps, 3186 .mac_port_config = mt7530_mac_config, 3187 }, 3188 [ID_MT7530] = { 3189 .id = ID_MT7530, 3190 .pcs_ops = &mt7530_pcs_ops, 3191 .sw_setup = mt7530_setup, 3192 .phy_read_c22 = mt7530_phy_read_c22, 3193 .phy_write_c22 = mt7530_phy_write_c22, 3194 .phy_read_c45 = mt7530_phy_read_c45, 3195 .phy_write_c45 = mt7530_phy_write_c45, 3196 .mac_port_get_caps = mt7530_mac_port_get_caps, 3197 .mac_port_config = mt7530_mac_config, 3198 }, 3199 [ID_MT7531] = { 3200 .id = ID_MT7531, 3201 .pcs_ops = &mt7530_pcs_ops, 3202 .sw_setup = 
mt7531_setup, 3203 .phy_read_c22 = mt7531_ind_c22_phy_read, 3204 .phy_write_c22 = mt7531_ind_c22_phy_write, 3205 .phy_read_c45 = mt7531_ind_c45_phy_read, 3206 .phy_write_c45 = mt7531_ind_c45_phy_write, 3207 .mac_port_get_caps = mt7531_mac_port_get_caps, 3208 .mac_port_config = mt7531_mac_config, 3209 }, 3210 [ID_MT7988] = { 3211 .id = ID_MT7988, 3212 .pcs_ops = &mt7530_pcs_ops, 3213 .sw_setup = mt7988_setup, 3214 .phy_read_c22 = mt7531_ind_c22_phy_read, 3215 .phy_write_c22 = mt7531_ind_c22_phy_write, 3216 .phy_read_c45 = mt7531_ind_c45_phy_read, 3217 .phy_write_c45 = mt7531_ind_c45_phy_write, 3218 .mac_port_get_caps = mt7988_mac_port_get_caps, 3219 }, 3220 }; 3221 EXPORT_SYMBOL_GPL(mt753x_table); 3222 3223 int 3224 mt7530_probe_common(struct mt7530_priv *priv) 3225 { 3226 struct device *dev = priv->dev; 3227 3228 priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL); 3229 if (!priv->ds) 3230 return -ENOMEM; 3231 3232 priv->ds->dev = dev; 3233 priv->ds->num_ports = MT7530_NUM_PORTS; 3234 3235 /* Get the hardware identifier from the devicetree node. 3236 * We will need it for some of the clock and regulator setup. 3237 */ 3238 priv->info = of_device_get_match_data(dev); 3239 if (!priv->info) 3240 return -EINVAL; 3241 3242 /* Sanity check if these required device operations are filled 3243 * properly. 3244 */ 3245 if (!priv->info->sw_setup || !priv->info->phy_read_c22 || 3246 !priv->info->phy_write_c22 || !priv->info->mac_port_get_caps) 3247 return -EINVAL; 3248 3249 priv->id = priv->info->id; 3250 priv->dev = dev; 3251 priv->ds->priv = priv; 3252 priv->ds->ops = &mt7530_switch_ops; 3253 priv->ds->phylink_mac_ops = &mt753x_phylink_mac_ops; 3254 mutex_init(&priv->reg_mutex); 3255 dev_set_drvdata(dev, priv); 3256 3257 return 0; 3258 } 3259 EXPORT_SYMBOL_GPL(mt7530_probe_common); 3260 3261 void 3262 mt7530_remove_common(struct mt7530_priv *priv) 3263 { 3264 if (priv->irq) 3265 mt7530_free_irq(priv); 3266 3267 dsa_unregister_switch(priv->ds); 3268 3269 mutex_destroy(&priv->reg_mutex); 3270 } 3271 EXPORT_SYMBOL_GPL(mt7530_remove_common); 3272 3273 MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); 3274 MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch"); 3275 MODULE_LICENSE("GPL"); 3276
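/* Usage note (illustrative only): mt7530_probe_common() and
 * mt7530_remove_common() are meant to be called from a bus glue driver
 * (MDIO or MMIO) that owns the actual probe/remove callbacks. A minimal
 * sketch of such a glue probe, assuming a hypothetical foo_probe() for an
 * MDIO-attached switch, looks roughly like this:
 *
 *	static int foo_probe(struct mdio_device *mdiodev)
 *	{
 *		struct mt7530_priv *priv;
 *		int ret;
 *
 *		priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *
 *		priv->bus = mdiodev->bus;
 *		priv->dev = &mdiodev->dev;
 *		// ... set up priv->regmap, reset line and regulators here ...
 *
 *		ret = mt7530_probe_common(priv);
 *		if (ret)
 *			return ret;
 *
 *		return dsa_register_switch(priv->ds);
 *	}
 *
 * The matching remove path calls mt7530_remove_common(priv), which
 * unregisters the switch and tears down the register mutex.
 */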