1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Mediatek MT7530 DSA Switch driver 4 * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com> 5 */ 6 #include <linux/etherdevice.h> 7 #include <linux/if_bridge.h> 8 #include <linux/iopoll.h> 9 #include <linux/mdio.h> 10 #include <linux/mfd/syscon.h> 11 #include <linux/module.h> 12 #include <linux/netdevice.h> 13 #include <linux/of_irq.h> 14 #include <linux/of_mdio.h> 15 #include <linux/of_net.h> 16 #include <linux/of_platform.h> 17 #include <linux/phylink.h> 18 #include <linux/regmap.h> 19 #include <linux/regulator/consumer.h> 20 #include <linux/reset.h> 21 #include <linux/gpio/consumer.h> 22 #include <linux/gpio/driver.h> 23 #include <net/dsa.h> 24 25 #include "mt7530.h" 26 27 static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs) 28 { 29 return container_of(pcs, struct mt753x_pcs, pcs); 30 } 31 32 /* String, offset, and register size in bytes if different from 4 bytes */ 33 static const struct mt7530_mib_desc mt7530_mib[] = { 34 MIB_DESC(1, 0x00, "TxDrop"), 35 MIB_DESC(1, 0x04, "TxCrcErr"), 36 MIB_DESC(1, 0x08, "TxUnicast"), 37 MIB_DESC(1, 0x0c, "TxMulticast"), 38 MIB_DESC(1, 0x10, "TxBroadcast"), 39 MIB_DESC(1, 0x14, "TxCollision"), 40 MIB_DESC(1, 0x18, "TxSingleCollision"), 41 MIB_DESC(1, 0x1c, "TxMultipleCollision"), 42 MIB_DESC(1, 0x20, "TxDeferred"), 43 MIB_DESC(1, 0x24, "TxLateCollision"), 44 MIB_DESC(1, 0x28, "TxExcessiveCollistion"), 45 MIB_DESC(1, 0x2c, "TxPause"), 46 MIB_DESC(1, 0x30, "TxPktSz64"), 47 MIB_DESC(1, 0x34, "TxPktSz65To127"), 48 MIB_DESC(1, 0x38, "TxPktSz128To255"), 49 MIB_DESC(1, 0x3c, "TxPktSz256To511"), 50 MIB_DESC(1, 0x40, "TxPktSz512To1023"), 51 MIB_DESC(1, 0x44, "Tx1024ToMax"), 52 MIB_DESC(2, 0x48, "TxBytes"), 53 MIB_DESC(1, 0x60, "RxDrop"), 54 MIB_DESC(1, 0x64, "RxFiltering"), 55 MIB_DESC(1, 0x68, "RxUnicast"), 56 MIB_DESC(1, 0x6c, "RxMulticast"), 57 MIB_DESC(1, 0x70, "RxBroadcast"), 58 MIB_DESC(1, 0x74, "RxAlignErr"), 59 MIB_DESC(1, 0x78, "RxCrcErr"), 60 MIB_DESC(1, 0x7c, "RxUnderSizeErr"), 61 MIB_DESC(1, 0x80, "RxFragErr"), 62 MIB_DESC(1, 0x84, "RxOverSzErr"), 63 MIB_DESC(1, 0x88, "RxJabberErr"), 64 MIB_DESC(1, 0x8c, "RxPause"), 65 MIB_DESC(1, 0x90, "RxPktSz64"), 66 MIB_DESC(1, 0x94, "RxPktSz65To127"), 67 MIB_DESC(1, 0x98, "RxPktSz128To255"), 68 MIB_DESC(1, 0x9c, "RxPktSz256To511"), 69 MIB_DESC(1, 0xa0, "RxPktSz512To1023"), 70 MIB_DESC(1, 0xa4, "RxPktSz1024ToMax"), 71 MIB_DESC(2, 0xa8, "RxBytes"), 72 MIB_DESC(1, 0xb0, "RxCtrlDrop"), 73 MIB_DESC(1, 0xb4, "RxIngressDrop"), 74 MIB_DESC(1, 0xb8, "RxArlDrop"), 75 }; 76 77 static void 78 mt7530_mutex_lock(struct mt7530_priv *priv) 79 { 80 if (priv->bus) 81 mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED); 82 } 83 84 static void 85 mt7530_mutex_unlock(struct mt7530_priv *priv) 86 { 87 if (priv->bus) 88 mutex_unlock(&priv->bus->mdio_lock); 89 } 90 91 static void 92 core_write(struct mt7530_priv *priv, u32 reg, u32 val) 93 { 94 struct mii_bus *bus = priv->bus; 95 int ret; 96 97 mt7530_mutex_lock(priv); 98 99 /* Write the desired MMD Devad */ 100 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), 101 MII_MMD_CTRL, MDIO_MMD_VEND2); 102 if (ret < 0) 103 goto err; 104 105 /* Write the desired MMD register address */ 106 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), 107 MII_MMD_DATA, reg); 108 if (ret < 0) 109 goto err; 110 111 /* Select the Function : DATA with no post increment */ 112 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), 113 MII_MMD_CTRL, MDIO_MMD_VEND2 | MII_MMD_CTRL_NOINCR); 114 if (ret < 0) 115 goto err; 116 117 /* Write the data into MMD's selected register */ 118 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), 119 MII_MMD_DATA, val); 120 err: 121 if (ret < 0) 122 dev_err(&bus->dev, "failed to write mmd register\n"); 123 124 mt7530_mutex_unlock(priv); 125 } 126 127 static void 128 core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set) 129 { 130 struct mii_bus *bus = priv->bus; 131 u32 val; 132 int ret; 133 134 mt7530_mutex_lock(priv); 135 136 /* Write the desired MMD Devad */ 137 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), 138 MII_MMD_CTRL, MDIO_MMD_VEND2); 139 if (ret < 0) 140 goto err; 141 142 /* Write the desired MMD register address */ 143 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), 144 MII_MMD_DATA, reg); 145 if (ret < 0) 146 goto err; 147 148 /* Select the Function : DATA with no post increment */ 149 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), 150 MII_MMD_CTRL, MDIO_MMD_VEND2 | MII_MMD_CTRL_NOINCR); 151 if (ret < 0) 152 goto err; 153 154 /* Read the content of the MMD's selected register */ 155 val = bus->read(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), 156 MII_MMD_DATA); 157 val &= ~mask; 158 val |= set; 159 /* Write the data into MMD's selected register */ 160 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), 161 MII_MMD_DATA, val); 162 err: 163 if (ret < 0) 164 dev_err(&bus->dev, "failed to write mmd register\n"); 165 166 mt7530_mutex_unlock(priv); 167 } 168 169 static void 170 core_set(struct mt7530_priv *priv, u32 reg, u32 val) 171 { 172 core_rmw(priv, reg, 0, val); 173 } 174 175 static void 176 core_clear(struct mt7530_priv *priv, u32 reg, u32 val) 177 { 178 core_rmw(priv, reg, val, 0); 179 } 180 181 static int 182 mt7530_mii_write(struct mt7530_priv *priv, u32 reg, u32 val) 183 { 184 int ret; 185 186 ret = regmap_write(priv->regmap, reg, val); 187 188 if (ret < 0) 189 dev_err(priv->dev, 190 "failed to write mt7530 register\n"); 191 192 return ret; 193 } 194 195 static u32 196 mt7530_mii_read(struct mt7530_priv *priv, u32 reg) 197 { 198 int ret; 199 u32 val; 200 201 ret = regmap_read(priv->regmap, reg, &val); 202 if (ret) { 203 WARN_ON_ONCE(1); 204 dev_err(priv->dev, 205 "failed to read mt7530 register\n"); 206 return 0; 207 } 208 209 return val; 210 } 211 212 static void 213 mt7530_write(struct mt7530_priv *priv, u32 reg, u32 val) 214 { 215 mt7530_mutex_lock(priv); 216 217 mt7530_mii_write(priv, reg, val); 218 219 mt7530_mutex_unlock(priv); 220 } 221 222 static u32 223 _mt7530_unlocked_read(struct mt7530_dummy_poll *p) 224 { 225 return mt7530_mii_read(p->priv, p->reg); 226 } 227 228 static u32 229 _mt7530_read(struct mt7530_dummy_poll *p) 230 { 231 u32 val; 232 233 mt7530_mutex_lock(p->priv); 234 235 val = mt7530_mii_read(p->priv, p->reg); 236 237 mt7530_mutex_unlock(p->priv); 238 239 return val; 240 } 241 242 static u32 243 mt7530_read(struct mt7530_priv *priv, u32 reg) 244 { 245 struct mt7530_dummy_poll p; 246 247 INIT_MT7530_DUMMY_POLL(&p, priv, reg); 248 return _mt7530_read(&p); 249 } 250 251 static void 252 mt7530_rmw(struct mt7530_priv *priv, u32 reg, 253 u32 mask, u32 set) 254 { 255 mt7530_mutex_lock(priv); 256 257 regmap_update_bits(priv->regmap, reg, mask, set); 258 259 mt7530_mutex_unlock(priv); 260 } 261 262 static void 263 mt7530_set(struct mt7530_priv *priv, u32 reg, u32 val) 264 { 265 mt7530_rmw(priv, reg, val, val); 266 } 267 268 static void 269 mt7530_clear(struct mt7530_priv *priv, u32 reg, u32 val) 270 { 271 mt7530_rmw(priv, reg, val, 0); 272 } 273 274 static int 275 mt7530_fdb_cmd(struct mt7530_priv *priv, enum mt7530_fdb_cmd cmd, u32 *rsp) 276 { 277 u32 val; 278 int ret; 279 struct mt7530_dummy_poll p; 280 281 /* Set the command operating upon the MAC address entries */ 282 val = ATC_BUSY | ATC_MAT(0) | cmd; 283 mt7530_write(priv, MT7530_ATC, val); 284 285 INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_ATC); 286 ret = readx_poll_timeout(_mt7530_read, &p, val, 287 !(val & ATC_BUSY), 20, 20000); 288 if (ret < 0) { 289 dev_err(priv->dev, "reset timeout\n"); 290 return ret; 291 } 292 293 /* Additional sanity for read command if the specified 294 * entry is invalid 295 */ 296 val = mt7530_read(priv, MT7530_ATC); 297 if ((cmd == MT7530_FDB_READ) && (val & ATC_INVALID)) 298 return -EINVAL; 299 300 if (rsp) 301 *rsp = val; 302 303 return 0; 304 } 305 306 static void 307 mt7530_fdb_read(struct mt7530_priv *priv, struct mt7530_fdb *fdb) 308 { 309 u32 reg[3]; 310 int i; 311 312 /* Read from ARL table into an array */ 313 for (i = 0; i < 3; i++) { 314 reg[i] = mt7530_read(priv, MT7530_TSRA1 + (i * 4)); 315 316 dev_dbg(priv->dev, "%s(%d) reg[%d]=0x%x\n", 317 __func__, __LINE__, i, reg[i]); 318 } 319 320 fdb->vid = (reg[1] >> CVID) & CVID_MASK; 321 fdb->aging = (reg[2] >> AGE_TIMER) & AGE_TIMER_MASK; 322 fdb->port_mask = (reg[2] >> PORT_MAP) & PORT_MAP_MASK; 323 fdb->mac[0] = (reg[0] >> MAC_BYTE_0) & MAC_BYTE_MASK; 324 fdb->mac[1] = (reg[0] >> MAC_BYTE_1) & MAC_BYTE_MASK; 325 fdb->mac[2] = (reg[0] >> MAC_BYTE_2) & MAC_BYTE_MASK; 326 fdb->mac[3] = (reg[0] >> MAC_BYTE_3) & MAC_BYTE_MASK; 327 fdb->mac[4] = (reg[1] >> MAC_BYTE_4) & MAC_BYTE_MASK; 328 fdb->mac[5] = (reg[1] >> MAC_BYTE_5) & MAC_BYTE_MASK; 329 fdb->noarp = ((reg[2] >> ENT_STATUS) & ENT_STATUS_MASK) == STATIC_ENT; 330 } 331 332 static void 333 mt7530_fdb_write(struct mt7530_priv *priv, u16 vid, 334 u8 port_mask, const u8 *mac, 335 u8 aging, u8 type) 336 { 337 u32 reg[3] = { 0 }; 338 int i; 339 340 reg[1] |= vid & CVID_MASK; 341 reg[1] |= ATA2_IVL; 342 reg[1] |= ATA2_FID(FID_BRIDGED); 343 reg[2] |= (aging & AGE_TIMER_MASK) << AGE_TIMER; 344 reg[2] |= (port_mask & PORT_MAP_MASK) << PORT_MAP; 345 /* STATIC_ENT indicate that entry is static wouldn't 346 * be aged out and STATIC_EMP specified as erasing an 347 * entry 348 */ 349 reg[2] |= (type & ENT_STATUS_MASK) << ENT_STATUS; 350 reg[1] |= mac[5] << MAC_BYTE_5; 351 reg[1] |= mac[4] << MAC_BYTE_4; 352 reg[0] |= mac[3] << MAC_BYTE_3; 353 reg[0] |= mac[2] << MAC_BYTE_2; 354 reg[0] |= mac[1] << MAC_BYTE_1; 355 reg[0] |= mac[0] << MAC_BYTE_0; 356 357 /* Write array into the ARL table */ 358 for (i = 0; i < 3; i++) 359 mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]); 360 } 361 362 /* Set up switch core clock for MT7530 */ 363 static void mt7530_pll_setup(struct mt7530_priv *priv) 364 { 365 /* Disable core clock */ 366 core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); 367 368 /* Disable PLL */ 369 core_write(priv, CORE_GSWPLL_GRP1, 0); 370 371 /* Set core clock into 500Mhz */ 372 core_write(priv, CORE_GSWPLL_GRP2, 373 RG_GSWPLL_POSDIV_500M(1) | 374 RG_GSWPLL_FBKDIV_500M(25)); 375 376 /* Enable PLL */ 377 core_write(priv, CORE_GSWPLL_GRP1, 378 RG_GSWPLL_EN_PRE | 379 RG_GSWPLL_POSDIV_200M(2) | 380 RG_GSWPLL_FBKDIV_200M(32)); 381 382 udelay(20); 383 384 /* Enable core clock */ 385 core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); 386 } 387 388 /* If port 6 is available as a CPU port, always prefer that as the default, 389 * otherwise don't care. 390 */ 391 static struct dsa_port * 392 mt753x_preferred_default_local_cpu_port(struct dsa_switch *ds) 393 { 394 struct dsa_port *cpu_dp = dsa_to_port(ds, 6); 395 396 if (dsa_port_is_cpu(cpu_dp)) 397 return cpu_dp; 398 399 return NULL; 400 } 401 402 /* Setup port 6 interface mode and TRGMII TX circuit */ 403 static void 404 mt7530_setup_port6(struct dsa_switch *ds, phy_interface_t interface) 405 { 406 struct mt7530_priv *priv = ds->priv; 407 u32 ncpo1, ssc_delta, xtal; 408 409 /* Disable the MT7530 TRGMII clocks */ 410 core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN); 411 412 if (interface == PHY_INTERFACE_MODE_RGMII) { 413 mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK, 414 P6_INTF_MODE(0)); 415 return; 416 } 417 418 mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK, P6_INTF_MODE(1)); 419 420 xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK; 421 422 if (xtal == HWTRAP_XTAL_25MHZ) 423 ssc_delta = 0x57; 424 else 425 ssc_delta = 0x87; 426 427 if (priv->id == ID_MT7621) { 428 /* PLL frequency: 125MHz: 1.0GBit */ 429 if (xtal == HWTRAP_XTAL_40MHZ) 430 ncpo1 = 0x0640; 431 if (xtal == HWTRAP_XTAL_25MHZ) 432 ncpo1 = 0x0a00; 433 } else { /* PLL frequency: 250MHz: 2.0Gbit */ 434 if (xtal == HWTRAP_XTAL_40MHZ) 435 ncpo1 = 0x0c80; 436 if (xtal == HWTRAP_XTAL_25MHZ) 437 ncpo1 = 0x1400; 438 } 439 440 /* Setup the MT7530 TRGMII Tx Clock */ 441 core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1)); 442 core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0)); 443 core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta)); 444 core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta)); 445 core_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN | 446 RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN); 447 core_write(priv, CORE_PLL_GROUP2, RG_SYSPLL_EN_NORMAL | 448 RG_SYSPLL_VODEN | RG_SYSPLL_POSDIV(1)); 449 core_write(priv, CORE_PLL_GROUP7, RG_LCDDS_PCW_NCPO_CHG | 450 RG_LCCDS_C(3) | RG_LCDDS_PWDB | RG_LCDDS_ISO_EN); 451 452 /* Enable the MT7530 TRGMII clocks */ 453 core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN); 454 } 455 456 static void 457 mt7531_pll_setup(struct mt7530_priv *priv) 458 { 459 u32 top_sig; 460 u32 hwstrap; 461 u32 xtal; 462 u32 val; 463 464 val = mt7530_read(priv, MT7531_CREV); 465 top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR); 466 hwstrap = mt7530_read(priv, MT7531_HWTRAP); 467 if ((val & CHIP_REV_M) > 0) 468 xtal = (top_sig & PAD_MCM_SMI_EN) ? HWTRAP_XTAL_FSEL_40MHZ : 469 HWTRAP_XTAL_FSEL_25MHZ; 470 else 471 xtal = hwstrap & HWTRAP_XTAL_FSEL_MASK; 472 473 /* Step 1 : Disable MT7531 COREPLL */ 474 val = mt7530_read(priv, MT7531_PLLGP_EN); 475 val &= ~EN_COREPLL; 476 mt7530_write(priv, MT7531_PLLGP_EN, val); 477 478 /* Step 2: switch to XTAL output */ 479 val = mt7530_read(priv, MT7531_PLLGP_EN); 480 val |= SW_CLKSW; 481 mt7530_write(priv, MT7531_PLLGP_EN, val); 482 483 val = mt7530_read(priv, MT7531_PLLGP_CR0); 484 val &= ~RG_COREPLL_EN; 485 mt7530_write(priv, MT7531_PLLGP_CR0, val); 486 487 /* Step 3: disable PLLGP and enable program PLLGP */ 488 val = mt7530_read(priv, MT7531_PLLGP_EN); 489 val |= SW_PLLGP; 490 mt7530_write(priv, MT7531_PLLGP_EN, val); 491 492 /* Step 4: program COREPLL output frequency to 500MHz */ 493 val = mt7530_read(priv, MT7531_PLLGP_CR0); 494 val &= ~RG_COREPLL_POSDIV_M; 495 val |= 2 << RG_COREPLL_POSDIV_S; 496 mt7530_write(priv, MT7531_PLLGP_CR0, val); 497 usleep_range(25, 35); 498 499 switch (xtal) { 500 case HWTRAP_XTAL_FSEL_25MHZ: 501 val = mt7530_read(priv, MT7531_PLLGP_CR0); 502 val &= ~RG_COREPLL_SDM_PCW_M; 503 val |= 0x140000 << RG_COREPLL_SDM_PCW_S; 504 mt7530_write(priv, MT7531_PLLGP_CR0, val); 505 break; 506 case HWTRAP_XTAL_FSEL_40MHZ: 507 val = mt7530_read(priv, MT7531_PLLGP_CR0); 508 val &= ~RG_COREPLL_SDM_PCW_M; 509 val |= 0x190000 << RG_COREPLL_SDM_PCW_S; 510 mt7530_write(priv, MT7531_PLLGP_CR0, val); 511 break; 512 } 513 514 /* Set feedback divide ratio update signal to high */ 515 val = mt7530_read(priv, MT7531_PLLGP_CR0); 516 val |= RG_COREPLL_SDM_PCW_CHG; 517 mt7530_write(priv, MT7531_PLLGP_CR0, val); 518 /* Wait for at least 16 XTAL clocks */ 519 usleep_range(10, 20); 520 521 /* Step 5: set feedback divide ratio update signal to low */ 522 val = mt7530_read(priv, MT7531_PLLGP_CR0); 523 val &= ~RG_COREPLL_SDM_PCW_CHG; 524 mt7530_write(priv, MT7531_PLLGP_CR0, val); 525 526 /* Enable 325M clock for SGMII */ 527 mt7530_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000); 528 529 /* Enable 250SSC clock for RGMII */ 530 mt7530_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000); 531 532 /* Step 6: Enable MT7531 PLL */ 533 val = mt7530_read(priv, MT7531_PLLGP_CR0); 534 val |= RG_COREPLL_EN; 535 mt7530_write(priv, MT7531_PLLGP_CR0, val); 536 537 val = mt7530_read(priv, MT7531_PLLGP_EN); 538 val |= EN_COREPLL; 539 mt7530_write(priv, MT7531_PLLGP_EN, val); 540 usleep_range(25, 35); 541 } 542 543 static void 544 mt7530_mib_reset(struct dsa_switch *ds) 545 { 546 struct mt7530_priv *priv = ds->priv; 547 548 mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_FLUSH); 549 mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE); 550 } 551 552 static int mt7530_phy_read_c22(struct mt7530_priv *priv, int port, int regnum) 553 { 554 return mdiobus_read_nested(priv->bus, port, regnum); 555 } 556 557 static int mt7530_phy_write_c22(struct mt7530_priv *priv, int port, int regnum, 558 u16 val) 559 { 560 return mdiobus_write_nested(priv->bus, port, regnum, val); 561 } 562 563 static int mt7530_phy_read_c45(struct mt7530_priv *priv, int port, 564 int devad, int regnum) 565 { 566 return mdiobus_c45_read_nested(priv->bus, port, devad, regnum); 567 } 568 569 static int mt7530_phy_write_c45(struct mt7530_priv *priv, int port, int devad, 570 int regnum, u16 val) 571 { 572 return mdiobus_c45_write_nested(priv->bus, port, devad, regnum, val); 573 } 574 575 static int 576 mt7531_ind_c45_phy_read(struct mt7530_priv *priv, int port, int devad, 577 int regnum) 578 { 579 struct mt7530_dummy_poll p; 580 u32 reg, val; 581 int ret; 582 583 INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC); 584 585 mt7530_mutex_lock(priv); 586 587 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 588 !(val & MT7531_PHY_ACS_ST), 20, 100000); 589 if (ret < 0) { 590 dev_err(priv->dev, "poll timeout\n"); 591 goto out; 592 } 593 594 reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) | 595 MT7531_MDIO_DEV_ADDR(devad) | regnum; 596 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST); 597 598 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 599 !(val & MT7531_PHY_ACS_ST), 20, 100000); 600 if (ret < 0) { 601 dev_err(priv->dev, "poll timeout\n"); 602 goto out; 603 } 604 605 reg = MT7531_MDIO_CL45_READ | MT7531_MDIO_PHY_ADDR(port) | 606 MT7531_MDIO_DEV_ADDR(devad); 607 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST); 608 609 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 610 !(val & MT7531_PHY_ACS_ST), 20, 100000); 611 if (ret < 0) { 612 dev_err(priv->dev, "poll timeout\n"); 613 goto out; 614 } 615 616 ret = val & MT7531_MDIO_RW_DATA_MASK; 617 out: 618 mt7530_mutex_unlock(priv); 619 620 return ret; 621 } 622 623 static int 624 mt7531_ind_c45_phy_write(struct mt7530_priv *priv, int port, int devad, 625 int regnum, u16 data) 626 { 627 struct mt7530_dummy_poll p; 628 u32 val, reg; 629 int ret; 630 631 INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC); 632 633 mt7530_mutex_lock(priv); 634 635 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 636 !(val & MT7531_PHY_ACS_ST), 20, 100000); 637 if (ret < 0) { 638 dev_err(priv->dev, "poll timeout\n"); 639 goto out; 640 } 641 642 reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) | 643 MT7531_MDIO_DEV_ADDR(devad) | regnum; 644 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST); 645 646 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 647 !(val & MT7531_PHY_ACS_ST), 20, 100000); 648 if (ret < 0) { 649 dev_err(priv->dev, "poll timeout\n"); 650 goto out; 651 } 652 653 reg = MT7531_MDIO_CL45_WRITE | MT7531_MDIO_PHY_ADDR(port) | 654 MT7531_MDIO_DEV_ADDR(devad) | data; 655 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST); 656 657 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 658 !(val & MT7531_PHY_ACS_ST), 20, 100000); 659 if (ret < 0) { 660 dev_err(priv->dev, "poll timeout\n"); 661 goto out; 662 } 663 664 out: 665 mt7530_mutex_unlock(priv); 666 667 return ret; 668 } 669 670 static int 671 mt7531_ind_c22_phy_read(struct mt7530_priv *priv, int port, int regnum) 672 { 673 struct mt7530_dummy_poll p; 674 int ret; 675 u32 val; 676 677 INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC); 678 679 mt7530_mutex_lock(priv); 680 681 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 682 !(val & MT7531_PHY_ACS_ST), 20, 100000); 683 if (ret < 0) { 684 dev_err(priv->dev, "poll timeout\n"); 685 goto out; 686 } 687 688 val = MT7531_MDIO_CL22_READ | MT7531_MDIO_PHY_ADDR(port) | 689 MT7531_MDIO_REG_ADDR(regnum); 690 691 mt7530_mii_write(priv, MT7531_PHY_IAC, val | MT7531_PHY_ACS_ST); 692 693 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 694 !(val & MT7531_PHY_ACS_ST), 20, 100000); 695 if (ret < 0) { 696 dev_err(priv->dev, "poll timeout\n"); 697 goto out; 698 } 699 700 ret = val & MT7531_MDIO_RW_DATA_MASK; 701 out: 702 mt7530_mutex_unlock(priv); 703 704 return ret; 705 } 706 707 static int 708 mt7531_ind_c22_phy_write(struct mt7530_priv *priv, int port, int regnum, 709 u16 data) 710 { 711 struct mt7530_dummy_poll p; 712 int ret; 713 u32 reg; 714 715 INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC); 716 717 mt7530_mutex_lock(priv); 718 719 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg, 720 !(reg & MT7531_PHY_ACS_ST), 20, 100000); 721 if (ret < 0) { 722 dev_err(priv->dev, "poll timeout\n"); 723 goto out; 724 } 725 726 reg = MT7531_MDIO_CL22_WRITE | MT7531_MDIO_PHY_ADDR(port) | 727 MT7531_MDIO_REG_ADDR(regnum) | data; 728 729 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST); 730 731 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg, 732 !(reg & MT7531_PHY_ACS_ST), 20, 100000); 733 if (ret < 0) { 734 dev_err(priv->dev, "poll timeout\n"); 735 goto out; 736 } 737 738 out: 739 mt7530_mutex_unlock(priv); 740 741 return ret; 742 } 743 744 static int 745 mt753x_phy_read_c22(struct mii_bus *bus, int port, int regnum) 746 { 747 struct mt7530_priv *priv = bus->priv; 748 749 return priv->info->phy_read_c22(priv, port, regnum); 750 } 751 752 static int 753 mt753x_phy_read_c45(struct mii_bus *bus, int port, int devad, int regnum) 754 { 755 struct mt7530_priv *priv = bus->priv; 756 757 return priv->info->phy_read_c45(priv, port, devad, regnum); 758 } 759 760 static int 761 mt753x_phy_write_c22(struct mii_bus *bus, int port, int regnum, u16 val) 762 { 763 struct mt7530_priv *priv = bus->priv; 764 765 return priv->info->phy_write_c22(priv, port, regnum, val); 766 } 767 768 static int 769 mt753x_phy_write_c45(struct mii_bus *bus, int port, int devad, int regnum, 770 u16 val) 771 { 772 struct mt7530_priv *priv = bus->priv; 773 774 return priv->info->phy_write_c45(priv, port, devad, regnum, val); 775 } 776 777 static void 778 mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset, 779 uint8_t *data) 780 { 781 int i; 782 783 if (stringset != ETH_SS_STATS) 784 return; 785 786 for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) 787 ethtool_puts(&data, mt7530_mib[i].name); 788 } 789 790 static void 791 mt7530_get_ethtool_stats(struct dsa_switch *ds, int port, 792 uint64_t *data) 793 { 794 struct mt7530_priv *priv = ds->priv; 795 const struct mt7530_mib_desc *mib; 796 u32 reg, i; 797 u64 hi; 798 799 for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) { 800 mib = &mt7530_mib[i]; 801 reg = MT7530_PORT_MIB_COUNTER(port) + mib->offset; 802 803 data[i] = mt7530_read(priv, reg); 804 if (mib->size == 2) { 805 hi = mt7530_read(priv, reg + 4); 806 data[i] |= hi << 32; 807 } 808 } 809 } 810 811 static int 812 mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset) 813 { 814 if (sset != ETH_SS_STATS) 815 return 0; 816 817 return ARRAY_SIZE(mt7530_mib); 818 } 819 820 static int 821 mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) 822 { 823 struct mt7530_priv *priv = ds->priv; 824 unsigned int secs = msecs / 1000; 825 unsigned int tmp_age_count; 826 unsigned int error = -1; 827 unsigned int age_count; 828 unsigned int age_unit; 829 830 /* Applied timer is (AGE_CNT + 1) * (AGE_UNIT + 1) seconds */ 831 if (secs < 1 || secs > (AGE_CNT_MAX + 1) * (AGE_UNIT_MAX + 1)) 832 return -ERANGE; 833 834 /* iterate through all possible age_count to find the closest pair */ 835 for (tmp_age_count = 0; tmp_age_count <= AGE_CNT_MAX; ++tmp_age_count) { 836 unsigned int tmp_age_unit = secs / (tmp_age_count + 1) - 1; 837 838 if (tmp_age_unit <= AGE_UNIT_MAX) { 839 unsigned int tmp_error = secs - 840 (tmp_age_count + 1) * (tmp_age_unit + 1); 841 842 /* found a closer pair */ 843 if (error > tmp_error) { 844 error = tmp_error; 845 age_count = tmp_age_count; 846 age_unit = tmp_age_unit; 847 } 848 849 /* found the exact match, so break the loop */ 850 if (!error) 851 break; 852 } 853 } 854 855 mt7530_write(priv, MT7530_AAC, AGE_CNT(age_count) | AGE_UNIT(age_unit)); 856 857 return 0; 858 } 859 860 static const char *p5_intf_modes(unsigned int p5_interface) 861 { 862 switch (p5_interface) { 863 case P5_DISABLED: 864 return "DISABLED"; 865 case P5_INTF_SEL_PHY_P0: 866 return "PHY P0"; 867 case P5_INTF_SEL_PHY_P4: 868 return "PHY P4"; 869 case P5_INTF_SEL_GMAC5: 870 return "GMAC5"; 871 default: 872 return "unknown"; 873 } 874 } 875 876 static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface) 877 { 878 struct mt7530_priv *priv = ds->priv; 879 u8 tx_delay = 0; 880 int val; 881 882 mutex_lock(&priv->reg_mutex); 883 884 val = mt7530_read(priv, MT7530_MHWTRAP); 885 886 val |= MHWTRAP_MANUAL | MHWTRAP_P5_MAC_SEL | MHWTRAP_P5_DIS; 887 val &= ~MHWTRAP_P5_RGMII_MODE & ~MHWTRAP_PHY0_SEL; 888 889 switch (priv->p5_intf_sel) { 890 case P5_INTF_SEL_PHY_P0: 891 /* MT7530_P5_MODE_GPHY_P0: 2nd GMAC -> P5 -> P0 */ 892 val |= MHWTRAP_PHY0_SEL; 893 fallthrough; 894 case P5_INTF_SEL_PHY_P4: 895 /* MT7530_P5_MODE_GPHY_P4: 2nd GMAC -> P5 -> P4 */ 896 val &= ~MHWTRAP_P5_MAC_SEL & ~MHWTRAP_P5_DIS; 897 898 /* Setup the MAC by default for the cpu port */ 899 mt7530_write(priv, MT7530_PMCR_P(5), 0x56300); 900 break; 901 case P5_INTF_SEL_GMAC5: 902 /* MT7530_P5_MODE_GMAC: P5 -> External phy or 2nd GMAC */ 903 val &= ~MHWTRAP_P5_DIS; 904 break; 905 default: 906 break; 907 } 908 909 /* Setup RGMII settings */ 910 if (phy_interface_mode_is_rgmii(interface)) { 911 val |= MHWTRAP_P5_RGMII_MODE; 912 913 /* P5 RGMII RX Clock Control: delay setting for 1000M */ 914 mt7530_write(priv, MT7530_P5RGMIIRXCR, CSR_RGMII_EDGE_ALIGN); 915 916 /* Don't set delay in DSA mode */ 917 if (!dsa_is_dsa_port(priv->ds, 5) && 918 (interface == PHY_INTERFACE_MODE_RGMII_TXID || 919 interface == PHY_INTERFACE_MODE_RGMII_ID)) 920 tx_delay = 4; /* n * 0.5 ns */ 921 922 /* P5 RGMII TX Clock Control: delay x */ 923 mt7530_write(priv, MT7530_P5RGMIITXCR, 924 CSR_RGMII_TXC_CFG(0x10 + tx_delay)); 925 926 /* reduce P5 RGMII Tx driving, 8mA */ 927 mt7530_write(priv, MT7530_IO_DRV_CR, 928 P5_IO_CLK_DRV(1) | P5_IO_DATA_DRV(1)); 929 } 930 931 mt7530_write(priv, MT7530_MHWTRAP, val); 932 933 dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, intf_sel=%s, phy-mode=%s\n", 934 val, p5_intf_modes(priv->p5_intf_sel), phy_modes(interface)); 935 936 mutex_unlock(&priv->reg_mutex); 937 } 938 939 /* In Clause 5 of IEEE Std 802-2014, two sublayers of the data link layer (DLL) 940 * of the Open Systems Interconnection basic reference model (OSI/RM) are 941 * described; the medium access control (MAC) and logical link control (LLC) 942 * sublayers. The MAC sublayer is the one facing the physical layer. 943 * 944 * In 8.2 of IEEE Std 802.1Q-2022, the Bridge architecture is described. A 945 * Bridge component comprises a MAC Relay Entity for interconnecting the Ports 946 * of the Bridge, at least two Ports, and higher layer entities with at least a 947 * Spanning Tree Protocol Entity included. 948 * 949 * Each Bridge Port also functions as an end station and shall provide the MAC 950 * Service to an LLC Entity. Each instance of the MAC Service is provided to a 951 * distinct LLC Entity that supports protocol identification, multiplexing, and 952 * demultiplexing, for protocol data unit (PDU) transmission and reception by 953 * one or more higher layer entities. 954 * 955 * It is described in 8.13.9 of IEEE Std 802.1Q-2022 that in a Bridge, the LLC 956 * Entity associated with each Bridge Port is modeled as being directly 957 * connected to the attached Local Area Network (LAN). 958 * 959 * On the switch with CPU port architecture, CPU port functions as Management 960 * Port, and the Management Port functionality is provided by software which 961 * functions as an end station. Software is connected to an IEEE 802 LAN that is 962 * wholly contained within the system that incorporates the Bridge. Software 963 * provides access to the LLC Entity associated with each Bridge Port by the 964 * value of the source port field on the special tag on the frame received by 965 * software. 966 * 967 * We call frames that carry control information to determine the active 968 * topology and current extent of each Virtual Local Area Network (VLAN), i.e., 969 * spanning tree or Shortest Path Bridging (SPB) and Multiple VLAN Registration 970 * Protocol Data Units (MVRPDUs), and frames from other link constrained 971 * protocols, such as Extensible Authentication Protocol over LAN (EAPOL) and 972 * Link Layer Discovery Protocol (LLDP), link-local frames. They are not 973 * forwarded by a Bridge. Permanently configured entries in the filtering 974 * database (FDB) ensure that such frames are discarded by the Forwarding 975 * Process. In 8.6.3 of IEEE Std 802.1Q-2022, this is described in detail: 976 * 977 * Each of the reserved MAC addresses specified in Table 8-1 978 * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]) shall be 979 * permanently configured in the FDB in C-VLAN components and ERs. 980 * 981 * Each of the reserved MAC addresses specified in Table 8-2 982 * (01-80-C2-00-00-[01,02,03,04,05,06,07,08,09,0A,0E]) shall be permanently 983 * configured in the FDB in S-VLAN components. 984 * 985 * Each of the reserved MAC addresses specified in Table 8-3 986 * (01-80-C2-00-00-[01,02,04,0E]) shall be permanently configured in the FDB in 987 * TPMR components. 988 * 989 * The FDB entries for reserved MAC addresses shall specify filtering for all 990 * Bridge Ports and all VIDs. Management shall not provide the capability to 991 * modify or remove entries for reserved MAC addresses. 992 * 993 * The addresses in Table 8-1, Table 8-2, and Table 8-3 determine the scope of 994 * propagation of PDUs within a Bridged Network, as follows: 995 * 996 * The Nearest Bridge group address (01-80-C2-00-00-0E) is an address that no 997 * conformant Two-Port MAC Relay (TPMR) component, Service VLAN (S-VLAN) 998 * component, Customer VLAN (C-VLAN) component, or MAC Bridge can forward. 999 * PDUs transmitted using this destination address, or any other addresses 1000 * that appear in Table 8-1, Table 8-2, and Table 8-3 1001 * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]), can 1002 * therefore travel no further than those stations that can be reached via a 1003 * single individual LAN from the originating station. 1004 * 1005 * The Nearest non-TPMR Bridge group address (01-80-C2-00-00-03), is an 1006 * address that no conformant S-VLAN component, C-VLAN component, or MAC 1007 * Bridge can forward; however, this address is relayed by a TPMR component. 1008 * PDUs using this destination address, or any of the other addresses that 1009 * appear in both Table 8-1 and Table 8-2 but not in Table 8-3 1010 * (01-80-C2-00-00-[00,03,05,06,07,08,09,0A,0B,0C,0D,0F]), will be relayed by 1011 * any TPMRs but will propagate no further than the nearest S-VLAN component, 1012 * C-VLAN component, or MAC Bridge. 1013 * 1014 * The Nearest Customer Bridge group address (01-80-C2-00-00-00) is an address 1015 * that no conformant C-VLAN component, MAC Bridge can forward; however, it is 1016 * relayed by TPMR components and S-VLAN components. PDUs using this 1017 * destination address, or any of the other addresses that appear in Table 8-1 1018 * but not in either Table 8-2 or Table 8-3 (01-80-C2-00-00-[00,0B,0C,0D,0F]), 1019 * will be relayed by TPMR components and S-VLAN components but will propagate 1020 * no further than the nearest C-VLAN component or MAC Bridge. 1021 * 1022 * Because the LLC Entity associated with each Bridge Port is provided via CPU 1023 * port, we must not filter these frames but forward them to CPU port. 1024 * 1025 * In a Bridge, the transmission Port is majorly decided by ingress and egress 1026 * rules, FDB, and spanning tree Port State functions of the Forwarding Process. 1027 * For link-local frames, only CPU port should be designated as destination port 1028 * in the FDB, and the other functions of the Forwarding Process must not 1029 * interfere with the decision of the transmission Port. We call this process 1030 * trapping frames to CPU port. 1031 * 1032 * Therefore, on the switch with CPU port architecture, link-local frames must 1033 * be trapped to CPU port, and certain link-local frames received by a Port of a 1034 * Bridge comprising a TPMR component or an S-VLAN component must be excluded 1035 * from it. 1036 * 1037 * A Bridge of the switch with CPU port architecture cannot comprise a Two-Port 1038 * MAC Relay (TPMR) component as a TPMR component supports only a subset of the 1039 * functionality of a MAC Bridge. A Bridge comprising two Ports (Management Port 1040 * doesn't count) of this architecture will either function as a standard MAC 1041 * Bridge or a standard VLAN Bridge. 1042 * 1043 * Therefore, a Bridge of this architecture can only comprise S-VLAN components, 1044 * C-VLAN components, or MAC Bridge components. Since there's no TPMR component, 1045 * we don't need to relay PDUs using the destination addresses specified on the 1046 * Nearest non-TPMR section, and the proportion of the Nearest Customer Bridge 1047 * section where they must be relayed by TPMR components. 1048 * 1049 * One option to trap link-local frames to CPU port is to add static FDB entries 1050 * with CPU port designated as destination port. However, because that 1051 * Independent VLAN Learning (IVL) is being used on every VID, each entry only 1052 * applies to a single VLAN Identifier (VID). For a Bridge comprising a MAC 1053 * Bridge component or a C-VLAN component, there would have to be 16 times 4096 1054 * entries. This switch intellectual property can only hold a maximum of 2048 1055 * entries. Using this option, there also isn't a mechanism to prevent 1056 * link-local frames from being discarded when the spanning tree Port State of 1057 * the reception Port is discarding. 1058 * 1059 * The remaining option is to utilise the BPC, RGAC1, RGAC2, RGAC3, and RGAC4 1060 * registers. Whilst this applies to every VID, it doesn't contain all of the 1061 * reserved MAC addresses without affecting the remaining Standard Group MAC 1062 * Addresses. The REV_UN frame tag utilised using the RGAC4 register covers the 1063 * remaining 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F] destination 1064 * addresses. It also includes the 01-80-C2-00-00-22 to 01-80-C2-00-00-FF 1065 * destination addresses which may be relayed by MAC Bridges or VLAN Bridges. 1066 * The latter option provides better but not complete conformance. 1067 * 1068 * This switch intellectual property also does not provide a mechanism to trap 1069 * link-local frames with specific destination addresses to CPU port by Bridge, 1070 * to conform to the filtering rules for the distinct Bridge components. 1071 * 1072 * Therefore, regardless of the type of the Bridge component, link-local frames 1073 * with these destination addresses will be trapped to CPU port: 1074 * 1075 * 01-80-C2-00-00-[00,01,02,03,0E] 1076 * 1077 * In a Bridge comprising a MAC Bridge component or a C-VLAN component: 1078 * 1079 * Link-local frames with these destination addresses won't be trapped to CPU 1080 * port which won't conform to IEEE Std 802.1Q-2022: 1081 * 1082 * 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F] 1083 * 1084 * In a Bridge comprising an S-VLAN component: 1085 * 1086 * Link-local frames with these destination addresses will be trapped to CPU 1087 * port which won't conform to IEEE Std 802.1Q-2022: 1088 * 1089 * 01-80-C2-00-00-00 1090 * 1091 * Link-local frames with these destination addresses won't be trapped to CPU 1092 * port which won't conform to IEEE Std 802.1Q-2022: 1093 * 1094 * 01-80-C2-00-00-[04,05,06,07,08,09,0A] 1095 * 1096 * To trap link-local frames to CPU port as conformant as this switch 1097 * intellectual property can allow, link-local frames are made to be regarded as 1098 * Bridge Protocol Data Units (BPDUs). This is because this switch intellectual 1099 * property only lets the frames regarded as BPDUs bypass the spanning tree Port 1100 * State function of the Forwarding Process. 1101 * 1102 * The only remaining interference is the ingress rules. When the reception Port 1103 * has no PVID assigned on software, VLAN-untagged frames won't be allowed in. 1104 * There doesn't seem to be a mechanism on the switch intellectual property to 1105 * have link-local frames bypass this function of the Forwarding Process. 1106 */ 1107 static void 1108 mt753x_trap_frames(struct mt7530_priv *priv) 1109 { 1110 /* Trap 802.1X PAE frames and BPDUs to the CPU port(s) and egress them 1111 * VLAN-untagged. 1112 */ 1113 mt7530_rmw(priv, MT753X_BPC, 1114 MT753X_PAE_BPDU_FR | MT753X_PAE_EG_TAG_MASK | 1115 MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK | 1116 MT753X_BPDU_PORT_FW_MASK, 1117 MT753X_PAE_BPDU_FR | 1118 MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | 1119 MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) | 1120 MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | 1121 MT753X_BPDU_CPU_ONLY); 1122 1123 /* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress 1124 * them VLAN-untagged. 1125 */ 1126 mt7530_rmw(priv, MT753X_RGAC1, 1127 MT753X_R02_BPDU_FR | MT753X_R02_EG_TAG_MASK | 1128 MT753X_R02_PORT_FW_MASK | MT753X_R01_BPDU_FR | 1129 MT753X_R01_EG_TAG_MASK | MT753X_R01_PORT_FW_MASK, 1130 MT753X_R02_BPDU_FR | 1131 MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | 1132 MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) | 1133 MT753X_R01_BPDU_FR | 1134 MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | 1135 MT753X_BPDU_CPU_ONLY); 1136 1137 /* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress 1138 * them VLAN-untagged. 1139 */ 1140 mt7530_rmw(priv, MT753X_RGAC2, 1141 MT753X_R0E_BPDU_FR | MT753X_R0E_EG_TAG_MASK | 1142 MT753X_R0E_PORT_FW_MASK | MT753X_R03_BPDU_FR | 1143 MT753X_R03_EG_TAG_MASK | MT753X_R03_PORT_FW_MASK, 1144 MT753X_R0E_BPDU_FR | 1145 MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | 1146 MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) | 1147 MT753X_R03_BPDU_FR | 1148 MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | 1149 MT753X_BPDU_CPU_ONLY); 1150 } 1151 1152 static void 1153 mt753x_cpu_port_enable(struct dsa_switch *ds, int port) 1154 { 1155 struct mt7530_priv *priv = ds->priv; 1156 1157 /* Enable Mediatek header mode on the cpu port */ 1158 mt7530_write(priv, MT7530_PVC_P(port), 1159 PORT_SPEC_TAG); 1160 1161 /* Enable flooding on the CPU port */ 1162 mt7530_set(priv, MT7530_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) | 1163 UNU_FFP(BIT(port))); 1164 1165 /* Add the CPU port to the CPU port bitmap for MT7531 and the switch on 1166 * the MT7988 SoC. Trapped frames will be forwarded to the CPU port that 1167 * is affine to the inbound user port. 1168 */ 1169 if (priv->id == ID_MT7531 || priv->id == ID_MT7988) 1170 mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port))); 1171 1172 /* CPU port gets connected to all user ports of 1173 * the switch. 1174 */ 1175 mt7530_write(priv, MT7530_PCR_P(port), 1176 PCR_MATRIX(dsa_user_ports(priv->ds))); 1177 1178 /* Set to fallback mode for independent VLAN learning */ 1179 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, 1180 MT7530_PORT_FALLBACK_MODE); 1181 } 1182 1183 static int 1184 mt7530_port_enable(struct dsa_switch *ds, int port, 1185 struct phy_device *phy) 1186 { 1187 struct dsa_port *dp = dsa_to_port(ds, port); 1188 struct mt7530_priv *priv = ds->priv; 1189 1190 mutex_lock(&priv->reg_mutex); 1191 1192 /* Allow the user port gets connected to the cpu port and also 1193 * restore the port matrix if the port is the member of a certain 1194 * bridge. 1195 */ 1196 if (dsa_port_is_user(dp)) { 1197 struct dsa_port *cpu_dp = dp->cpu_dp; 1198 1199 priv->ports[port].pm |= PCR_MATRIX(BIT(cpu_dp->index)); 1200 } 1201 priv->ports[port].enable = true; 1202 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, 1203 priv->ports[port].pm); 1204 1205 mutex_unlock(&priv->reg_mutex); 1206 1207 return 0; 1208 } 1209 1210 static void 1211 mt7530_port_disable(struct dsa_switch *ds, int port) 1212 { 1213 struct mt7530_priv *priv = ds->priv; 1214 1215 mutex_lock(&priv->reg_mutex); 1216 1217 /* Clear up all port matrix which could be restored in the next 1218 * enablement for the port. 1219 */ 1220 priv->ports[port].enable = false; 1221 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, 1222 PCR_MATRIX_CLR); 1223 1224 mutex_unlock(&priv->reg_mutex); 1225 } 1226 1227 static int 1228 mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) 1229 { 1230 struct mt7530_priv *priv = ds->priv; 1231 int length; 1232 u32 val; 1233 1234 /* When a new MTU is set, DSA always set the CPU port's MTU to the 1235 * largest MTU of the user ports. Because the switch only has a global 1236 * RX length register, only allowing CPU port here is enough. 1237 */ 1238 if (!dsa_is_cpu_port(ds, port)) 1239 return 0; 1240 1241 mt7530_mutex_lock(priv); 1242 1243 val = mt7530_mii_read(priv, MT7530_GMACCR); 1244 val &= ~MAX_RX_PKT_LEN_MASK; 1245 1246 /* RX length also includes Ethernet header, MTK tag, and FCS length */ 1247 length = new_mtu + ETH_HLEN + MTK_HDR_LEN + ETH_FCS_LEN; 1248 if (length <= 1522) { 1249 val |= MAX_RX_PKT_LEN_1522; 1250 } else if (length <= 1536) { 1251 val |= MAX_RX_PKT_LEN_1536; 1252 } else if (length <= 1552) { 1253 val |= MAX_RX_PKT_LEN_1552; 1254 } else { 1255 val &= ~MAX_RX_JUMBO_MASK; 1256 val |= MAX_RX_JUMBO(DIV_ROUND_UP(length, 1024)); 1257 val |= MAX_RX_PKT_LEN_JUMBO; 1258 } 1259 1260 mt7530_mii_write(priv, MT7530_GMACCR, val); 1261 1262 mt7530_mutex_unlock(priv); 1263 1264 return 0; 1265 } 1266 1267 static int 1268 mt7530_port_max_mtu(struct dsa_switch *ds, int port) 1269 { 1270 return MT7530_MAX_MTU; 1271 } 1272 1273 static void 1274 mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state) 1275 { 1276 struct mt7530_priv *priv = ds->priv; 1277 u32 stp_state; 1278 1279 switch (state) { 1280 case BR_STATE_DISABLED: 1281 stp_state = MT7530_STP_DISABLED; 1282 break; 1283 case BR_STATE_BLOCKING: 1284 stp_state = MT7530_STP_BLOCKING; 1285 break; 1286 case BR_STATE_LISTENING: 1287 stp_state = MT7530_STP_LISTENING; 1288 break; 1289 case BR_STATE_LEARNING: 1290 stp_state = MT7530_STP_LEARNING; 1291 break; 1292 case BR_STATE_FORWARDING: 1293 default: 1294 stp_state = MT7530_STP_FORWARDING; 1295 break; 1296 } 1297 1298 mt7530_rmw(priv, MT7530_SSP_P(port), FID_PST_MASK(FID_BRIDGED), 1299 FID_PST(FID_BRIDGED, stp_state)); 1300 } 1301 1302 static int 1303 mt7530_port_pre_bridge_flags(struct dsa_switch *ds, int port, 1304 struct switchdev_brport_flags flags, 1305 struct netlink_ext_ack *extack) 1306 { 1307 if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | 1308 BR_BCAST_FLOOD)) 1309 return -EINVAL; 1310 1311 return 0; 1312 } 1313 1314 static int 1315 mt7530_port_bridge_flags(struct dsa_switch *ds, int port, 1316 struct switchdev_brport_flags flags, 1317 struct netlink_ext_ack *extack) 1318 { 1319 struct mt7530_priv *priv = ds->priv; 1320 1321 if (flags.mask & BR_LEARNING) 1322 mt7530_rmw(priv, MT7530_PSC_P(port), SA_DIS, 1323 flags.val & BR_LEARNING ? 0 : SA_DIS); 1324 1325 if (flags.mask & BR_FLOOD) 1326 mt7530_rmw(priv, MT7530_MFC, UNU_FFP(BIT(port)), 1327 flags.val & BR_FLOOD ? UNU_FFP(BIT(port)) : 0); 1328 1329 if (flags.mask & BR_MCAST_FLOOD) 1330 mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)), 1331 flags.val & BR_MCAST_FLOOD ? UNM_FFP(BIT(port)) : 0); 1332 1333 if (flags.mask & BR_BCAST_FLOOD) 1334 mt7530_rmw(priv, MT7530_MFC, BC_FFP(BIT(port)), 1335 flags.val & BR_BCAST_FLOOD ? BC_FFP(BIT(port)) : 0); 1336 1337 return 0; 1338 } 1339 1340 static int 1341 mt7530_port_bridge_join(struct dsa_switch *ds, int port, 1342 struct dsa_bridge bridge, bool *tx_fwd_offload, 1343 struct netlink_ext_ack *extack) 1344 { 1345 struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; 1346 struct dsa_port *cpu_dp = dp->cpu_dp; 1347 u32 port_bitmap = BIT(cpu_dp->index); 1348 struct mt7530_priv *priv = ds->priv; 1349 1350 mutex_lock(&priv->reg_mutex); 1351 1352 dsa_switch_for_each_user_port(other_dp, ds) { 1353 int other_port = other_dp->index; 1354 1355 if (dp == other_dp) 1356 continue; 1357 1358 /* Add this port to the port matrix of the other ports in the 1359 * same bridge. If the port is disabled, port matrix is kept 1360 * and not being setup until the port becomes enabled. 1361 */ 1362 if (!dsa_port_offloads_bridge(other_dp, &bridge)) 1363 continue; 1364 1365 if (priv->ports[other_port].enable) 1366 mt7530_set(priv, MT7530_PCR_P(other_port), 1367 PCR_MATRIX(BIT(port))); 1368 priv->ports[other_port].pm |= PCR_MATRIX(BIT(port)); 1369 1370 port_bitmap |= BIT(other_port); 1371 } 1372 1373 /* Add the all other ports to this port matrix. */ 1374 if (priv->ports[port].enable) 1375 mt7530_rmw(priv, MT7530_PCR_P(port), 1376 PCR_MATRIX_MASK, PCR_MATRIX(port_bitmap)); 1377 priv->ports[port].pm |= PCR_MATRIX(port_bitmap); 1378 1379 /* Set to fallback mode for independent VLAN learning */ 1380 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, 1381 MT7530_PORT_FALLBACK_MODE); 1382 1383 mutex_unlock(&priv->reg_mutex); 1384 1385 return 0; 1386 } 1387 1388 static void 1389 mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port) 1390 { 1391 struct mt7530_priv *priv = ds->priv; 1392 bool all_user_ports_removed = true; 1393 int i; 1394 1395 /* This is called after .port_bridge_leave when leaving a VLAN-aware 1396 * bridge. Don't set standalone ports to fallback mode. 1397 */ 1398 if (dsa_port_bridge_dev_get(dsa_to_port(ds, port))) 1399 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, 1400 MT7530_PORT_FALLBACK_MODE); 1401 1402 mt7530_rmw(priv, MT7530_PVC_P(port), 1403 VLAN_ATTR_MASK | PVC_EG_TAG_MASK | ACC_FRM_MASK, 1404 VLAN_ATTR(MT7530_VLAN_TRANSPARENT) | 1405 PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT) | 1406 MT7530_VLAN_ACC_ALL); 1407 1408 /* Set PVID to 0 */ 1409 mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, 1410 G0_PORT_VID_DEF); 1411 1412 for (i = 0; i < MT7530_NUM_PORTS; i++) { 1413 if (dsa_is_user_port(ds, i) && 1414 dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) { 1415 all_user_ports_removed = false; 1416 break; 1417 } 1418 } 1419 1420 /* CPU port also does the same thing until all user ports belonging to 1421 * the CPU port get out of VLAN filtering mode. 1422 */ 1423 if (all_user_ports_removed) { 1424 struct dsa_port *dp = dsa_to_port(ds, port); 1425 struct dsa_port *cpu_dp = dp->cpu_dp; 1426 1427 mt7530_write(priv, MT7530_PCR_P(cpu_dp->index), 1428 PCR_MATRIX(dsa_user_ports(priv->ds))); 1429 mt7530_write(priv, MT7530_PVC_P(cpu_dp->index), PORT_SPEC_TAG 1430 | PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); 1431 } 1432 } 1433 1434 static void 1435 mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port) 1436 { 1437 struct mt7530_priv *priv = ds->priv; 1438 1439 /* Trapped into security mode allows packet forwarding through VLAN 1440 * table lookup. 1441 */ 1442 if (dsa_is_user_port(ds, port)) { 1443 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, 1444 MT7530_PORT_SECURITY_MODE); 1445 mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, 1446 G0_PORT_VID(priv->ports[port].pvid)); 1447 1448 /* Only accept tagged frames if PVID is not set */ 1449 if (!priv->ports[port].pvid) 1450 mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK, 1451 MT7530_VLAN_ACC_TAGGED); 1452 1453 /* Set the port as a user port which is to be able to recognize 1454 * VID from incoming packets before fetching entry within the 1455 * VLAN table. 1456 */ 1457 mt7530_rmw(priv, MT7530_PVC_P(port), 1458 VLAN_ATTR_MASK | PVC_EG_TAG_MASK, 1459 VLAN_ATTR(MT7530_VLAN_USER) | 1460 PVC_EG_TAG(MT7530_VLAN_EG_DISABLED)); 1461 } else { 1462 /* Also set CPU ports to the "user" VLAN port attribute, to 1463 * allow VLAN classification, but keep the EG_TAG attribute as 1464 * "consistent" (i.o.w. don't change its value) for packets 1465 * received by the switch from the CPU, so that tagged packets 1466 * are forwarded to user ports as tagged, and untagged as 1467 * untagged. 1468 */ 1469 mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK, 1470 VLAN_ATTR(MT7530_VLAN_USER)); 1471 } 1472 } 1473 1474 static void 1475 mt7530_port_bridge_leave(struct dsa_switch *ds, int port, 1476 struct dsa_bridge bridge) 1477 { 1478 struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; 1479 struct dsa_port *cpu_dp = dp->cpu_dp; 1480 struct mt7530_priv *priv = ds->priv; 1481 1482 mutex_lock(&priv->reg_mutex); 1483 1484 dsa_switch_for_each_user_port(other_dp, ds) { 1485 int other_port = other_dp->index; 1486 1487 if (dp == other_dp) 1488 continue; 1489 1490 /* Remove this port from the port matrix of the other ports 1491 * in the same bridge. If the port is disabled, port matrix 1492 * is kept and not being setup until the port becomes enabled. 1493 */ 1494 if (!dsa_port_offloads_bridge(other_dp, &bridge)) 1495 continue; 1496 1497 if (priv->ports[other_port].enable) 1498 mt7530_clear(priv, MT7530_PCR_P(other_port), 1499 PCR_MATRIX(BIT(port))); 1500 priv->ports[other_port].pm &= ~PCR_MATRIX(BIT(port)); 1501 } 1502 1503 /* Set the cpu port to be the only one in the port matrix of 1504 * this port. 1505 */ 1506 if (priv->ports[port].enable) 1507 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, 1508 PCR_MATRIX(BIT(cpu_dp->index))); 1509 priv->ports[port].pm = PCR_MATRIX(BIT(cpu_dp->index)); 1510 1511 /* When a port is removed from the bridge, the port would be set up 1512 * back to the default as is at initial boot which is a VLAN-unaware 1513 * port. 1514 */ 1515 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, 1516 MT7530_PORT_MATRIX_MODE); 1517 1518 mutex_unlock(&priv->reg_mutex); 1519 } 1520 1521 static int 1522 mt7530_port_fdb_add(struct dsa_switch *ds, int port, 1523 const unsigned char *addr, u16 vid, 1524 struct dsa_db db) 1525 { 1526 struct mt7530_priv *priv = ds->priv; 1527 int ret; 1528 u8 port_mask = BIT(port); 1529 1530 mutex_lock(&priv->reg_mutex); 1531 mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT); 1532 ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); 1533 mutex_unlock(&priv->reg_mutex); 1534 1535 return ret; 1536 } 1537 1538 static int 1539 mt7530_port_fdb_del(struct dsa_switch *ds, int port, 1540 const unsigned char *addr, u16 vid, 1541 struct dsa_db db) 1542 { 1543 struct mt7530_priv *priv = ds->priv; 1544 int ret; 1545 u8 port_mask = BIT(port); 1546 1547 mutex_lock(&priv->reg_mutex); 1548 mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_EMP); 1549 ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); 1550 mutex_unlock(&priv->reg_mutex); 1551 1552 return ret; 1553 } 1554 1555 static int 1556 mt7530_port_fdb_dump(struct dsa_switch *ds, int port, 1557 dsa_fdb_dump_cb_t *cb, void *data) 1558 { 1559 struct mt7530_priv *priv = ds->priv; 1560 struct mt7530_fdb _fdb = { 0 }; 1561 int cnt = MT7530_NUM_FDB_RECORDS; 1562 int ret = 0; 1563 u32 rsp = 0; 1564 1565 mutex_lock(&priv->reg_mutex); 1566 1567 ret = mt7530_fdb_cmd(priv, MT7530_FDB_START, &rsp); 1568 if (ret < 0) 1569 goto err; 1570 1571 do { 1572 if (rsp & ATC_SRCH_HIT) { 1573 mt7530_fdb_read(priv, &_fdb); 1574 if (_fdb.port_mask & BIT(port)) { 1575 ret = cb(_fdb.mac, _fdb.vid, _fdb.noarp, 1576 data); 1577 if (ret < 0) 1578 break; 1579 } 1580 } 1581 } while (--cnt && 1582 !(rsp & ATC_SRCH_END) && 1583 !mt7530_fdb_cmd(priv, MT7530_FDB_NEXT, &rsp)); 1584 err: 1585 mutex_unlock(&priv->reg_mutex); 1586 1587 return 0; 1588 } 1589 1590 static int 1591 mt7530_port_mdb_add(struct dsa_switch *ds, int port, 1592 const struct switchdev_obj_port_mdb *mdb, 1593 struct dsa_db db) 1594 { 1595 struct mt7530_priv *priv = ds->priv; 1596 const u8 *addr = mdb->addr; 1597 u16 vid = mdb->vid; 1598 u8 port_mask = 0; 1599 int ret; 1600 1601 mutex_lock(&priv->reg_mutex); 1602 1603 mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP); 1604 if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL)) 1605 port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP) 1606 & PORT_MAP_MASK; 1607 1608 port_mask |= BIT(port); 1609 mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT); 1610 ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); 1611 1612 mutex_unlock(&priv->reg_mutex); 1613 1614 return ret; 1615 } 1616 1617 static int 1618 mt7530_port_mdb_del(struct dsa_switch *ds, int port, 1619 const struct switchdev_obj_port_mdb *mdb, 1620 struct dsa_db db) 1621 { 1622 struct mt7530_priv *priv = ds->priv; 1623 const u8 *addr = mdb->addr; 1624 u16 vid = mdb->vid; 1625 u8 port_mask = 0; 1626 int ret; 1627 1628 mutex_lock(&priv->reg_mutex); 1629 1630 mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP); 1631 if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL)) 1632 port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP) 1633 & PORT_MAP_MASK; 1634 1635 port_mask &= ~BIT(port); 1636 mt7530_fdb_write(priv, vid, port_mask, addr, -1, 1637 port_mask ? STATIC_ENT : STATIC_EMP); 1638 ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); 1639 1640 mutex_unlock(&priv->reg_mutex); 1641 1642 return ret; 1643 } 1644 1645 static int 1646 mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid) 1647 { 1648 struct mt7530_dummy_poll p; 1649 u32 val; 1650 int ret; 1651 1652 val = VTCR_BUSY | VTCR_FUNC(cmd) | vid; 1653 mt7530_write(priv, MT7530_VTCR, val); 1654 1655 INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_VTCR); 1656 ret = readx_poll_timeout(_mt7530_read, &p, val, 1657 !(val & VTCR_BUSY), 20, 20000); 1658 if (ret < 0) { 1659 dev_err(priv->dev, "poll timeout\n"); 1660 return ret; 1661 } 1662 1663 val = mt7530_read(priv, MT7530_VTCR); 1664 if (val & VTCR_INVALID) { 1665 dev_err(priv->dev, "read VTCR invalid\n"); 1666 return -EINVAL; 1667 } 1668 1669 return 0; 1670 } 1671 1672 static int 1673 mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, 1674 struct netlink_ext_ack *extack) 1675 { 1676 struct dsa_port *dp = dsa_to_port(ds, port); 1677 struct dsa_port *cpu_dp = dp->cpu_dp; 1678 1679 if (vlan_filtering) { 1680 /* The port is being kept as VLAN-unaware port when bridge is 1681 * set up with vlan_filtering not being set, Otherwise, the 1682 * port and the corresponding CPU port is required the setup 1683 * for becoming a VLAN-aware port. 1684 */ 1685 mt7530_port_set_vlan_aware(ds, port); 1686 mt7530_port_set_vlan_aware(ds, cpu_dp->index); 1687 } else { 1688 mt7530_port_set_vlan_unaware(ds, port); 1689 } 1690 1691 return 0; 1692 } 1693 1694 static void 1695 mt7530_hw_vlan_add(struct mt7530_priv *priv, 1696 struct mt7530_hw_vlan_entry *entry) 1697 { 1698 struct dsa_port *dp = dsa_to_port(priv->ds, entry->port); 1699 u8 new_members; 1700 u32 val; 1701 1702 new_members = entry->old_members | BIT(entry->port); 1703 1704 /* Validate the entry with independent learning, create egress tag per 1705 * VLAN and joining the port as one of the port members. 1706 */ 1707 val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | FID(FID_BRIDGED) | 1708 VLAN_VALID; 1709 mt7530_write(priv, MT7530_VAWD1, val); 1710 1711 /* Decide whether adding tag or not for those outgoing packets from the 1712 * port inside the VLAN. 1713 * CPU port is always taken as a tagged port for serving more than one 1714 * VLANs across and also being applied with egress type stack mode for 1715 * that VLAN tags would be appended after hardware special tag used as 1716 * DSA tag. 1717 */ 1718 if (dsa_port_is_cpu(dp)) 1719 val = MT7530_VLAN_EGRESS_STACK; 1720 else if (entry->untagged) 1721 val = MT7530_VLAN_EGRESS_UNTAG; 1722 else 1723 val = MT7530_VLAN_EGRESS_TAG; 1724 mt7530_rmw(priv, MT7530_VAWD2, 1725 ETAG_CTRL_P_MASK(entry->port), 1726 ETAG_CTRL_P(entry->port, val)); 1727 } 1728 1729 static void 1730 mt7530_hw_vlan_del(struct mt7530_priv *priv, 1731 struct mt7530_hw_vlan_entry *entry) 1732 { 1733 u8 new_members; 1734 u32 val; 1735 1736 new_members = entry->old_members & ~BIT(entry->port); 1737 1738 val = mt7530_read(priv, MT7530_VAWD1); 1739 if (!(val & VLAN_VALID)) { 1740 dev_err(priv->dev, 1741 "Cannot be deleted due to invalid entry\n"); 1742 return; 1743 } 1744 1745 if (new_members) { 1746 val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | 1747 VLAN_VALID; 1748 mt7530_write(priv, MT7530_VAWD1, val); 1749 } else { 1750 mt7530_write(priv, MT7530_VAWD1, 0); 1751 mt7530_write(priv, MT7530_VAWD2, 0); 1752 } 1753 } 1754 1755 static void 1756 mt7530_hw_vlan_update(struct mt7530_priv *priv, u16 vid, 1757 struct mt7530_hw_vlan_entry *entry, 1758 mt7530_vlan_op vlan_op) 1759 { 1760 u32 val; 1761 1762 /* Fetch entry */ 1763 mt7530_vlan_cmd(priv, MT7530_VTCR_RD_VID, vid); 1764 1765 val = mt7530_read(priv, MT7530_VAWD1); 1766 1767 entry->old_members = (val >> PORT_MEM_SHFT) & PORT_MEM_MASK; 1768 1769 /* Manipulate entry */ 1770 vlan_op(priv, entry); 1771 1772 /* Flush result to hardware */ 1773 mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, vid); 1774 } 1775 1776 static int 1777 mt7530_setup_vlan0(struct mt7530_priv *priv) 1778 { 1779 u32 val; 1780 1781 /* Validate the entry with independent learning, keep the original 1782 * ingress tag attribute. 1783 */ 1784 val = IVL_MAC | EG_CON | PORT_MEM(MT7530_ALL_MEMBERS) | FID(FID_BRIDGED) | 1785 VLAN_VALID; 1786 mt7530_write(priv, MT7530_VAWD1, val); 1787 1788 return mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, 0); 1789 } 1790 1791 static int 1792 mt7530_port_vlan_add(struct dsa_switch *ds, int port, 1793 const struct switchdev_obj_port_vlan *vlan, 1794 struct netlink_ext_ack *extack) 1795 { 1796 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 1797 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; 1798 struct mt7530_hw_vlan_entry new_entry; 1799 struct mt7530_priv *priv = ds->priv; 1800 1801 mutex_lock(&priv->reg_mutex); 1802 1803 mt7530_hw_vlan_entry_init(&new_entry, port, untagged); 1804 mt7530_hw_vlan_update(priv, vlan->vid, &new_entry, mt7530_hw_vlan_add); 1805 1806 if (pvid) { 1807 priv->ports[port].pvid = vlan->vid; 1808 1809 /* Accept all frames if PVID is set */ 1810 mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK, 1811 MT7530_VLAN_ACC_ALL); 1812 1813 /* Only configure PVID if VLAN filtering is enabled */ 1814 if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) 1815 mt7530_rmw(priv, MT7530_PPBV1_P(port), 1816 G0_PORT_VID_MASK, 1817 G0_PORT_VID(vlan->vid)); 1818 } else if (vlan->vid && priv->ports[port].pvid == vlan->vid) { 1819 /* This VLAN is overwritten without PVID, so unset it */ 1820 priv->ports[port].pvid = G0_PORT_VID_DEF; 1821 1822 /* Only accept tagged frames if the port is VLAN-aware */ 1823 if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) 1824 mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK, 1825 MT7530_VLAN_ACC_TAGGED); 1826 1827 mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, 1828 G0_PORT_VID_DEF); 1829 } 1830 1831 mutex_unlock(&priv->reg_mutex); 1832 1833 return 0; 1834 } 1835 1836 static int 1837 mt7530_port_vlan_del(struct dsa_switch *ds, int port, 1838 const struct switchdev_obj_port_vlan *vlan) 1839 { 1840 struct mt7530_hw_vlan_entry target_entry; 1841 struct mt7530_priv *priv = ds->priv; 1842 1843 mutex_lock(&priv->reg_mutex); 1844 1845 mt7530_hw_vlan_entry_init(&target_entry, port, 0); 1846 mt7530_hw_vlan_update(priv, vlan->vid, &target_entry, 1847 mt7530_hw_vlan_del); 1848 1849 /* PVID is being restored to the default whenever the PVID port 1850 * is being removed from the VLAN. 1851 */ 1852 if (priv->ports[port].pvid == vlan->vid) { 1853 priv->ports[port].pvid = G0_PORT_VID_DEF; 1854 1855 /* Only accept tagged frames if the port is VLAN-aware */ 1856 if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) 1857 mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK, 1858 MT7530_VLAN_ACC_TAGGED); 1859 1860 mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, 1861 G0_PORT_VID_DEF); 1862 } 1863 1864 1865 mutex_unlock(&priv->reg_mutex); 1866 1867 return 0; 1868 } 1869 1870 static int mt753x_mirror_port_get(unsigned int id, u32 val) 1871 { 1872 return (id == ID_MT7531 || id == ID_MT7988) ? 1873 MT7531_MIRROR_PORT_GET(val) : 1874 MIRROR_PORT(val); 1875 } 1876 1877 static int mt753x_mirror_port_set(unsigned int id, u32 val) 1878 { 1879 return (id == ID_MT7531 || id == ID_MT7988) ? 1880 MT7531_MIRROR_PORT_SET(val) : 1881 MIRROR_PORT(val); 1882 } 1883 1884 static int mt753x_port_mirror_add(struct dsa_switch *ds, int port, 1885 struct dsa_mall_mirror_tc_entry *mirror, 1886 bool ingress, struct netlink_ext_ack *extack) 1887 { 1888 struct mt7530_priv *priv = ds->priv; 1889 int monitor_port; 1890 u32 val; 1891 1892 /* Check for existent entry */ 1893 if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port)) 1894 return -EEXIST; 1895 1896 val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id)); 1897 1898 /* MT7530 only supports one monitor port */ 1899 monitor_port = mt753x_mirror_port_get(priv->id, val); 1900 if (val & MT753X_MIRROR_EN(priv->id) && 1901 monitor_port != mirror->to_local_port) 1902 return -EEXIST; 1903 1904 val |= MT753X_MIRROR_EN(priv->id); 1905 val &= ~MT753X_MIRROR_MASK(priv->id); 1906 val |= mt753x_mirror_port_set(priv->id, mirror->to_local_port); 1907 mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val); 1908 1909 val = mt7530_read(priv, MT7530_PCR_P(port)); 1910 if (ingress) { 1911 val |= PORT_RX_MIR; 1912 priv->mirror_rx |= BIT(port); 1913 } else { 1914 val |= PORT_TX_MIR; 1915 priv->mirror_tx |= BIT(port); 1916 } 1917 mt7530_write(priv, MT7530_PCR_P(port), val); 1918 1919 return 0; 1920 } 1921 1922 static void mt753x_port_mirror_del(struct dsa_switch *ds, int port, 1923 struct dsa_mall_mirror_tc_entry *mirror) 1924 { 1925 struct mt7530_priv *priv = ds->priv; 1926 u32 val; 1927 1928 val = mt7530_read(priv, MT7530_PCR_P(port)); 1929 if (mirror->ingress) { 1930 val &= ~PORT_RX_MIR; 1931 priv->mirror_rx &= ~BIT(port); 1932 } else { 1933 val &= ~PORT_TX_MIR; 1934 priv->mirror_tx &= ~BIT(port); 1935 } 1936 mt7530_write(priv, MT7530_PCR_P(port), val); 1937 1938 if (!priv->mirror_rx && !priv->mirror_tx) { 1939 val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id)); 1940 val &= ~MT753X_MIRROR_EN(priv->id); 1941 mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val); 1942 } 1943 } 1944 1945 static enum dsa_tag_protocol 1946 mtk_get_tag_protocol(struct dsa_switch *ds, int port, 1947 enum dsa_tag_protocol mp) 1948 { 1949 return DSA_TAG_PROTO_MTK; 1950 } 1951 1952 #ifdef CONFIG_GPIOLIB 1953 static inline u32 1954 mt7530_gpio_to_bit(unsigned int offset) 1955 { 1956 /* Map GPIO offset to register bit 1957 * [ 2: 0] port 0 LED 0..2 as GPIO 0..2 1958 * [ 6: 4] port 1 LED 0..2 as GPIO 3..5 1959 * [10: 8] port 2 LED 0..2 as GPIO 6..8 1960 * [14:12] port 3 LED 0..2 as GPIO 9..11 1961 * [18:16] port 4 LED 0..2 as GPIO 12..14 1962 */ 1963 return BIT(offset + offset / 3); 1964 } 1965 1966 static int 1967 mt7530_gpio_get(struct gpio_chip *gc, unsigned int offset) 1968 { 1969 struct mt7530_priv *priv = gpiochip_get_data(gc); 1970 u32 bit = mt7530_gpio_to_bit(offset); 1971 1972 return !!(mt7530_read(priv, MT7530_LED_GPIO_DATA) & bit); 1973 } 1974 1975 static void 1976 mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) 1977 { 1978 struct mt7530_priv *priv = gpiochip_get_data(gc); 1979 u32 bit = mt7530_gpio_to_bit(offset); 1980 1981 if (value) 1982 mt7530_set(priv, MT7530_LED_GPIO_DATA, bit); 1983 else 1984 mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit); 1985 } 1986 1987 static int 1988 mt7530_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) 1989 { 1990 struct mt7530_priv *priv = gpiochip_get_data(gc); 1991 u32 bit = mt7530_gpio_to_bit(offset); 1992 1993 return (mt7530_read(priv, MT7530_LED_GPIO_DIR) & bit) ? 1994 GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN; 1995 } 1996 1997 static int 1998 mt7530_gpio_direction_input(struct gpio_chip *gc, unsigned int offset) 1999 { 2000 struct mt7530_priv *priv = gpiochip_get_data(gc); 2001 u32 bit = mt7530_gpio_to_bit(offset); 2002 2003 mt7530_clear(priv, MT7530_LED_GPIO_OE, bit); 2004 mt7530_clear(priv, MT7530_LED_GPIO_DIR, bit); 2005 2006 return 0; 2007 } 2008 2009 static int 2010 mt7530_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value) 2011 { 2012 struct mt7530_priv *priv = gpiochip_get_data(gc); 2013 u32 bit = mt7530_gpio_to_bit(offset); 2014 2015 mt7530_set(priv, MT7530_LED_GPIO_DIR, bit); 2016 2017 if (value) 2018 mt7530_set(priv, MT7530_LED_GPIO_DATA, bit); 2019 else 2020 mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit); 2021 2022 mt7530_set(priv, MT7530_LED_GPIO_OE, bit); 2023 2024 return 0; 2025 } 2026 2027 static int 2028 mt7530_setup_gpio(struct mt7530_priv *priv) 2029 { 2030 struct device *dev = priv->dev; 2031 struct gpio_chip *gc; 2032 2033 gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL); 2034 if (!gc) 2035 return -ENOMEM; 2036 2037 mt7530_write(priv, MT7530_LED_GPIO_OE, 0); 2038 mt7530_write(priv, MT7530_LED_GPIO_DIR, 0); 2039 mt7530_write(priv, MT7530_LED_IO_MODE, 0); 2040 2041 gc->label = "mt7530"; 2042 gc->parent = dev; 2043 gc->owner = THIS_MODULE; 2044 gc->get_direction = mt7530_gpio_get_direction; 2045 gc->direction_input = mt7530_gpio_direction_input; 2046 gc->direction_output = mt7530_gpio_direction_output; 2047 gc->get = mt7530_gpio_get; 2048 gc->set = mt7530_gpio_set; 2049 gc->base = -1; 2050 gc->ngpio = 15; 2051 gc->can_sleep = true; 2052 2053 return devm_gpiochip_add_data(dev, gc, priv); 2054 } 2055 #endif /* CONFIG_GPIOLIB */ 2056 2057 static irqreturn_t 2058 mt7530_irq_thread_fn(int irq, void *dev_id) 2059 { 2060 struct mt7530_priv *priv = dev_id; 2061 bool handled = false; 2062 u32 val; 2063 int p; 2064 2065 mt7530_mutex_lock(priv); 2066 val = mt7530_mii_read(priv, MT7530_SYS_INT_STS); 2067 mt7530_mii_write(priv, MT7530_SYS_INT_STS, val); 2068 mt7530_mutex_unlock(priv); 2069 2070 for (p = 0; p < MT7530_NUM_PHYS; p++) { 2071 if (BIT(p) & val) { 2072 unsigned int irq; 2073 2074 irq = irq_find_mapping(priv->irq_domain, p); 2075 handle_nested_irq(irq); 2076 handled = true; 2077 } 2078 } 2079 2080 return IRQ_RETVAL(handled); 2081 } 2082 2083 static void 2084 mt7530_irq_mask(struct irq_data *d) 2085 { 2086 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); 2087 2088 priv->irq_enable &= ~BIT(d->hwirq); 2089 } 2090 2091 static void 2092 mt7530_irq_unmask(struct irq_data *d) 2093 { 2094 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); 2095 2096 priv->irq_enable |= BIT(d->hwirq); 2097 } 2098 2099 static void 2100 mt7530_irq_bus_lock(struct irq_data *d) 2101 { 2102 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); 2103 2104 mt7530_mutex_lock(priv); 2105 } 2106 2107 static void 2108 mt7530_irq_bus_sync_unlock(struct irq_data *d) 2109 { 2110 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); 2111 2112 mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable); 2113 mt7530_mutex_unlock(priv); 2114 } 2115 2116 static struct irq_chip mt7530_irq_chip = { 2117 .name = KBUILD_MODNAME, 2118 .irq_mask = mt7530_irq_mask, 2119 .irq_unmask = mt7530_irq_unmask, 2120 .irq_bus_lock = mt7530_irq_bus_lock, 2121 .irq_bus_sync_unlock = mt7530_irq_bus_sync_unlock, 2122 }; 2123 2124 static int 2125 mt7530_irq_map(struct irq_domain *domain, unsigned int irq, 2126 irq_hw_number_t hwirq) 2127 { 2128 irq_set_chip_data(irq, domain->host_data); 2129 irq_set_chip_and_handler(irq, &mt7530_irq_chip, handle_simple_irq); 2130 irq_set_nested_thread(irq, true); 2131 irq_set_noprobe(irq); 2132 2133 return 0; 2134 } 2135 2136 static const struct irq_domain_ops mt7530_irq_domain_ops = { 2137 .map = mt7530_irq_map, 2138 .xlate = irq_domain_xlate_onecell, 2139 }; 2140 2141 static void 2142 mt7988_irq_mask(struct irq_data *d) 2143 { 2144 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); 2145 2146 priv->irq_enable &= ~BIT(d->hwirq); 2147 mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable); 2148 } 2149 2150 static void 2151 mt7988_irq_unmask(struct irq_data *d) 2152 { 2153 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); 2154 2155 priv->irq_enable |= BIT(d->hwirq); 2156 mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable); 2157 } 2158 2159 static struct irq_chip mt7988_irq_chip = { 2160 .name = KBUILD_MODNAME, 2161 .irq_mask = mt7988_irq_mask, 2162 .irq_unmask = mt7988_irq_unmask, 2163 }; 2164 2165 static int 2166 mt7988_irq_map(struct irq_domain *domain, unsigned int irq, 2167 irq_hw_number_t hwirq) 2168 { 2169 irq_set_chip_data(irq, domain->host_data); 2170 irq_set_chip_and_handler(irq, &mt7988_irq_chip, handle_simple_irq); 2171 irq_set_nested_thread(irq, true); 2172 irq_set_noprobe(irq); 2173 2174 return 0; 2175 } 2176 2177 static const struct irq_domain_ops mt7988_irq_domain_ops = { 2178 .map = mt7988_irq_map, 2179 .xlate = irq_domain_xlate_onecell, 2180 }; 2181 2182 static void 2183 mt7530_setup_mdio_irq(struct mt7530_priv *priv) 2184 { 2185 struct dsa_switch *ds = priv->ds; 2186 int p; 2187 2188 for (p = 0; p < MT7530_NUM_PHYS; p++) { 2189 if (BIT(p) & ds->phys_mii_mask) { 2190 unsigned int irq; 2191 2192 irq = irq_create_mapping(priv->irq_domain, p); 2193 ds->user_mii_bus->irq[p] = irq; 2194 } 2195 } 2196 } 2197 2198 static int 2199 mt7530_setup_irq(struct mt7530_priv *priv) 2200 { 2201 struct device *dev = priv->dev; 2202 struct device_node *np = dev->of_node; 2203 int ret; 2204 2205 if (!of_property_read_bool(np, "interrupt-controller")) { 2206 dev_info(dev, "no interrupt support\n"); 2207 return 0; 2208 } 2209 2210 priv->irq = of_irq_get(np, 0); 2211 if (priv->irq <= 0) { 2212 dev_err(dev, "failed to get parent IRQ: %d\n", priv->irq); 2213 return priv->irq ? : -EINVAL; 2214 } 2215 2216 if (priv->id == ID_MT7988) 2217 priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS, 2218 &mt7988_irq_domain_ops, 2219 priv); 2220 else 2221 priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS, 2222 &mt7530_irq_domain_ops, 2223 priv); 2224 2225 if (!priv->irq_domain) { 2226 dev_err(dev, "failed to create IRQ domain\n"); 2227 return -ENOMEM; 2228 } 2229 2230 /* This register must be set for MT7530 to properly fire interrupts */ 2231 if (priv->id == ID_MT7530 || priv->id == ID_MT7621) 2232 mt7530_set(priv, MT7530_TOP_SIG_CTRL, TOP_SIG_CTRL_NORMAL); 2233 2234 ret = request_threaded_irq(priv->irq, NULL, mt7530_irq_thread_fn, 2235 IRQF_ONESHOT, KBUILD_MODNAME, priv); 2236 if (ret) { 2237 irq_domain_remove(priv->irq_domain); 2238 dev_err(dev, "failed to request IRQ: %d\n", ret); 2239 return ret; 2240 } 2241 2242 return 0; 2243 } 2244 2245 static void 2246 mt7530_free_mdio_irq(struct mt7530_priv *priv) 2247 { 2248 int p; 2249 2250 for (p = 0; p < MT7530_NUM_PHYS; p++) { 2251 if (BIT(p) & priv->ds->phys_mii_mask) { 2252 unsigned int irq; 2253 2254 irq = irq_find_mapping(priv->irq_domain, p); 2255 irq_dispose_mapping(irq); 2256 } 2257 } 2258 } 2259 2260 static void 2261 mt7530_free_irq_common(struct mt7530_priv *priv) 2262 { 2263 free_irq(priv->irq, priv); 2264 irq_domain_remove(priv->irq_domain); 2265 } 2266 2267 static void 2268 mt7530_free_irq(struct mt7530_priv *priv) 2269 { 2270 struct device_node *mnp, *np = priv->dev->of_node; 2271 2272 mnp = of_get_child_by_name(np, "mdio"); 2273 if (!mnp) 2274 mt7530_free_mdio_irq(priv); 2275 of_node_put(mnp); 2276 2277 mt7530_free_irq_common(priv); 2278 } 2279 2280 static int 2281 mt7530_setup_mdio(struct mt7530_priv *priv) 2282 { 2283 struct device_node *mnp, *np = priv->dev->of_node; 2284 struct dsa_switch *ds = priv->ds; 2285 struct device *dev = priv->dev; 2286 struct mii_bus *bus; 2287 static int idx; 2288 int ret = 0; 2289 2290 mnp = of_get_child_by_name(np, "mdio"); 2291 2292 if (mnp && !of_device_is_available(mnp)) 2293 goto out; 2294 2295 bus = devm_mdiobus_alloc(dev); 2296 if (!bus) { 2297 ret = -ENOMEM; 2298 goto out; 2299 } 2300 2301 if (!mnp) 2302 ds->user_mii_bus = bus; 2303 2304 bus->priv = priv; 2305 bus->name = KBUILD_MODNAME "-mii"; 2306 snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++); 2307 bus->read = mt753x_phy_read_c22; 2308 bus->write = mt753x_phy_write_c22; 2309 bus->read_c45 = mt753x_phy_read_c45; 2310 bus->write_c45 = mt753x_phy_write_c45; 2311 bus->parent = dev; 2312 bus->phy_mask = ~ds->phys_mii_mask; 2313 2314 if (priv->irq && !mnp) 2315 mt7530_setup_mdio_irq(priv); 2316 2317 ret = devm_of_mdiobus_register(dev, bus, mnp); 2318 if (ret) { 2319 dev_err(dev, "failed to register MDIO bus: %d\n", ret); 2320 if (priv->irq && !mnp) 2321 mt7530_free_mdio_irq(priv); 2322 } 2323 2324 out: 2325 of_node_put(mnp); 2326 return ret; 2327 } 2328 2329 static int 2330 mt7530_setup(struct dsa_switch *ds) 2331 { 2332 struct mt7530_priv *priv = ds->priv; 2333 struct device_node *dn = NULL; 2334 struct device_node *phy_node; 2335 struct device_node *mac_np; 2336 struct mt7530_dummy_poll p; 2337 phy_interface_t interface; 2338 struct dsa_port *cpu_dp; 2339 u32 id, val; 2340 int ret, i; 2341 2342 /* The parent node of conduit netdev which holds the common system 2343 * controller also is the container for two GMACs nodes representing 2344 * as two netdev instances. 2345 */ 2346 dsa_switch_for_each_cpu_port(cpu_dp, ds) { 2347 dn = cpu_dp->conduit->dev.of_node->parent; 2348 /* It doesn't matter which CPU port is found first, 2349 * their conduits should share the same parent OF node 2350 */ 2351 break; 2352 } 2353 2354 if (!dn) { 2355 dev_err(ds->dev, "parent OF node of DSA conduit not found"); 2356 return -EINVAL; 2357 } 2358 2359 ds->assisted_learning_on_cpu_port = true; 2360 ds->mtu_enforcement_ingress = true; 2361 2362 if (priv->id == ID_MT7530) { 2363 regulator_set_voltage(priv->core_pwr, 1000000, 1000000); 2364 ret = regulator_enable(priv->core_pwr); 2365 if (ret < 0) { 2366 dev_err(priv->dev, 2367 "Failed to enable core power: %d\n", ret); 2368 return ret; 2369 } 2370 2371 regulator_set_voltage(priv->io_pwr, 3300000, 3300000); 2372 ret = regulator_enable(priv->io_pwr); 2373 if (ret < 0) { 2374 dev_err(priv->dev, "Failed to enable io pwr: %d\n", 2375 ret); 2376 return ret; 2377 } 2378 } 2379 2380 /* Reset whole chip through gpio pin or memory-mapped registers for 2381 * different type of hardware 2382 */ 2383 if (priv->mcm) { 2384 reset_control_assert(priv->rstc); 2385 usleep_range(5000, 5100); 2386 reset_control_deassert(priv->rstc); 2387 } else { 2388 gpiod_set_value_cansleep(priv->reset, 0); 2389 usleep_range(5000, 5100); 2390 gpiod_set_value_cansleep(priv->reset, 1); 2391 } 2392 2393 /* Waiting for MT7530 got to stable */ 2394 INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP); 2395 ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0, 2396 20, 1000000); 2397 if (ret < 0) { 2398 dev_err(priv->dev, "reset timeout\n"); 2399 return ret; 2400 } 2401 2402 id = mt7530_read(priv, MT7530_CREV); 2403 id >>= CHIP_NAME_SHIFT; 2404 if (id != MT7530_ID) { 2405 dev_err(priv->dev, "chip %x can't be supported\n", id); 2406 return -ENODEV; 2407 } 2408 2409 if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_20MHZ) { 2410 dev_err(priv->dev, 2411 "MT7530 with a 20MHz XTAL is not supported!\n"); 2412 return -EINVAL; 2413 } 2414 2415 /* Reset the switch through internal reset */ 2416 mt7530_write(priv, MT7530_SYS_CTRL, 2417 SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST | 2418 SYS_CTRL_REG_RST); 2419 2420 /* Lower Tx driving for TRGMII path */ 2421 for (i = 0; i < NUM_TRGMII_CTRL; i++) 2422 mt7530_write(priv, MT7530_TRGMII_TD_ODT(i), 2423 TD_DM_DRVP(8) | TD_DM_DRVN(8)); 2424 2425 for (i = 0; i < NUM_TRGMII_CTRL; i++) 2426 mt7530_rmw(priv, MT7530_TRGMII_RD(i), 2427 RD_TAP_MASK, RD_TAP(16)); 2428 2429 /* Enable port 6 */ 2430 val = mt7530_read(priv, MT7530_MHWTRAP); 2431 val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS; 2432 val |= MHWTRAP_MANUAL; 2433 mt7530_write(priv, MT7530_MHWTRAP, val); 2434 2435 if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_40MHZ) 2436 mt7530_pll_setup(priv); 2437 2438 mt753x_trap_frames(priv); 2439 2440 /* Enable and reset MIB counters */ 2441 mt7530_mib_reset(ds); 2442 2443 for (i = 0; i < MT7530_NUM_PORTS; i++) { 2444 /* Clear link settings and enable force mode to force link down 2445 * on all ports until they're enabled later. 2446 */ 2447 mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK | 2448 PMCR_FORCE_MODE, PMCR_FORCE_MODE); 2449 2450 /* Disable forwarding by default on all ports */ 2451 mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK, 2452 PCR_MATRIX_CLR); 2453 2454 /* Disable learning by default on all ports */ 2455 mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); 2456 2457 if (dsa_is_cpu_port(ds, i)) { 2458 mt753x_cpu_port_enable(ds, i); 2459 } else { 2460 mt7530_port_disable(ds, i); 2461 2462 /* Set default PVID to 0 on all user ports */ 2463 mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK, 2464 G0_PORT_VID_DEF); 2465 } 2466 /* Enable consistent egress tag */ 2467 mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK, 2468 PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); 2469 } 2470 2471 /* Allow mirroring frames received on the local port (monitor port). */ 2472 mt7530_set(priv, MT753X_AGC, LOCAL_EN); 2473 2474 /* Setup VLAN ID 0 for VLAN-unaware bridges */ 2475 ret = mt7530_setup_vlan0(priv); 2476 if (ret) 2477 return ret; 2478 2479 /* Setup port 5 */ 2480 if (!dsa_is_unused_port(ds, 5)) { 2481 priv->p5_intf_sel = P5_INTF_SEL_GMAC5; 2482 } else { 2483 /* Scan the ethernet nodes. Look for GMAC1, lookup the used PHY. 2484 * Set priv->p5_intf_sel to the appropriate value if PHY muxing 2485 * is detected. 2486 */ 2487 for_each_child_of_node(dn, mac_np) { 2488 if (!of_device_is_compatible(mac_np, 2489 "mediatek,eth-mac")) 2490 continue; 2491 2492 ret = of_property_read_u32(mac_np, "reg", &id); 2493 if (ret < 0 || id != 1) 2494 continue; 2495 2496 phy_node = of_parse_phandle(mac_np, "phy-handle", 0); 2497 if (!phy_node) 2498 continue; 2499 2500 if (phy_node->parent == priv->dev->of_node->parent) { 2501 ret = of_get_phy_mode(mac_np, &interface); 2502 if (ret && ret != -ENODEV) { 2503 of_node_put(mac_np); 2504 of_node_put(phy_node); 2505 return ret; 2506 } 2507 id = of_mdio_parse_addr(ds->dev, phy_node); 2508 if (id == 0) 2509 priv->p5_intf_sel = P5_INTF_SEL_PHY_P0; 2510 if (id == 4) 2511 priv->p5_intf_sel = P5_INTF_SEL_PHY_P4; 2512 } 2513 of_node_put(mac_np); 2514 of_node_put(phy_node); 2515 break; 2516 } 2517 2518 if (priv->p5_intf_sel == P5_INTF_SEL_PHY_P0 || 2519 priv->p5_intf_sel == P5_INTF_SEL_PHY_P4) 2520 mt7530_setup_port5(ds, interface); 2521 } 2522 2523 #ifdef CONFIG_GPIOLIB 2524 if (of_property_read_bool(priv->dev->of_node, "gpio-controller")) { 2525 ret = mt7530_setup_gpio(priv); 2526 if (ret) 2527 return ret; 2528 } 2529 #endif /* CONFIG_GPIOLIB */ 2530 2531 /* Flush the FDB table */ 2532 ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL); 2533 if (ret < 0) 2534 return ret; 2535 2536 return 0; 2537 } 2538 2539 static int 2540 mt7531_setup_common(struct dsa_switch *ds) 2541 { 2542 struct mt7530_priv *priv = ds->priv; 2543 int ret, i; 2544 2545 mt753x_trap_frames(priv); 2546 2547 /* Enable and reset MIB counters */ 2548 mt7530_mib_reset(ds); 2549 2550 /* Disable flooding on all ports */ 2551 mt7530_clear(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK | 2552 UNU_FFP_MASK); 2553 2554 for (i = 0; i < MT7530_NUM_PORTS; i++) { 2555 /* Clear link settings and enable force mode to force link down 2556 * on all ports until they're enabled later. 2557 */ 2558 mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK | 2559 MT7531_FORCE_MODE, MT7531_FORCE_MODE); 2560 2561 /* Disable forwarding by default on all ports */ 2562 mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK, 2563 PCR_MATRIX_CLR); 2564 2565 /* Disable learning by default on all ports */ 2566 mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); 2567 2568 mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR); 2569 2570 if (dsa_is_cpu_port(ds, i)) { 2571 mt753x_cpu_port_enable(ds, i); 2572 } else { 2573 mt7530_port_disable(ds, i); 2574 2575 /* Set default PVID to 0 on all user ports */ 2576 mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK, 2577 G0_PORT_VID_DEF); 2578 } 2579 2580 /* Enable consistent egress tag */ 2581 mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK, 2582 PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); 2583 } 2584 2585 /* Allow mirroring frames received on the local port (monitor port). */ 2586 mt7530_set(priv, MT753X_AGC, LOCAL_EN); 2587 2588 /* Flush the FDB table */ 2589 ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL); 2590 if (ret < 0) 2591 return ret; 2592 2593 return 0; 2594 } 2595 2596 static int 2597 mt7531_setup(struct dsa_switch *ds) 2598 { 2599 struct mt7530_priv *priv = ds->priv; 2600 struct mt7530_dummy_poll p; 2601 u32 val, id; 2602 int ret, i; 2603 2604 /* Reset whole chip through gpio pin or memory-mapped registers for 2605 * different type of hardware 2606 */ 2607 if (priv->mcm) { 2608 reset_control_assert(priv->rstc); 2609 usleep_range(5000, 5100); 2610 reset_control_deassert(priv->rstc); 2611 } else { 2612 gpiod_set_value_cansleep(priv->reset, 0); 2613 usleep_range(5000, 5100); 2614 gpiod_set_value_cansleep(priv->reset, 1); 2615 } 2616 2617 /* Waiting for MT7530 got to stable */ 2618 INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP); 2619 ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0, 2620 20, 1000000); 2621 if (ret < 0) { 2622 dev_err(priv->dev, "reset timeout\n"); 2623 return ret; 2624 } 2625 2626 id = mt7530_read(priv, MT7531_CREV); 2627 id >>= CHIP_NAME_SHIFT; 2628 2629 if (id != MT7531_ID) { 2630 dev_err(priv->dev, "chip %x can't be supported\n", id); 2631 return -ENODEV; 2632 } 2633 2634 /* MT7531AE has got two SGMII units. One for port 5, one for port 6. 2635 * MT7531BE has got only one SGMII unit which is for port 6. 2636 */ 2637 val = mt7530_read(priv, MT7531_TOP_SIG_SR); 2638 priv->p5_sgmii = !!(val & PAD_DUAL_SGMII_EN); 2639 2640 /* Force link down on all ports before internal reset */ 2641 for (i = 0; i < MT7530_NUM_PORTS; i++) 2642 mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK); 2643 2644 /* Reset the switch through internal reset */ 2645 mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_SW_RST | SYS_CTRL_REG_RST); 2646 2647 if (!priv->p5_sgmii) { 2648 mt7531_pll_setup(priv); 2649 } else { 2650 /* Let ds->user_mii_bus be able to access external phy. */ 2651 mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK, 2652 MT7531_EXT_P_MDC_11); 2653 mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK, 2654 MT7531_EXT_P_MDIO_12); 2655 } 2656 2657 if (!dsa_is_unused_port(ds, 5)) 2658 priv->p5_intf_sel = P5_INTF_SEL_GMAC5; 2659 2660 mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK, 2661 MT7531_GPIO0_INTERRUPT); 2662 2663 /* Enable Energy-Efficient Ethernet (EEE) and PHY core PLL, since 2664 * phy_device has not yet been created provided for 2665 * phy_[read,write]_mmd_indirect is called, we provide our own 2666 * mt7531_ind_mmd_phy_[read,write] to complete this function. 2667 */ 2668 val = mt7531_ind_c45_phy_read(priv, 2669 MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), 2670 MDIO_MMD_VEND2, CORE_PLL_GROUP4); 2671 val |= MT7531_RG_SYSPLL_DMY2 | MT7531_PHY_PLL_BYPASS_MODE; 2672 val &= ~MT7531_PHY_PLL_OFF; 2673 mt7531_ind_c45_phy_write(priv, 2674 MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), 2675 MDIO_MMD_VEND2, CORE_PLL_GROUP4, val); 2676 2677 /* Disable EEE advertisement on the switch PHYs. */ 2678 for (i = MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr); 2679 i < MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr) + MT7530_NUM_PHYS; 2680 i++) { 2681 mt7531_ind_c45_phy_write(priv, i, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 2682 0); 2683 } 2684 2685 mt7531_setup_common(ds); 2686 2687 /* Setup VLAN ID 0 for VLAN-unaware bridges */ 2688 ret = mt7530_setup_vlan0(priv); 2689 if (ret) 2690 return ret; 2691 2692 ds->assisted_learning_on_cpu_port = true; 2693 ds->mtu_enforcement_ingress = true; 2694 2695 return 0; 2696 } 2697 2698 static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port, 2699 struct phylink_config *config) 2700 { 2701 switch (port) { 2702 /* Ports which are connected to switch PHYs. There is no MII pinout. */ 2703 case 0 ... 4: 2704 __set_bit(PHY_INTERFACE_MODE_GMII, 2705 config->supported_interfaces); 2706 break; 2707 2708 /* Port 5 supports rgmii with delays, mii, and gmii. */ 2709 case 5: 2710 phy_interface_set_rgmii(config->supported_interfaces); 2711 __set_bit(PHY_INTERFACE_MODE_MII, 2712 config->supported_interfaces); 2713 __set_bit(PHY_INTERFACE_MODE_GMII, 2714 config->supported_interfaces); 2715 break; 2716 2717 /* Port 6 supports rgmii and trgmii. */ 2718 case 6: 2719 __set_bit(PHY_INTERFACE_MODE_RGMII, 2720 config->supported_interfaces); 2721 __set_bit(PHY_INTERFACE_MODE_TRGMII, 2722 config->supported_interfaces); 2723 break; 2724 } 2725 } 2726 2727 static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port, 2728 struct phylink_config *config) 2729 { 2730 struct mt7530_priv *priv = ds->priv; 2731 2732 switch (port) { 2733 /* Ports which are connected to switch PHYs. There is no MII pinout. */ 2734 case 0 ... 4: 2735 __set_bit(PHY_INTERFACE_MODE_GMII, 2736 config->supported_interfaces); 2737 break; 2738 2739 /* Port 5 supports rgmii with delays on MT7531BE, sgmii/802.3z on 2740 * MT7531AE. 2741 */ 2742 case 5: 2743 if (!priv->p5_sgmii) { 2744 phy_interface_set_rgmii(config->supported_interfaces); 2745 break; 2746 } 2747 fallthrough; 2748 2749 /* Port 6 supports sgmii/802.3z. */ 2750 case 6: 2751 __set_bit(PHY_INTERFACE_MODE_SGMII, 2752 config->supported_interfaces); 2753 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 2754 config->supported_interfaces); 2755 __set_bit(PHY_INTERFACE_MODE_2500BASEX, 2756 config->supported_interfaces); 2757 2758 config->mac_capabilities |= MAC_2500FD; 2759 break; 2760 } 2761 } 2762 2763 static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port, 2764 struct phylink_config *config) 2765 { 2766 switch (port) { 2767 /* Ports which are connected to switch PHYs. There is no MII pinout. */ 2768 case 0 ... 3: 2769 __set_bit(PHY_INTERFACE_MODE_INTERNAL, 2770 config->supported_interfaces); 2771 break; 2772 2773 /* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. */ 2774 case 6: 2775 __set_bit(PHY_INTERFACE_MODE_INTERNAL, 2776 config->supported_interfaces); 2777 config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | 2778 MAC_10000FD; 2779 } 2780 } 2781 2782 static void 2783 mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode, 2784 phy_interface_t interface) 2785 { 2786 struct mt7530_priv *priv = ds->priv; 2787 2788 if (port == 5) 2789 mt7530_setup_port5(priv->ds, interface); 2790 else if (port == 6) 2791 mt7530_setup_port6(priv->ds, interface); 2792 } 2793 2794 static void mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port, 2795 phy_interface_t interface, 2796 struct phy_device *phydev) 2797 { 2798 u32 val; 2799 2800 val = mt7530_read(priv, MT7531_CLKGEN_CTRL); 2801 val |= GP_CLK_EN; 2802 val &= ~GP_MODE_MASK; 2803 val |= GP_MODE(MT7531_GP_MODE_RGMII); 2804 val &= ~CLK_SKEW_IN_MASK; 2805 val |= CLK_SKEW_IN(MT7531_CLK_SKEW_NO_CHG); 2806 val &= ~CLK_SKEW_OUT_MASK; 2807 val |= CLK_SKEW_OUT(MT7531_CLK_SKEW_NO_CHG); 2808 val |= TXCLK_NO_REVERSE | RXCLK_NO_DELAY; 2809 2810 /* Do not adjust rgmii delay when vendor phy driver presents. */ 2811 if (!phydev || phy_driver_is_genphy(phydev)) { 2812 val &= ~(TXCLK_NO_REVERSE | RXCLK_NO_DELAY); 2813 switch (interface) { 2814 case PHY_INTERFACE_MODE_RGMII: 2815 val |= TXCLK_NO_REVERSE; 2816 val |= RXCLK_NO_DELAY; 2817 break; 2818 case PHY_INTERFACE_MODE_RGMII_RXID: 2819 val |= TXCLK_NO_REVERSE; 2820 break; 2821 case PHY_INTERFACE_MODE_RGMII_TXID: 2822 val |= RXCLK_NO_DELAY; 2823 break; 2824 case PHY_INTERFACE_MODE_RGMII_ID: 2825 break; 2826 default: 2827 break; 2828 } 2829 } 2830 2831 mt7530_write(priv, MT7531_CLKGEN_CTRL, val); 2832 } 2833 2834 static void 2835 mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode, 2836 phy_interface_t interface) 2837 { 2838 struct mt7530_priv *priv = ds->priv; 2839 struct phy_device *phydev; 2840 struct dsa_port *dp; 2841 2842 if (phy_interface_mode_is_rgmii(interface)) { 2843 dp = dsa_to_port(ds, port); 2844 phydev = dp->user->phydev; 2845 mt7531_rgmii_setup(priv, port, interface, phydev); 2846 } 2847 } 2848 2849 static struct phylink_pcs * 2850 mt753x_phylink_mac_select_pcs(struct phylink_config *config, 2851 phy_interface_t interface) 2852 { 2853 struct dsa_port *dp = dsa_phylink_to_port(config); 2854 struct mt7530_priv *priv = dp->ds->priv; 2855 2856 switch (interface) { 2857 case PHY_INTERFACE_MODE_TRGMII: 2858 return &priv->pcs[dp->index].pcs; 2859 case PHY_INTERFACE_MODE_SGMII: 2860 case PHY_INTERFACE_MODE_1000BASEX: 2861 case PHY_INTERFACE_MODE_2500BASEX: 2862 return priv->ports[dp->index].sgmii_pcs; 2863 default: 2864 return NULL; 2865 } 2866 } 2867 2868 static void 2869 mt753x_phylink_mac_config(struct phylink_config *config, unsigned int mode, 2870 const struct phylink_link_state *state) 2871 { 2872 struct dsa_port *dp = dsa_phylink_to_port(config); 2873 struct dsa_switch *ds = dp->ds; 2874 struct mt7530_priv *priv; 2875 int port = dp->index; 2876 2877 priv = ds->priv; 2878 2879 if ((port == 5 || port == 6) && priv->info->mac_port_config) 2880 priv->info->mac_port_config(ds, port, mode, state->interface); 2881 2882 /* Are we connected to external phy */ 2883 if (port == 5 && dsa_is_user_port(ds, 5)) 2884 mt7530_set(priv, MT7530_PMCR_P(port), PMCR_EXT_PHY); 2885 } 2886 2887 static void mt753x_phylink_mac_link_down(struct phylink_config *config, 2888 unsigned int mode, 2889 phy_interface_t interface) 2890 { 2891 struct dsa_port *dp = dsa_phylink_to_port(config); 2892 struct mt7530_priv *priv = dp->ds->priv; 2893 2894 mt7530_clear(priv, MT7530_PMCR_P(dp->index), PMCR_LINK_SETTINGS_MASK); 2895 } 2896 2897 static void mt753x_phylink_mac_link_up(struct phylink_config *config, 2898 struct phy_device *phydev, 2899 unsigned int mode, 2900 phy_interface_t interface, 2901 int speed, int duplex, 2902 bool tx_pause, bool rx_pause) 2903 { 2904 struct dsa_port *dp = dsa_phylink_to_port(config); 2905 struct mt7530_priv *priv = dp->ds->priv; 2906 u32 mcr; 2907 2908 mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK; 2909 2910 switch (speed) { 2911 case SPEED_1000: 2912 case SPEED_2500: 2913 case SPEED_10000: 2914 mcr |= PMCR_FORCE_SPEED_1000; 2915 break; 2916 case SPEED_100: 2917 mcr |= PMCR_FORCE_SPEED_100; 2918 break; 2919 } 2920 if (duplex == DUPLEX_FULL) { 2921 mcr |= PMCR_FORCE_FDX; 2922 if (tx_pause) 2923 mcr |= PMCR_TX_FC_EN; 2924 if (rx_pause) 2925 mcr |= PMCR_RX_FC_EN; 2926 } 2927 2928 if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) { 2929 switch (speed) { 2930 case SPEED_1000: 2931 case SPEED_2500: 2932 mcr |= PMCR_FORCE_EEE1G; 2933 break; 2934 case SPEED_100: 2935 mcr |= PMCR_FORCE_EEE100; 2936 break; 2937 } 2938 } 2939 2940 mt7530_set(priv, MT7530_PMCR_P(dp->index), mcr); 2941 } 2942 2943 static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port, 2944 struct phylink_config *config) 2945 { 2946 struct mt7530_priv *priv = ds->priv; 2947 2948 /* This switch only supports full-duplex at 1Gbps */ 2949 config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | 2950 MAC_10 | MAC_100 | MAC_1000FD; 2951 2952 priv->info->mac_port_get_caps(ds, port, config); 2953 } 2954 2955 static int mt753x_pcs_validate(struct phylink_pcs *pcs, 2956 unsigned long *supported, 2957 const struct phylink_link_state *state) 2958 { 2959 /* Autonegotiation is not supported in TRGMII nor 802.3z modes */ 2960 if (state->interface == PHY_INTERFACE_MODE_TRGMII || 2961 phy_interface_mode_is_8023z(state->interface)) 2962 phylink_clear(supported, Autoneg); 2963 2964 return 0; 2965 } 2966 2967 static void mt7530_pcs_get_state(struct phylink_pcs *pcs, 2968 struct phylink_link_state *state) 2969 { 2970 struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv; 2971 int port = pcs_to_mt753x_pcs(pcs)->port; 2972 u32 pmsr; 2973 2974 pmsr = mt7530_read(priv, MT7530_PMSR_P(port)); 2975 2976 state->link = (pmsr & PMSR_LINK); 2977 state->an_complete = state->link; 2978 state->duplex = !!(pmsr & PMSR_DPX); 2979 2980 switch (pmsr & PMSR_SPEED_MASK) { 2981 case PMSR_SPEED_10: 2982 state->speed = SPEED_10; 2983 break; 2984 case PMSR_SPEED_100: 2985 state->speed = SPEED_100; 2986 break; 2987 case PMSR_SPEED_1000: 2988 state->speed = SPEED_1000; 2989 break; 2990 default: 2991 state->speed = SPEED_UNKNOWN; 2992 break; 2993 } 2994 2995 state->pause &= ~(MLO_PAUSE_RX | MLO_PAUSE_TX); 2996 if (pmsr & PMSR_RX_FC) 2997 state->pause |= MLO_PAUSE_RX; 2998 if (pmsr & PMSR_TX_FC) 2999 state->pause |= MLO_PAUSE_TX; 3000 } 3001 3002 static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, 3003 phy_interface_t interface, 3004 const unsigned long *advertising, 3005 bool permit_pause_to_mac) 3006 { 3007 return 0; 3008 } 3009 3010 static void mt7530_pcs_an_restart(struct phylink_pcs *pcs) 3011 { 3012 } 3013 3014 static const struct phylink_pcs_ops mt7530_pcs_ops = { 3015 .pcs_validate = mt753x_pcs_validate, 3016 .pcs_get_state = mt7530_pcs_get_state, 3017 .pcs_config = mt753x_pcs_config, 3018 .pcs_an_restart = mt7530_pcs_an_restart, 3019 }; 3020 3021 static int 3022 mt753x_setup(struct dsa_switch *ds) 3023 { 3024 struct mt7530_priv *priv = ds->priv; 3025 int ret = priv->info->sw_setup(ds); 3026 int i; 3027 3028 if (ret) 3029 return ret; 3030 3031 ret = mt7530_setup_irq(priv); 3032 if (ret) 3033 return ret; 3034 3035 ret = mt7530_setup_mdio(priv); 3036 if (ret && priv->irq) 3037 mt7530_free_irq_common(priv); 3038 3039 /* Initialise the PCS devices */ 3040 for (i = 0; i < priv->ds->num_ports; i++) { 3041 priv->pcs[i].pcs.ops = priv->info->pcs_ops; 3042 priv->pcs[i].pcs.neg_mode = true; 3043 priv->pcs[i].priv = priv; 3044 priv->pcs[i].port = i; 3045 } 3046 3047 if (priv->create_sgmii) { 3048 ret = priv->create_sgmii(priv); 3049 if (ret && priv->irq) 3050 mt7530_free_irq(priv); 3051 } 3052 3053 return ret; 3054 } 3055 3056 static int mt753x_get_mac_eee(struct dsa_switch *ds, int port, 3057 struct ethtool_keee *e) 3058 { 3059 struct mt7530_priv *priv = ds->priv; 3060 u32 eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port)); 3061 3062 e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN); 3063 e->tx_lpi_timer = GET_LPI_THRESH(eeecr); 3064 3065 return 0; 3066 } 3067 3068 static int mt753x_set_mac_eee(struct dsa_switch *ds, int port, 3069 struct ethtool_keee *e) 3070 { 3071 struct mt7530_priv *priv = ds->priv; 3072 u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN; 3073 3074 if (e->tx_lpi_timer > 0xFFF) 3075 return -EINVAL; 3076 3077 set = SET_LPI_THRESH(e->tx_lpi_timer); 3078 if (!e->tx_lpi_enabled) 3079 /* Force LPI Mode without a delay */ 3080 set |= LPI_MODE_EN; 3081 mt7530_rmw(priv, MT7530_PMEEECR_P(port), mask, set); 3082 3083 return 0; 3084 } 3085 3086 static void 3087 mt753x_conduit_state_change(struct dsa_switch *ds, 3088 const struct net_device *conduit, 3089 bool operational) 3090 { 3091 struct dsa_port *cpu_dp = conduit->dsa_ptr; 3092 struct mt7530_priv *priv = ds->priv; 3093 int val = 0; 3094 u8 mask; 3095 3096 /* Set the CPU port to trap frames to for MT7530. Trapped frames will be 3097 * forwarded to the numerically smallest CPU port whose conduit 3098 * interface is up. 3099 */ 3100 if (priv->id != ID_MT7530 && priv->id != ID_MT7621) 3101 return; 3102 3103 mask = BIT(cpu_dp->index); 3104 3105 if (operational) 3106 priv->active_cpu_ports |= mask; 3107 else 3108 priv->active_cpu_ports &= ~mask; 3109 3110 if (priv->active_cpu_ports) 3111 val = CPU_EN | CPU_PORT(__ffs(priv->active_cpu_ports)); 3112 3113 mt7530_rmw(priv, MT7530_MFC, CPU_EN | CPU_PORT_MASK, val); 3114 } 3115 3116 static int mt7988_setup(struct dsa_switch *ds) 3117 { 3118 struct mt7530_priv *priv = ds->priv; 3119 3120 /* Reset the switch */ 3121 reset_control_assert(priv->rstc); 3122 usleep_range(20, 50); 3123 reset_control_deassert(priv->rstc); 3124 usleep_range(20, 50); 3125 3126 /* Reset the switch PHYs */ 3127 mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_PHY_RST); 3128 3129 return mt7531_setup_common(ds); 3130 } 3131 3132 const struct dsa_switch_ops mt7530_switch_ops = { 3133 .get_tag_protocol = mtk_get_tag_protocol, 3134 .setup = mt753x_setup, 3135 .preferred_default_local_cpu_port = mt753x_preferred_default_local_cpu_port, 3136 .get_strings = mt7530_get_strings, 3137 .get_ethtool_stats = mt7530_get_ethtool_stats, 3138 .get_sset_count = mt7530_get_sset_count, 3139 .set_ageing_time = mt7530_set_ageing_time, 3140 .port_enable = mt7530_port_enable, 3141 .port_disable = mt7530_port_disable, 3142 .port_change_mtu = mt7530_port_change_mtu, 3143 .port_max_mtu = mt7530_port_max_mtu, 3144 .port_stp_state_set = mt7530_stp_state_set, 3145 .port_pre_bridge_flags = mt7530_port_pre_bridge_flags, 3146 .port_bridge_flags = mt7530_port_bridge_flags, 3147 .port_bridge_join = mt7530_port_bridge_join, 3148 .port_bridge_leave = mt7530_port_bridge_leave, 3149 .port_fdb_add = mt7530_port_fdb_add, 3150 .port_fdb_del = mt7530_port_fdb_del, 3151 .port_fdb_dump = mt7530_port_fdb_dump, 3152 .port_mdb_add = mt7530_port_mdb_add, 3153 .port_mdb_del = mt7530_port_mdb_del, 3154 .port_vlan_filtering = mt7530_port_vlan_filtering, 3155 .port_vlan_add = mt7530_port_vlan_add, 3156 .port_vlan_del = mt7530_port_vlan_del, 3157 .port_mirror_add = mt753x_port_mirror_add, 3158 .port_mirror_del = mt753x_port_mirror_del, 3159 .phylink_get_caps = mt753x_phylink_get_caps, 3160 .get_mac_eee = mt753x_get_mac_eee, 3161 .set_mac_eee = mt753x_set_mac_eee, 3162 .conduit_state_change = mt753x_conduit_state_change, 3163 }; 3164 EXPORT_SYMBOL_GPL(mt7530_switch_ops); 3165 3166 static const struct phylink_mac_ops mt753x_phylink_mac_ops = { 3167 .mac_select_pcs = mt753x_phylink_mac_select_pcs, 3168 .mac_config = mt753x_phylink_mac_config, 3169 .mac_link_down = mt753x_phylink_mac_link_down, 3170 .mac_link_up = mt753x_phylink_mac_link_up, 3171 }; 3172 3173 const struct mt753x_info mt753x_table[] = { 3174 [ID_MT7621] = { 3175 .id = ID_MT7621, 3176 .pcs_ops = &mt7530_pcs_ops, 3177 .sw_setup = mt7530_setup, 3178 .phy_read_c22 = mt7530_phy_read_c22, 3179 .phy_write_c22 = mt7530_phy_write_c22, 3180 .phy_read_c45 = mt7530_phy_read_c45, 3181 .phy_write_c45 = mt7530_phy_write_c45, 3182 .mac_port_get_caps = mt7530_mac_port_get_caps, 3183 .mac_port_config = mt7530_mac_config, 3184 }, 3185 [ID_MT7530] = { 3186 .id = ID_MT7530, 3187 .pcs_ops = &mt7530_pcs_ops, 3188 .sw_setup = mt7530_setup, 3189 .phy_read_c22 = mt7530_phy_read_c22, 3190 .phy_write_c22 = mt7530_phy_write_c22, 3191 .phy_read_c45 = mt7530_phy_read_c45, 3192 .phy_write_c45 = mt7530_phy_write_c45, 3193 .mac_port_get_caps = mt7530_mac_port_get_caps, 3194 .mac_port_config = mt7530_mac_config, 3195 }, 3196 [ID_MT7531] = { 3197 .id = ID_MT7531, 3198 .pcs_ops = &mt7530_pcs_ops, 3199 .sw_setup = mt7531_setup, 3200 .phy_read_c22 = mt7531_ind_c22_phy_read, 3201 .phy_write_c22 = mt7531_ind_c22_phy_write, 3202 .phy_read_c45 = mt7531_ind_c45_phy_read, 3203 .phy_write_c45 = mt7531_ind_c45_phy_write, 3204 .mac_port_get_caps = mt7531_mac_port_get_caps, 3205 .mac_port_config = mt7531_mac_config, 3206 }, 3207 [ID_MT7988] = { 3208 .id = ID_MT7988, 3209 .pcs_ops = &mt7530_pcs_ops, 3210 .sw_setup = mt7988_setup, 3211 .phy_read_c22 = mt7531_ind_c22_phy_read, 3212 .phy_write_c22 = mt7531_ind_c22_phy_write, 3213 .phy_read_c45 = mt7531_ind_c45_phy_read, 3214 .phy_write_c45 = mt7531_ind_c45_phy_write, 3215 .mac_port_get_caps = mt7988_mac_port_get_caps, 3216 }, 3217 }; 3218 EXPORT_SYMBOL_GPL(mt753x_table); 3219 3220 int 3221 mt7530_probe_common(struct mt7530_priv *priv) 3222 { 3223 struct device *dev = priv->dev; 3224 3225 priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL); 3226 if (!priv->ds) 3227 return -ENOMEM; 3228 3229 priv->ds->dev = dev; 3230 priv->ds->num_ports = MT7530_NUM_PORTS; 3231 3232 /* Get the hardware identifier from the devicetree node. 3233 * We will need it for some of the clock and regulator setup. 3234 */ 3235 priv->info = of_device_get_match_data(dev); 3236 if (!priv->info) 3237 return -EINVAL; 3238 3239 /* Sanity check if these required device operations are filled 3240 * properly. 3241 */ 3242 if (!priv->info->sw_setup || !priv->info->phy_read_c22 || 3243 !priv->info->phy_write_c22 || !priv->info->mac_port_get_caps) 3244 return -EINVAL; 3245 3246 priv->id = priv->info->id; 3247 priv->dev = dev; 3248 priv->ds->priv = priv; 3249 priv->ds->ops = &mt7530_switch_ops; 3250 priv->ds->phylink_mac_ops = &mt753x_phylink_mac_ops; 3251 mutex_init(&priv->reg_mutex); 3252 dev_set_drvdata(dev, priv); 3253 3254 return 0; 3255 } 3256 EXPORT_SYMBOL_GPL(mt7530_probe_common); 3257 3258 void 3259 mt7530_remove_common(struct mt7530_priv *priv) 3260 { 3261 if (priv->irq) 3262 mt7530_free_irq(priv); 3263 3264 dsa_unregister_switch(priv->ds); 3265 3266 mutex_destroy(&priv->reg_mutex); 3267 } 3268 EXPORT_SYMBOL_GPL(mt7530_remove_common); 3269 3270 MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); 3271 MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch"); 3272 MODULE_LICENSE("GPL"); 3273