// SPDX-License-Identifier: GPL-2.0-only
/*
 * Mediatek MT7530 DSA Switch driver
 * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
 */
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phylink.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <net/dsa.h>

#include "mt7530.h"

static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct mt753x_pcs, pcs);
}

/* String, offset, and register size in bytes if different from 4 bytes */
static const struct mt7530_mib_desc mt7530_mib[] = {
	MIB_DESC(1, 0x00, "TxDrop"),
	MIB_DESC(1, 0x04, "TxCrcErr"),
	MIB_DESC(1, 0x08, "TxUnicast"),
	MIB_DESC(1, 0x0c, "TxMulticast"),
	MIB_DESC(1, 0x10, "TxBroadcast"),
	MIB_DESC(1, 0x14, "TxCollision"),
	MIB_DESC(1, 0x18, "TxSingleCollision"),
	MIB_DESC(1, 0x1c, "TxMultipleCollision"),
	MIB_DESC(1, 0x20, "TxDeferred"),
	MIB_DESC(1, 0x24, "TxLateCollision"),
	MIB_DESC(1, 0x28, "TxExcessiveCollistion"),
	MIB_DESC(1, 0x2c, "TxPause"),
	MIB_DESC(1, 0x30, "TxPktSz64"),
	MIB_DESC(1, 0x34, "TxPktSz65To127"),
	MIB_DESC(1, 0x38, "TxPktSz128To255"),
	MIB_DESC(1, 0x3c, "TxPktSz256To511"),
	MIB_DESC(1, 0x40, "TxPktSz512To1023"),
	MIB_DESC(1, 0x44, "Tx1024ToMax"),
	MIB_DESC(2, 0x48, "TxBytes"),
	MIB_DESC(1, 0x60, "RxDrop"),
	MIB_DESC(1, 0x64, "RxFiltering"),
	MIB_DESC(1, 0x68, "RxUnicast"),
	MIB_DESC(1, 0x6c, "RxMulticast"),
	MIB_DESC(1, 0x70, "RxBroadcast"),
	MIB_DESC(1, 0x74, "RxAlignErr"),
	MIB_DESC(1, 0x78, "RxCrcErr"),
	MIB_DESC(1, 0x7c, "RxUnderSizeErr"),
	MIB_DESC(1, 0x80, "RxFragErr"),
	MIB_DESC(1, 0x84, "RxOverSzErr"),
	MIB_DESC(1, 0x88, "RxJabberErr"),
	MIB_DESC(1, 0x8c, "RxPause"),
	MIB_DESC(1, 0x90, "RxPktSz64"),
	MIB_DESC(1, 0x94, "RxPktSz65To127"),
	MIB_DESC(1, 0x98, "RxPktSz128To255"),
	MIB_DESC(1, 0x9c, "RxPktSz256To511"),
	MIB_DESC(1, 0xa0, "RxPktSz512To1023"),
	MIB_DESC(1, 0xa4, "RxPktSz1024ToMax"),
	MIB_DESC(2, 0xa8, "RxBytes"),
	MIB_DESC(1, 0xb0, "RxCtrlDrop"),
	MIB_DESC(1, 0xb4, "RxIngressDrop"),
	MIB_DESC(1, 0xb8, "RxArlDrop"),
};

/* Since phy_device has not yet been created and
 * phy_{read,write}_mmd_indirect is not available, we provide our own
 * core_{read,write}_mmd_indirect with core_{clear,write,set} wrappers
 * to complete this function.
 */
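/* The switch core registers are reached with the standard Clause 22
 * indirect MMD access sequence (MII registers 13/14, i.e. MII_MMD_CTRL and
 * MII_MMD_DATA), issued at PHY address 0 on the parent MDIO bus.
 */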
static int
core_read_mmd_indirect(struct mt7530_priv *priv, int prtad, int devad)
{
	struct mii_bus *bus = priv->bus;
	int value, ret;

	/* Write the desired MMD Devad */
	ret = bus->write(bus, 0, MII_MMD_CTRL, devad);
	if (ret < 0)
		goto err;

	/* Write the desired MMD register address */
	ret = bus->write(bus, 0, MII_MMD_DATA, prtad);
	if (ret < 0)
		goto err;

	/* Select the Function : DATA with no post increment */
	ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
	if (ret < 0)
		goto err;

	/* Read the content of the MMD's selected register */
	value = bus->read(bus, 0, MII_MMD_DATA);

	return value;
err:
	dev_err(&bus->dev, "failed to read mmd register\n");

	return ret;
}

static int
core_write_mmd_indirect(struct mt7530_priv *priv, int prtad,
			int devad, u32 data)
{
	struct mii_bus *bus = priv->bus;
	int ret;

	/* Write the desired MMD Devad */
	ret = bus->write(bus, 0, MII_MMD_CTRL, devad);
	if (ret < 0)
		goto err;

	/* Write the desired MMD register address */
	ret = bus->write(bus, 0, MII_MMD_DATA, prtad);
	if (ret < 0)
		goto err;

	/* Select the Function : DATA with no post increment */
	ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
	if (ret < 0)
		goto err;

	/* Write the data into MMD's selected register */
	ret = bus->write(bus, 0, MII_MMD_DATA, data);
err:
	if (ret < 0)
		dev_err(&bus->dev,
			"failed to write mmd register\n");
	return ret;
}

static void
mt7530_mutex_lock(struct mt7530_priv *priv)
{
	if (priv->bus)
		mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
}

static void
mt7530_mutex_unlock(struct mt7530_priv *priv)
{
	if (priv->bus)
		mutex_unlock(&priv->bus->mdio_lock);
}

static void
core_write(struct mt7530_priv *priv, u32 reg, u32 val)
{
	mt7530_mutex_lock(priv);

	core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);

	mt7530_mutex_unlock(priv);
}

static void
core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set)
{
	u32 val;

	mt7530_mutex_lock(priv);

	val = core_read_mmd_indirect(priv, reg, MDIO_MMD_VEND2);
	val &= ~mask;
	val |= set;
	core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);

	mt7530_mutex_unlock(priv);
}

static void
core_set(struct mt7530_priv *priv, u32 reg, u32 val)
{
	core_rmw(priv, reg, 0, val);
}

static void
core_clear(struct mt7530_priv *priv, u32 reg, u32 val)
{
	core_rmw(priv, reg, val, 0);
}

static int
mt7530_mii_write(struct mt7530_priv *priv, u32 reg, u32 val)
{
	int ret;

	ret = regmap_write(priv->regmap, reg, val);

	if (ret < 0)
		dev_err(priv->dev,
			"failed to write mt7530 register\n");

	return ret;
}

static u32
mt7530_mii_read(struct mt7530_priv *priv, u32 reg)
{
	int ret;
	u32 val;

	ret = regmap_read(priv->regmap, reg, &val);
	if (ret) {
		WARN_ON_ONCE(1);
		dev_err(priv->dev,
			"failed to read mt7530 register\n");
		return 0;
	}

	return val;
}

static void
mt7530_write(struct mt7530_priv *priv, u32 reg, u32 val)
{
	mt7530_mutex_lock(priv);

	mt7530_mii_write(priv, reg, val);

	mt7530_mutex_unlock(priv);
}
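/* The mt7530_dummy_poll helpers below adapt register reads to the
 * readx_poll_timeout() calling convention; _mt7530_unlocked_read() is the
 * variant used when the caller already holds the MDIO bus lock.
 */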
static u32
_mt7530_unlocked_read(struct mt7530_dummy_poll *p)
{
	return mt7530_mii_read(p->priv, p->reg);
}

static u32
_mt7530_read(struct mt7530_dummy_poll *p)
{
	u32 val;

	mt7530_mutex_lock(p->priv);

	val = mt7530_mii_read(p->priv, p->reg);

	mt7530_mutex_unlock(p->priv);

	return val;
}

static u32
mt7530_read(struct mt7530_priv *priv, u32 reg)
{
	struct mt7530_dummy_poll p;

	INIT_MT7530_DUMMY_POLL(&p, priv, reg);
	return _mt7530_read(&p);
}

static void
mt7530_rmw(struct mt7530_priv *priv, u32 reg,
	   u32 mask, u32 set)
{
	mt7530_mutex_lock(priv);

	regmap_update_bits(priv->regmap, reg, mask, set);

	mt7530_mutex_unlock(priv);
}

static void
mt7530_set(struct mt7530_priv *priv, u32 reg, u32 val)
{
	mt7530_rmw(priv, reg, val, val);
}

static void
mt7530_clear(struct mt7530_priv *priv, u32 reg, u32 val)
{
	mt7530_rmw(priv, reg, val, 0);
}

static int
mt7530_fdb_cmd(struct mt7530_priv *priv, enum mt7530_fdb_cmd cmd, u32 *rsp)
{
	u32 val;
	int ret;
	struct mt7530_dummy_poll p;

	/* Set the command operating upon the MAC address entries */
	val = ATC_BUSY | ATC_MAT(0) | cmd;
	mt7530_write(priv, MT7530_ATC, val);

	INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_ATC);
	ret = readx_poll_timeout(_mt7530_read, &p, val,
				 !(val & ATC_BUSY), 20, 20000);
	if (ret < 0) {
		dev_err(priv->dev, "reset timeout\n");
		return ret;
	}

	/* Additional sanity check for the read command: fail if the
	 * specified entry is invalid.
	 */
	val = mt7530_read(priv, MT7530_ATC);
	if ((cmd == MT7530_FDB_READ) && (val & ATC_INVALID))
		return -EINVAL;

	if (rsp)
		*rsp = val;

	return 0;
}

static void
mt7530_fdb_read(struct mt7530_priv *priv, struct mt7530_fdb *fdb)
{
	u32 reg[3];
	int i;

	/* Read from ARL table into an array */
	for (i = 0; i < 3; i++) {
		reg[i] = mt7530_read(priv, MT7530_TSRA1 + (i * 4));

		dev_dbg(priv->dev, "%s(%d) reg[%d]=0x%x\n",
			__func__, __LINE__, i, reg[i]);
	}

	fdb->vid = (reg[1] >> CVID) & CVID_MASK;
	fdb->aging = (reg[2] >> AGE_TIMER) & AGE_TIMER_MASK;
	fdb->port_mask = (reg[2] >> PORT_MAP) & PORT_MAP_MASK;
	fdb->mac[0] = (reg[0] >> MAC_BYTE_0) & MAC_BYTE_MASK;
	fdb->mac[1] = (reg[0] >> MAC_BYTE_1) & MAC_BYTE_MASK;
	fdb->mac[2] = (reg[0] >> MAC_BYTE_2) & MAC_BYTE_MASK;
	fdb->mac[3] = (reg[0] >> MAC_BYTE_3) & MAC_BYTE_MASK;
	fdb->mac[4] = (reg[1] >> MAC_BYTE_4) & MAC_BYTE_MASK;
	fdb->mac[5] = (reg[1] >> MAC_BYTE_5) & MAC_BYTE_MASK;
	fdb->noarp = ((reg[2] >> ENT_STATUS) & ENT_STATUS_MASK) == STATIC_ENT;
}

static void
mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
		 u8 port_mask, const u8 *mac,
		 u8 aging, u8 type)
{
	u32 reg[3] = { 0 };
	int i;

	reg[1] |= vid & CVID_MASK;
	reg[1] |= ATA2_IVL;
	reg[1] |= ATA2_FID(FID_BRIDGED);
	reg[2] |= (aging & AGE_TIMER_MASK) << AGE_TIMER;
	reg[2] |= (port_mask & PORT_MAP_MASK) << PORT_MAP;
	/* STATIC_ENT indicates a static entry that won't be aged out, while
	 * STATIC_EMP is used to erase an entry.
	 */
	reg[2] |= (type & ENT_STATUS_MASK) << ENT_STATUS;
	reg[1] |= mac[5] << MAC_BYTE_5;
	reg[1] |= mac[4] << MAC_BYTE_4;
	reg[0] |= mac[3] << MAC_BYTE_3;
	reg[0] |= mac[2] << MAC_BYTE_2;
	reg[0] |= mac[1] << MAC_BYTE_1;
	reg[0] |= mac[0] << MAC_BYTE_0;

	/* Write array into the ARL table */
	for (i = 0; i < 3; i++)
		mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]);
}
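/* mt7530_fdb_write() only fills the three address-table data registers
 * starting at MT7530_ATA1; the entry is not committed to the ARL until the
 * caller issues an MT7530_FDB_WRITE command through mt7530_fdb_cmd(), which
 * polls ATC_BUSY for completion.
 */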
/* Set up switch core clock for MT7530 */
static void mt7530_pll_setup(struct mt7530_priv *priv)
{
	/* Disable core clock */
	core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);

	/* Disable PLL */
	core_write(priv, CORE_GSWPLL_GRP1, 0);

	/* Set core clock to 500MHz */
	core_write(priv, CORE_GSWPLL_GRP2,
		   RG_GSWPLL_POSDIV_500M(1) |
		   RG_GSWPLL_FBKDIV_500M(25));

	/* Enable PLL */
	core_write(priv, CORE_GSWPLL_GRP1,
		   RG_GSWPLL_EN_PRE |
		   RG_GSWPLL_POSDIV_200M(2) |
		   RG_GSWPLL_FBKDIV_200M(32));

	udelay(20);

	/* Enable core clock */
	core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
}

/* If port 6 is available as a CPU port, always prefer that as the default,
 * otherwise don't care.
 */
static struct dsa_port *
mt753x_preferred_default_local_cpu_port(struct dsa_switch *ds)
{
	struct dsa_port *cpu_dp = dsa_to_port(ds, 6);

	if (dsa_port_is_cpu(cpu_dp))
		return cpu_dp;

	return NULL;
}

/* Setup port 6 interface mode and TRGMII TX circuit */
static int
mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
{
	struct mt7530_priv *priv = ds->priv;
	u32 ncpo1, ssc_delta, trgint, xtal;

	xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;

	if (xtal == HWTRAP_XTAL_20MHZ) {
		dev_err(priv->dev,
			"%s: MT7530 with a 20MHz XTAL is not supported!\n",
			__func__);
		return -EINVAL;
	}

	switch (interface) {
	case PHY_INTERFACE_MODE_RGMII:
		trgint = 0;
		break;
	case PHY_INTERFACE_MODE_TRGMII:
		trgint = 1;
		if (xtal == HWTRAP_XTAL_25MHZ)
			ssc_delta = 0x57;
		else
			ssc_delta = 0x87;
		if (priv->id == ID_MT7621) {
			/* PLL frequency: 125MHz: 1.0GBit */
			if (xtal == HWTRAP_XTAL_40MHZ)
				ncpo1 = 0x0640;
			if (xtal == HWTRAP_XTAL_25MHZ)
				ncpo1 = 0x0a00;
		} else { /* PLL frequency: 250MHz: 2.0Gbit */
			if (xtal == HWTRAP_XTAL_40MHZ)
				ncpo1 = 0x0c80;
			if (xtal == HWTRAP_XTAL_25MHZ)
				ncpo1 = 0x1400;
		}
		break;
	default:
		dev_err(priv->dev, "xMII interface %d not supported\n",
			interface);
		return -EINVAL;
	}

	mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
		   P6_INTF_MODE(trgint));

	if (trgint) {
		/* Disable the MT7530 TRGMII clocks */
		core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);

		/* Setup the MT7530 TRGMII Tx Clock */
		core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
		core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
		core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
		core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
		core_write(priv, CORE_PLL_GROUP4,
			   RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
			   RG_SYSPLL_BIAS_LPF_EN);
		core_write(priv, CORE_PLL_GROUP2,
			   RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
			   RG_SYSPLL_POSDIV(1));
		core_write(priv, CORE_PLL_GROUP7,
			   RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
			   RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);

		/* Enable the MT7530 TRGMII clocks */
		core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
	}

	return 0;
}

static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
{
	u32 val;

	val = mt7530_read(priv, MT7531_TOP_SIG_SR);

	return (val & PAD_DUAL_SGMII_EN) != 0;
}
static int
mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
{
	return 0;
}

static void
mt7531_pll_setup(struct mt7530_priv *priv)
{
	u32 top_sig;
	u32 hwstrap;
	u32 xtal;
	u32 val;

	if (mt7531_dual_sgmii_supported(priv))
		return;

	val = mt7530_read(priv, MT7531_CREV);
	top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
	hwstrap = mt7530_read(priv, MT7531_HWTRAP);
	if ((val & CHIP_REV_M) > 0)
		xtal = (top_sig & PAD_MCM_SMI_EN) ? HWTRAP_XTAL_FSEL_40MHZ :
						    HWTRAP_XTAL_FSEL_25MHZ;
	else
		xtal = hwstrap & HWTRAP_XTAL_FSEL_MASK;

	/* Step 1: Disable MT7531 COREPLL */
	val = mt7530_read(priv, MT7531_PLLGP_EN);
	val &= ~EN_COREPLL;
	mt7530_write(priv, MT7531_PLLGP_EN, val);

	/* Step 2: switch to XTAL output */
	val = mt7530_read(priv, MT7531_PLLGP_EN);
	val |= SW_CLKSW;
	mt7530_write(priv, MT7531_PLLGP_EN, val);

	val = mt7530_read(priv, MT7531_PLLGP_CR0);
	val &= ~RG_COREPLL_EN;
	mt7530_write(priv, MT7531_PLLGP_CR0, val);

	/* Step 3: disable PLLGP and enable program PLLGP */
	val = mt7530_read(priv, MT7531_PLLGP_EN);
	val |= SW_PLLGP;
	mt7530_write(priv, MT7531_PLLGP_EN, val);

	/* Step 4: program COREPLL output frequency to 500MHz */
	val = mt7530_read(priv, MT7531_PLLGP_CR0);
	val &= ~RG_COREPLL_POSDIV_M;
	val |= 2 << RG_COREPLL_POSDIV_S;
	mt7530_write(priv, MT7531_PLLGP_CR0, val);
	usleep_range(25, 35);

	switch (xtal) {
	case HWTRAP_XTAL_FSEL_25MHZ:
		val = mt7530_read(priv, MT7531_PLLGP_CR0);
		val &= ~RG_COREPLL_SDM_PCW_M;
		val |= 0x140000 << RG_COREPLL_SDM_PCW_S;
		mt7530_write(priv, MT7531_PLLGP_CR0, val);
		break;
	case HWTRAP_XTAL_FSEL_40MHZ:
		val = mt7530_read(priv, MT7531_PLLGP_CR0);
		val &= ~RG_COREPLL_SDM_PCW_M;
		val |= 0x190000 << RG_COREPLL_SDM_PCW_S;
		mt7530_write(priv, MT7531_PLLGP_CR0, val);
		break;
	}

	/* Set feedback divide ratio update signal to high */
	val = mt7530_read(priv, MT7531_PLLGP_CR0);
	val |= RG_COREPLL_SDM_PCW_CHG;
	mt7530_write(priv, MT7531_PLLGP_CR0, val);
	/* Wait for at least 16 XTAL clocks */
	usleep_range(10, 20);

	/* Step 5: set feedback divide ratio update signal to low */
	val = mt7530_read(priv, MT7531_PLLGP_CR0);
	val &= ~RG_COREPLL_SDM_PCW_CHG;
	mt7530_write(priv, MT7531_PLLGP_CR0, val);

	/* Enable 325M clock for SGMII */
	mt7530_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000);

	/* Enable 250SSC clock for RGMII */
	mt7530_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000);

	/* Step 6: Enable MT7531 PLL */
	val = mt7530_read(priv, MT7531_PLLGP_CR0);
	val |= RG_COREPLL_EN;
	mt7530_write(priv, MT7531_PLLGP_CR0, val);

	val = mt7530_read(priv, MT7531_PLLGP_EN);
	val |= EN_COREPLL;
	mt7530_write(priv, MT7531_PLLGP_EN, val);
	usleep_range(25, 35);
}

static void
mt7530_mib_reset(struct dsa_switch *ds)
{
	struct mt7530_priv *priv = ds->priv;

	mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_FLUSH);
	mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE);
}

static int mt7530_phy_read_c22(struct mt7530_priv *priv, int port, int regnum)
{
	return mdiobus_read_nested(priv->bus, port, regnum);
}

static int mt7530_phy_write_c22(struct mt7530_priv *priv, int port, int regnum,
				u16 val)
{
	return mdiobus_write_nested(priv->bus, port, regnum, val);
}
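/* Like the Clause 22 helpers above, the Clause 45 helpers below simply
 * forward accesses to the parent MDIO bus, which is where the MT7530/MT7621
 * internal PHYs live. MT7531 instead drives its internal PHYs through the
 * MT7531_PHY_IAC indirect access controller (mt7531_ind_* further below).
 */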
static int mt7530_phy_read_c45(struct mt7530_priv *priv, int port,
			       int devad, int regnum)
{
	return mdiobus_c45_read_nested(priv->bus, port, devad, regnum);
}

static int mt7530_phy_write_c45(struct mt7530_priv *priv, int port, int devad,
				int regnum, u16 val)
{
	return mdiobus_c45_write_nested(priv->bus, port, devad, regnum, val);
}

static int
mt7531_ind_c45_phy_read(struct mt7530_priv *priv, int port, int devad,
			int regnum)
{
	struct mt7530_dummy_poll p;
	u32 reg, val;
	int ret;

	INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);

	mt7530_mutex_lock(priv);

	ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
				 !(val & MT7531_PHY_ACS_ST), 20, 100000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		goto out;
	}

	reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) |
	      MT7531_MDIO_DEV_ADDR(devad) | regnum;
	mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);

	ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
				 !(val & MT7531_PHY_ACS_ST), 20, 100000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		goto out;
	}

	reg = MT7531_MDIO_CL45_READ | MT7531_MDIO_PHY_ADDR(port) |
	      MT7531_MDIO_DEV_ADDR(devad);
	mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);

	ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
				 !(val & MT7531_PHY_ACS_ST), 20, 100000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		goto out;
	}

	ret = val & MT7531_MDIO_RW_DATA_MASK;
out:
	mt7530_mutex_unlock(priv);

	return ret;
}

static int
mt7531_ind_c45_phy_write(struct mt7530_priv *priv, int port, int devad,
			 int regnum, u16 data)
{
	struct mt7530_dummy_poll p;
	u32 val, reg;
	int ret;

	INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);

	mt7530_mutex_lock(priv);

	ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
				 !(val & MT7531_PHY_ACS_ST), 20, 100000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		goto out;
	}

	reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) |
	      MT7531_MDIO_DEV_ADDR(devad) | regnum;
	mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);

	ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
				 !(val & MT7531_PHY_ACS_ST), 20, 100000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		goto out;
	}

	reg = MT7531_MDIO_CL45_WRITE | MT7531_MDIO_PHY_ADDR(port) |
	      MT7531_MDIO_DEV_ADDR(devad) | data;
	mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);

	ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
				 !(val & MT7531_PHY_ACS_ST), 20, 100000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		goto out;
	}

out:
	mt7530_mutex_unlock(priv);

	return ret;
}
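/* Clause 22 indirect accesses follow the same MT7531_PHY_IAC handshake as
 * the Clause 45 ones above: wait for MT7531_PHY_ACS_ST to clear, program the
 * command word, then poll again for completion (masking out the data word
 * for reads).
 */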
static int
mt7531_ind_c22_phy_read(struct mt7530_priv *priv, int port, int regnum)
{
	struct mt7530_dummy_poll p;
	int ret;
	u32 val;

	INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);

	mt7530_mutex_lock(priv);

	ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
				 !(val & MT7531_PHY_ACS_ST), 20, 100000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		goto out;
	}

	val = MT7531_MDIO_CL22_READ | MT7531_MDIO_PHY_ADDR(port) |
	      MT7531_MDIO_REG_ADDR(regnum);

	mt7530_mii_write(priv, MT7531_PHY_IAC, val | MT7531_PHY_ACS_ST);

	ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
				 !(val & MT7531_PHY_ACS_ST), 20, 100000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		goto out;
	}

	ret = val & MT7531_MDIO_RW_DATA_MASK;
out:
	mt7530_mutex_unlock(priv);

	return ret;
}

static int
mt7531_ind_c22_phy_write(struct mt7530_priv *priv, int port, int regnum,
			 u16 data)
{
	struct mt7530_dummy_poll p;
	int ret;
	u32 reg;

	INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);

	mt7530_mutex_lock(priv);

	ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
				 !(reg & MT7531_PHY_ACS_ST), 20, 100000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		goto out;
	}

	reg = MT7531_MDIO_CL22_WRITE | MT7531_MDIO_PHY_ADDR(port) |
	      MT7531_MDIO_REG_ADDR(regnum) | data;

	mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);

	ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
				 !(reg & MT7531_PHY_ACS_ST), 20, 100000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		goto out;
	}

out:
	mt7530_mutex_unlock(priv);

	return ret;
}

static int
mt753x_phy_read_c22(struct mii_bus *bus, int port, int regnum)
{
	struct mt7530_priv *priv = bus->priv;

	return priv->info->phy_read_c22(priv, port, regnum);
}

static int
mt753x_phy_read_c45(struct mii_bus *bus, int port, int devad, int regnum)
{
	struct mt7530_priv *priv = bus->priv;

	return priv->info->phy_read_c45(priv, port, devad, regnum);
}

static int
mt753x_phy_write_c22(struct mii_bus *bus, int port, int regnum, u16 val)
{
	struct mt7530_priv *priv = bus->priv;

	return priv->info->phy_write_c22(priv, port, regnum, val);
}

static int
mt753x_phy_write_c45(struct mii_bus *bus, int port, int devad, int regnum,
		     u16 val)
{
	struct mt7530_priv *priv = bus->priv;

	return priv->info->phy_write_c45(priv, port, devad, regnum, val);
}

static void
mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset,
		   uint8_t *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++)
		ethtool_sprintf(&data, "%s", mt7530_mib[i].name);
}

static void
mt7530_get_ethtool_stats(struct dsa_switch *ds, int port,
			 uint64_t *data)
{
	struct mt7530_priv *priv = ds->priv;
	const struct mt7530_mib_desc *mib;
	u32 reg, i;
	u64 hi;

	for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) {
		mib = &mt7530_mib[i];
		reg = MT7530_PORT_MIB_COUNTER(port) + mib->offset;

		data[i] = mt7530_read(priv, reg);
		if (mib->size == 2) {
			hi = mt7530_read(priv, reg + 4);
			data[i] |= hi << 32;
		}
	}
}

static int
mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	if (sset != ETH_SS_STATS)
		return 0;

	return ARRAY_SIZE(mt7530_mib);
}
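/* The hardware ageing time is (AGE_CNT + 1) * (AGE_UNIT + 1) seconds. The
 * search below picks the (AGE_CNT, AGE_UNIT) pair with the smallest error;
 * e.g. a requested 300 s is matched exactly with AGE_CNT = 0 and
 * AGE_UNIT = 299.
 */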
static int
mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
	struct mt7530_priv *priv = ds->priv;
	unsigned int secs = msecs / 1000;
	unsigned int tmp_age_count;
	unsigned int error = -1;
	unsigned int age_count;
	unsigned int age_unit;

	/* Applied timer is (AGE_CNT + 1) * (AGE_UNIT + 1) seconds */
	if (secs < 1 || secs > (AGE_CNT_MAX + 1) * (AGE_UNIT_MAX + 1))
		return -ERANGE;

	/* iterate through all possible age_count to find the closest pair */
	for (tmp_age_count = 0; tmp_age_count <= AGE_CNT_MAX; ++tmp_age_count) {
		unsigned int tmp_age_unit = secs / (tmp_age_count + 1) - 1;

		if (tmp_age_unit <= AGE_UNIT_MAX) {
			unsigned int tmp_error = secs -
				(tmp_age_count + 1) * (tmp_age_unit + 1);

			/* found a closer pair */
			if (error > tmp_error) {
				error = tmp_error;
				age_count = tmp_age_count;
				age_unit = tmp_age_unit;
			}

			/* found the exact match, so break the loop */
			if (!error)
				break;
		}
	}

	mt7530_write(priv, MT7530_AAC, AGE_CNT(age_count) | AGE_UNIT(age_unit));

	return 0;
}

static const char *p5_intf_modes(unsigned int p5_interface)
{
	switch (p5_interface) {
	case P5_DISABLED:
		return "DISABLED";
	case P5_INTF_SEL_PHY_P0:
		return "PHY P0";
	case P5_INTF_SEL_PHY_P4:
		return "PHY P4";
	case P5_INTF_SEL_GMAC5:
		return "GMAC5";
	case P5_INTF_SEL_GMAC5_SGMII:
		return "GMAC5_SGMII";
	default:
		return "unknown";
	}
}

static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
{
	struct mt7530_priv *priv = ds->priv;
	u8 tx_delay = 0;
	int val;

	mutex_lock(&priv->reg_mutex);

	val = mt7530_read(priv, MT7530_MHWTRAP);

	val |= MHWTRAP_MANUAL | MHWTRAP_P5_MAC_SEL | MHWTRAP_P5_DIS;
	val &= ~MHWTRAP_P5_RGMII_MODE & ~MHWTRAP_PHY0_SEL;

	switch (priv->p5_intf_sel) {
	case P5_INTF_SEL_PHY_P0:
		/* MT7530_P5_MODE_GPHY_P0: 2nd GMAC -> P5 -> P0 */
		val |= MHWTRAP_PHY0_SEL;
		fallthrough;
	case P5_INTF_SEL_PHY_P4:
		/* MT7530_P5_MODE_GPHY_P4: 2nd GMAC -> P5 -> P4 */
		val &= ~MHWTRAP_P5_MAC_SEL & ~MHWTRAP_P5_DIS;

		/* Setup the MAC by default for the cpu port */
		mt7530_write(priv, MT7530_PMCR_P(5), 0x56300);
		break;
	case P5_INTF_SEL_GMAC5:
		/* MT7530_P5_MODE_GMAC: P5 -> External phy or 2nd GMAC */
		val &= ~MHWTRAP_P5_DIS;
		break;
	case P5_DISABLED:
		interface = PHY_INTERFACE_MODE_NA;
		break;
	default:
		dev_err(ds->dev, "Unsupported p5_intf_sel %d\n",
			priv->p5_intf_sel);
		goto unlock_exit;
	}

	/* Setup RGMII settings */
	if (phy_interface_mode_is_rgmii(interface)) {
		val |= MHWTRAP_P5_RGMII_MODE;

		/* P5 RGMII RX Clock Control: delay setting for 1000M */
		mt7530_write(priv, MT7530_P5RGMIIRXCR, CSR_RGMII_EDGE_ALIGN);

		/* Don't set delay in DSA mode */
		if (!dsa_is_dsa_port(priv->ds, 5) &&
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID ||
		     interface == PHY_INTERFACE_MODE_RGMII_ID))
			tx_delay = 4; /* n * 0.5 ns */

		/* P5 RGMII TX Clock Control: delay x */
		mt7530_write(priv, MT7530_P5RGMIITXCR,
			     CSR_RGMII_TXC_CFG(0x10 + tx_delay));

		/* reduce P5 RGMII Tx driving, 8mA */
		mt7530_write(priv, MT7530_IO_DRV_CR,
			     P5_IO_CLK_DRV(1) | P5_IO_DATA_DRV(1));
	}

	mt7530_write(priv, MT7530_MHWTRAP, val);

	dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, intf_sel=%s, phy-mode=%s\n",
		val, p5_intf_modes(priv->p5_intf_sel), phy_modes(interface));

	priv->p5_interface = interface;

unlock_exit:
	mutex_unlock(&priv->reg_mutex);
}
static void
mt753x_trap_frames(struct mt7530_priv *priv)
{
	/* Trap BPDUs to the CPU port(s) */
	mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
		   MT753X_BPDU_CPU_ONLY);

	/* Trap 802.1X PAE frames to the CPU port(s) */
	mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_PORT_FW_MASK,
		   MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY));

	/* Trap LLDP frames with :0E MAC DA to the CPU port(s) */
	mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK,
		   MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY));
}

static int
mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
{
	struct mt7530_priv *priv = ds->priv;
	int ret;

	/* Setup max capability of CPU port at first */
	if (priv->info->cpu_port_config) {
		ret = priv->info->cpu_port_config(ds, port);
		if (ret)
			return ret;
	}

	/* Enable Mediatek header mode on the cpu port */
	mt7530_write(priv, MT7530_PVC_P(port),
		     PORT_SPEC_TAG);

	/* Enable flooding on the CPU port */
	mt7530_set(priv, MT7530_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
		   UNU_FFP(BIT(port)));

	/* Set CPU port number */
	if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
		mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port));

	/* Add the CPU port to the CPU port bitmap for MT7531 and the switch on
	 * the MT7988 SoC. Trapped frames will be forwarded to the CPU port that
	 * is affine to the inbound user port.
	 */
	if (priv->id == ID_MT7531 || priv->id == ID_MT7988)
		mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port)));

	/* CPU port gets connected to all user ports of
	 * the switch.
	 */
	mt7530_write(priv, MT7530_PCR_P(port),
		     PCR_MATRIX(dsa_user_ports(priv->ds)));

	/* Set to fallback mode for independent VLAN learning */
	mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
		   MT7530_PORT_FALLBACK_MODE);

	return 0;
}

static int
mt7530_port_enable(struct dsa_switch *ds, int port,
		   struct phy_device *phy)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct mt7530_priv *priv = ds->priv;

	mutex_lock(&priv->reg_mutex);

	/* Allow the user port to get connected to the CPU port, and also
	 * restore the port matrix if the port is a member of a certain
	 * bridge.
	 */
	if (dsa_port_is_user(dp)) {
		struct dsa_port *cpu_dp = dp->cpu_dp;

		priv->ports[port].pm |= PCR_MATRIX(BIT(cpu_dp->index));
	}
	priv->ports[port].enable = true;
	mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
		   priv->ports[port].pm);
	mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);

	mutex_unlock(&priv->reg_mutex);

	return 0;
}

static void
mt7530_port_disable(struct dsa_switch *ds, int port)
{
	struct mt7530_priv *priv = ds->priv;

	mutex_lock(&priv->reg_mutex);

	/* Clear the port matrix; it will be restored on the next enablement
	 * of the port.
	 */
	priv->ports[port].enable = false;
	mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
		   PCR_MATRIX_CLR);
	mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);

	mutex_unlock(&priv->reg_mutex);
}
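/* The programmed RX length is MTU + Ethernet header (14) + MTK DSA tag (4)
 * + FCS (4); e.g. the default MTU of 1500 maps to a frame length of 1522
 * bytes and therefore to the MAX_RX_PKT_LEN_1522 setting.
 */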
static int
mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct mt7530_priv *priv = ds->priv;
	int length;
	u32 val;

	/* When a new MTU is set, DSA always sets the CPU port's MTU to the
	 * largest MTU of the user ports. Because the switch only has a global
	 * RX length register, only allowing the CPU port here is enough.
	 */
	if (!dsa_is_cpu_port(ds, port))
		return 0;

	mt7530_mutex_lock(priv);

	val = mt7530_mii_read(priv, MT7530_GMACCR);
	val &= ~MAX_RX_PKT_LEN_MASK;

	/* RX length also includes Ethernet header, MTK tag, and FCS length */
	length = new_mtu + ETH_HLEN + MTK_HDR_LEN + ETH_FCS_LEN;
	if (length <= 1522) {
		val |= MAX_RX_PKT_LEN_1522;
	} else if (length <= 1536) {
		val |= MAX_RX_PKT_LEN_1536;
	} else if (length <= 1552) {
		val |= MAX_RX_PKT_LEN_1552;
	} else {
		val &= ~MAX_RX_JUMBO_MASK;
		val |= MAX_RX_JUMBO(DIV_ROUND_UP(length, 1024));
		val |= MAX_RX_PKT_LEN_JUMBO;
	}

	mt7530_mii_write(priv, MT7530_GMACCR, val);

	mt7530_mutex_unlock(priv);

	return 0;
}

static int
mt7530_port_max_mtu(struct dsa_switch *ds, int port)
{
	return MT7530_MAX_MTU;
}

static void
mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
	struct mt7530_priv *priv = ds->priv;
	u32 stp_state;

	switch (state) {
	case BR_STATE_DISABLED:
		stp_state = MT7530_STP_DISABLED;
		break;
	case BR_STATE_BLOCKING:
		stp_state = MT7530_STP_BLOCKING;
		break;
	case BR_STATE_LISTENING:
		stp_state = MT7530_STP_LISTENING;
		break;
	case BR_STATE_LEARNING:
		stp_state = MT7530_STP_LEARNING;
		break;
	case BR_STATE_FORWARDING:
	default:
		stp_state = MT7530_STP_FORWARDING;
		break;
	}

	mt7530_rmw(priv, MT7530_SSP_P(port), FID_PST_MASK(FID_BRIDGED),
		   FID_PST(FID_BRIDGED, stp_state));
}

static int
mt7530_port_pre_bridge_flags(struct dsa_switch *ds, int port,
			     struct switchdev_brport_flags flags,
			     struct netlink_ext_ack *extack)
{
	if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
			   BR_BCAST_FLOOD))
		return -EINVAL;

	return 0;
}

static int
mt7530_port_bridge_flags(struct dsa_switch *ds, int port,
			 struct switchdev_brport_flags flags,
			 struct netlink_ext_ack *extack)
{
	struct mt7530_priv *priv = ds->priv;

	if (flags.mask & BR_LEARNING)
		mt7530_rmw(priv, MT7530_PSC_P(port), SA_DIS,
			   flags.val & BR_LEARNING ? 0 : SA_DIS);

	if (flags.mask & BR_FLOOD)
		mt7530_rmw(priv, MT7530_MFC, UNU_FFP(BIT(port)),
			   flags.val & BR_FLOOD ? UNU_FFP(BIT(port)) : 0);

	if (flags.mask & BR_MCAST_FLOOD)
		mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)),
			   flags.val & BR_MCAST_FLOOD ? UNM_FFP(BIT(port)) : 0);

	if (flags.mask & BR_BCAST_FLOOD)
		mt7530_rmw(priv, MT7530_MFC, BC_FFP(BIT(port)),
			   flags.val & BR_BCAST_FLOOD ? BC_FFP(BIT(port)) : 0);

	return 0;
}
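/* Each port's PCR_MATRIX is a bitmap of the ports it is allowed to forward
 * to. Joining a bridge adds the member ports to each other's matrix (done
 * below), while a standalone port keeps only its CPU port bit set.
 */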
static int
mt7530_port_bridge_join(struct dsa_switch *ds, int port,
			struct dsa_bridge bridge, bool *tx_fwd_offload,
			struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
	struct dsa_port *cpu_dp = dp->cpu_dp;
	u32 port_bitmap = BIT(cpu_dp->index);
	struct mt7530_priv *priv = ds->priv;

	mutex_lock(&priv->reg_mutex);

	dsa_switch_for_each_user_port(other_dp, ds) {
		int other_port = other_dp->index;

		if (dp == other_dp)
			continue;

		/* Add this port to the port matrix of the other ports in the
		 * same bridge. If the other port is disabled, its port matrix
		 * is only kept in software and not written out until that
		 * port becomes enabled.
		 */
		if (!dsa_port_offloads_bridge(other_dp, &bridge))
			continue;

		if (priv->ports[other_port].enable)
			mt7530_set(priv, MT7530_PCR_P(other_port),
				   PCR_MATRIX(BIT(port)));
		priv->ports[other_port].pm |= PCR_MATRIX(BIT(port));

		port_bitmap |= BIT(other_port);
	}

	/* Add all other ports to this port's matrix. */
	if (priv->ports[port].enable)
		mt7530_rmw(priv, MT7530_PCR_P(port),
			   PCR_MATRIX_MASK, PCR_MATRIX(port_bitmap));
	priv->ports[port].pm |= PCR_MATRIX(port_bitmap);

	/* Set to fallback mode for independent VLAN learning */
	mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
		   MT7530_PORT_FALLBACK_MODE);

	mutex_unlock(&priv->reg_mutex);

	return 0;
}

static void
mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
{
	struct mt7530_priv *priv = ds->priv;
	bool all_user_ports_removed = true;
	int i;

	/* This is called after .port_bridge_leave when leaving a VLAN-aware
	 * bridge. Don't set standalone ports to fallback mode.
	 */
	if (dsa_port_bridge_dev_get(dsa_to_port(ds, port)))
		mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
			   MT7530_PORT_FALLBACK_MODE);

	mt7530_rmw(priv, MT7530_PVC_P(port),
		   VLAN_ATTR_MASK | PVC_EG_TAG_MASK | ACC_FRM_MASK,
		   VLAN_ATTR(MT7530_VLAN_TRANSPARENT) |
		   PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT) |
		   MT7530_VLAN_ACC_ALL);

	/* Set PVID to 0 */
	mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
		   G0_PORT_VID_DEF);

	for (i = 0; i < MT7530_NUM_PORTS; i++) {
		if (dsa_is_user_port(ds, i) &&
		    dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
			all_user_ports_removed = false;
			break;
		}
	}

	/* The CPU port also does the same thing once all user ports belonging
	 * to the CPU port get out of VLAN filtering mode.
	 */
	if (all_user_ports_removed) {
		struct dsa_port *dp = dsa_to_port(ds, port);
		struct dsa_port *cpu_dp = dp->cpu_dp;

		mt7530_write(priv, MT7530_PCR_P(cpu_dp->index),
			     PCR_MATRIX(dsa_user_ports(priv->ds)));
		mt7530_write(priv, MT7530_PVC_P(cpu_dp->index), PORT_SPEC_TAG
			     | PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
	}
}
static void
mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
{
	struct mt7530_priv *priv = ds->priv;

	/* Security mode allows packet forwarding to be done through the VLAN
	 * table lookup.
	 */
	if (dsa_is_user_port(ds, port)) {
		mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
			   MT7530_PORT_SECURITY_MODE);
		mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
			   G0_PORT_VID(priv->ports[port].pvid));

		/* Only accept tagged frames if PVID is not set */
		if (!priv->ports[port].pvid)
			mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
				   MT7530_VLAN_ACC_TAGGED);

		/* Set the port as a user port so that the VID is recognized
		 * from incoming packets before the entry is looked up in the
		 * VLAN table.
		 */
		mt7530_rmw(priv, MT7530_PVC_P(port),
			   VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
			   VLAN_ATTR(MT7530_VLAN_USER) |
			   PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
	} else {
		/* Also set CPU ports to the "user" VLAN port attribute, to
		 * allow VLAN classification, but keep the EG_TAG attribute as
		 * "consistent" (i.o.w. don't change its value) for packets
		 * received by the switch from the CPU, so that tagged packets
		 * are forwarded to user ports as tagged, and untagged as
		 * untagged.
		 */
		mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
			   VLAN_ATTR(MT7530_VLAN_USER));
	}
}

static void
mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
			 struct dsa_bridge bridge)
{
	struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct mt7530_priv *priv = ds->priv;

	mutex_lock(&priv->reg_mutex);

	dsa_switch_for_each_user_port(other_dp, ds) {
		int other_port = other_dp->index;

		if (dp == other_dp)
			continue;

		/* Remove this port from the port matrix of the other ports
		 * in the same bridge. If the other port is disabled, its port
		 * matrix is only kept in software and not written out until
		 * that port becomes enabled.
		 */
		if (!dsa_port_offloads_bridge(other_dp, &bridge))
			continue;

		if (priv->ports[other_port].enable)
			mt7530_clear(priv, MT7530_PCR_P(other_port),
				     PCR_MATRIX(BIT(port)));
		priv->ports[other_port].pm &= ~PCR_MATRIX(BIT(port));
	}

	/* Set the cpu port to be the only one in the port matrix of
	 * this port.
	 */
	if (priv->ports[port].enable)
		mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
			   PCR_MATRIX(BIT(cpu_dp->index)));
	priv->ports[port].pm = PCR_MATRIX(BIT(cpu_dp->index));

	/* When a port is removed from the bridge, set it back to the default
	 * it had at initial boot, which is a VLAN-unaware port.
	 */
	mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
		   MT7530_PORT_MATRIX_MODE);

	mutex_unlock(&priv->reg_mutex);
}

static int
mt7530_port_fdb_add(struct dsa_switch *ds, int port,
		    const unsigned char *addr, u16 vid,
		    struct dsa_db db)
{
	struct mt7530_priv *priv = ds->priv;
	int ret;
	u8 port_mask = BIT(port);

	mutex_lock(&priv->reg_mutex);
	mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT);
	ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
	mutex_unlock(&priv->reg_mutex);

	return ret;
}

static int
mt7530_port_fdb_del(struct dsa_switch *ds, int port,
		    const unsigned char *addr, u16 vid,
		    struct dsa_db db)
{
	struct mt7530_priv *priv = ds->priv;
	int ret;
	u8 port_mask = BIT(port);

	mutex_lock(&priv->reg_mutex);
	mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_EMP);
	ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
	mutex_unlock(&priv->reg_mutex);

	return ret;
}

static int
mt7530_port_fdb_dump(struct dsa_switch *ds, int port,
		     dsa_fdb_dump_cb_t *cb, void *data)
{
	struct mt7530_priv *priv = ds->priv;
	struct mt7530_fdb _fdb = { 0 };
	int cnt = MT7530_NUM_FDB_RECORDS;
	int ret = 0;
	u32 rsp = 0;

	mutex_lock(&priv->reg_mutex);

	ret = mt7530_fdb_cmd(priv, MT7530_FDB_START, &rsp);
	if (ret < 0)
		goto err;

	do {
		if (rsp & ATC_SRCH_HIT) {
			mt7530_fdb_read(priv, &_fdb);
			if (_fdb.port_mask & BIT(port)) {
				ret = cb(_fdb.mac, _fdb.vid, _fdb.noarp,
					 data);
				if (ret < 0)
					break;
			}
		}
	} while (--cnt &&
		 !(rsp & ATC_SRCH_END) &&
		 !mt7530_fdb_cmd(priv, MT7530_FDB_NEXT, &rsp));
err:
	mutex_unlock(&priv->reg_mutex);

	return 0;
}
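/* Multicast entries share the address table with unicast entries: adding or
 * removing an MDB port first reads back the existing entry's port map from
 * MT7530_ATRD, then rewrites the entry with the updated map, or erases it
 * with STATIC_EMP once the map becomes empty.
 */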
static int
mt7530_port_mdb_add(struct dsa_switch *ds, int port,
		    const struct switchdev_obj_port_mdb *mdb,
		    struct dsa_db db)
{
	struct mt7530_priv *priv = ds->priv;
	const u8 *addr = mdb->addr;
	u16 vid = mdb->vid;
	u8 port_mask = 0;
	int ret;

	mutex_lock(&priv->reg_mutex);

	mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP);
	if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL))
		port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP)
			    & PORT_MAP_MASK;

	port_mask |= BIT(port);
	mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT);
	ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);

	mutex_unlock(&priv->reg_mutex);

	return ret;
}

static int
mt7530_port_mdb_del(struct dsa_switch *ds, int port,
		    const struct switchdev_obj_port_mdb *mdb,
		    struct dsa_db db)
{
	struct mt7530_priv *priv = ds->priv;
	const u8 *addr = mdb->addr;
	u16 vid = mdb->vid;
	u8 port_mask = 0;
	int ret;

	mutex_lock(&priv->reg_mutex);

	mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP);
	if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL))
		port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP)
			    & PORT_MAP_MASK;

	port_mask &= ~BIT(port);
	mt7530_fdb_write(priv, vid, port_mask, addr, -1,
			 port_mask ? STATIC_ENT : STATIC_EMP);
	ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);

	mutex_unlock(&priv->reg_mutex);

	return ret;
}

static int
mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid)
{
	struct mt7530_dummy_poll p;
	u32 val;
	int ret;

	val = VTCR_BUSY | VTCR_FUNC(cmd) | vid;
	mt7530_write(priv, MT7530_VTCR, val);

	INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_VTCR);
	ret = readx_poll_timeout(_mt7530_read, &p, val,
				 !(val & VTCR_BUSY), 20, 20000);
	if (ret < 0) {
		dev_err(priv->dev, "poll timeout\n");
		return ret;
	}

	val = mt7530_read(priv, MT7530_VTCR);
	if (val & VTCR_INVALID) {
		dev_err(priv->dev, "read VTCR invalid\n");
		return -EINVAL;
	}

	return 0;
}

static int
mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
			   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_port *cpu_dp = dp->cpu_dp;

	if (vlan_filtering) {
		/* The port is kept VLAN-unaware while the bridge has
		 * vlan_filtering disabled. Otherwise, both the port and the
		 * corresponding CPU port need to be set up as VLAN-aware
		 * ports.
		 */
		mt7530_port_set_vlan_aware(ds, port);
		mt7530_port_set_vlan_aware(ds, cpu_dp->index);
	} else {
		mt7530_port_set_vlan_unaware(ds, port);
	}

	return 0;
}
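/* VLAN table entries are accessed through a read/write window: the VAWD1 and
 * VAWD2 registers hold the entry contents, and mt7530_vlan_cmd() commits or
 * fetches them for a given VID via the VTCR command register.
 */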
static void
mt7530_hw_vlan_add(struct mt7530_priv *priv,
		   struct mt7530_hw_vlan_entry *entry)
{
	struct dsa_port *dp = dsa_to_port(priv->ds, entry->port);
	u8 new_members;
	u32 val;

	new_members = entry->old_members | BIT(entry->port);

	/* Validate the entry with independent learning, create an egress tag
	 * per VLAN, and join the port as one of the port members.
	 */
	val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | FID(FID_BRIDGED) |
	      VLAN_VALID;
	mt7530_write(priv, MT7530_VAWD1, val);

	/* Decide whether to tag outgoing packets from the port inside this
	 * VLAN. The CPU port is always treated as a tagged port, since it
	 * serves more than one VLAN, and uses the "stack" egress mode so that
	 * the VLAN tag is appended after the hardware special tag used as the
	 * DSA tag.
	 */
	if (dsa_port_is_cpu(dp))
		val = MT7530_VLAN_EGRESS_STACK;
	else if (entry->untagged)
		val = MT7530_VLAN_EGRESS_UNTAG;
	else
		val = MT7530_VLAN_EGRESS_TAG;
	mt7530_rmw(priv, MT7530_VAWD2,
		   ETAG_CTRL_P_MASK(entry->port),
		   ETAG_CTRL_P(entry->port, val));
}

static void
mt7530_hw_vlan_del(struct mt7530_priv *priv,
		   struct mt7530_hw_vlan_entry *entry)
{
	u8 new_members;
	u32 val;

	new_members = entry->old_members & ~BIT(entry->port);

	val = mt7530_read(priv, MT7530_VAWD1);
	if (!(val & VLAN_VALID)) {
		dev_err(priv->dev,
			"Cannot be deleted due to invalid entry\n");
		return;
	}

	if (new_members) {
		val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) |
		      VLAN_VALID;
		mt7530_write(priv, MT7530_VAWD1, val);
	} else {
		mt7530_write(priv, MT7530_VAWD1, 0);
		mt7530_write(priv, MT7530_VAWD2, 0);
	}
}

static void
mt7530_hw_vlan_update(struct mt7530_priv *priv, u16 vid,
		      struct mt7530_hw_vlan_entry *entry,
		      mt7530_vlan_op vlan_op)
{
	u32 val;

	/* Fetch entry */
	mt7530_vlan_cmd(priv, MT7530_VTCR_RD_VID, vid);

	val = mt7530_read(priv, MT7530_VAWD1);

	entry->old_members = (val >> PORT_MEM_SHFT) & PORT_MEM_MASK;

	/* Manipulate entry */
	vlan_op(priv, entry);

	/* Flush result to hardware */
	mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, vid);
}
static int
mt7530_setup_vlan0(struct mt7530_priv *priv)
{
	u32 val;

	/* Validate the entry with independent learning, keep the original
	 * ingress tag attribute.
	 */
	val = IVL_MAC | EG_CON | PORT_MEM(MT7530_ALL_MEMBERS) | FID(FID_BRIDGED) |
	      VLAN_VALID;
	mt7530_write(priv, MT7530_VAWD1, val);

	return mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, 0);
}

static int
mt7530_port_vlan_add(struct dsa_switch *ds, int port,
		     const struct switchdev_obj_port_vlan *vlan,
		     struct netlink_ext_ack *extack)
{
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mt7530_hw_vlan_entry new_entry;
	struct mt7530_priv *priv = ds->priv;

	mutex_lock(&priv->reg_mutex);

	mt7530_hw_vlan_entry_init(&new_entry, port, untagged);
	mt7530_hw_vlan_update(priv, vlan->vid, &new_entry, mt7530_hw_vlan_add);

	if (pvid) {
		priv->ports[port].pvid = vlan->vid;

		/* Accept all frames if PVID is set */
		mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
			   MT7530_VLAN_ACC_ALL);

		/* Only configure PVID if VLAN filtering is enabled */
		if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
			mt7530_rmw(priv, MT7530_PPBV1_P(port),
				   G0_PORT_VID_MASK,
				   G0_PORT_VID(vlan->vid));
	} else if (vlan->vid && priv->ports[port].pvid == vlan->vid) {
		/* This VLAN is overwritten without PVID, so unset it */
		priv->ports[port].pvid = G0_PORT_VID_DEF;

		/* Only accept tagged frames if the port is VLAN-aware */
		if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
			mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
				   MT7530_VLAN_ACC_TAGGED);

		mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
			   G0_PORT_VID_DEF);
	}

	mutex_unlock(&priv->reg_mutex);

	return 0;
}

static int
mt7530_port_vlan_del(struct dsa_switch *ds, int port,
		     const struct switchdev_obj_port_vlan *vlan)
{
	struct mt7530_hw_vlan_entry target_entry;
	struct mt7530_priv *priv = ds->priv;

	mutex_lock(&priv->reg_mutex);

	mt7530_hw_vlan_entry_init(&target_entry, port, 0);
	mt7530_hw_vlan_update(priv, vlan->vid, &target_entry,
			      mt7530_hw_vlan_del);

	/* PVID is being restored to the default whenever the PVID port
	 * is being removed from the VLAN.
	 */
	if (priv->ports[port].pvid == vlan->vid) {
		priv->ports[port].pvid = G0_PORT_VID_DEF;

		/* Only accept tagged frames if the port is VLAN-aware */
		if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
			mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
				   MT7530_VLAN_ACC_TAGGED);

		mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
			   G0_PORT_VID_DEF);
	}

	mutex_unlock(&priv->reg_mutex);

	return 0;
}

static int mt753x_mirror_port_get(unsigned int id, u32 val)
{
	return (id == ID_MT7531) ? MT7531_MIRROR_PORT_GET(val) :
				   MIRROR_PORT(val);
}

static int mt753x_mirror_port_set(unsigned int id, u32 val)
{
	return (id == ID_MT7531) ? MT7531_MIRROR_PORT_SET(val) :
				   MIRROR_PORT(val);
}
static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
				  struct dsa_mall_mirror_tc_entry *mirror,
				  bool ingress, struct netlink_ext_ack *extack)
{
	struct mt7530_priv *priv = ds->priv;
	int monitor_port;
	u32 val;

	/* Check for existent entry */
	if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
		return -EEXIST;

	val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));

	/* MT7530 only supports one monitor port */
	monitor_port = mt753x_mirror_port_get(priv->id, val);
	if (val & MT753X_MIRROR_EN(priv->id) &&
	    monitor_port != mirror->to_local_port)
		return -EEXIST;

	val |= MT753X_MIRROR_EN(priv->id);
	val &= ~MT753X_MIRROR_MASK(priv->id);
	val |= mt753x_mirror_port_set(priv->id, mirror->to_local_port);
	mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);

	val = mt7530_read(priv, MT7530_PCR_P(port));
	if (ingress) {
		val |= PORT_RX_MIR;
		priv->mirror_rx |= BIT(port);
	} else {
		val |= PORT_TX_MIR;
		priv->mirror_tx |= BIT(port);
	}
	mt7530_write(priv, MT7530_PCR_P(port), val);

	return 0;
}

static void mt753x_port_mirror_del(struct dsa_switch *ds, int port,
				   struct dsa_mall_mirror_tc_entry *mirror)
{
	struct mt7530_priv *priv = ds->priv;
	u32 val;

	val = mt7530_read(priv, MT7530_PCR_P(port));
	if (mirror->ingress) {
		val &= ~PORT_RX_MIR;
		priv->mirror_rx &= ~BIT(port);
	} else {
		val &= ~PORT_TX_MIR;
		priv->mirror_tx &= ~BIT(port);
	}
	mt7530_write(priv, MT7530_PCR_P(port), val);

	if (!priv->mirror_rx && !priv->mirror_tx) {
		val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));
		val &= ~MT753X_MIRROR_EN(priv->id);
		mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
	}
}

static enum dsa_tag_protocol
mtk_get_tag_protocol(struct dsa_switch *ds, int port,
		     enum dsa_tag_protocol mp)
{
	return DSA_TAG_PROTO_MTK;
}

#ifdef CONFIG_GPIOLIB
static inline u32
mt7530_gpio_to_bit(unsigned int offset)
{
	/* Map GPIO offset to register bit
	 * [ 2: 0] port 0 LED 0..2 as GPIO 0..2
	 * [ 6: 4] port 1 LED 0..2 as GPIO 3..5
	 * [10: 8] port 2 LED 0..2 as GPIO 6..8
	 * [14:12] port 3 LED 0..2 as GPIO 9..11
	 * [18:16] port 4 LED 0..2 as GPIO 12..14
	 */
	return BIT(offset + offset / 3);
}

static int
mt7530_gpio_get(struct gpio_chip *gc, unsigned int offset)
{
	struct mt7530_priv *priv = gpiochip_get_data(gc);
	u32 bit = mt7530_gpio_to_bit(offset);

	return !!(mt7530_read(priv, MT7530_LED_GPIO_DATA) & bit);
}

static void
mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct mt7530_priv *priv = gpiochip_get_data(gc);
	u32 bit = mt7530_gpio_to_bit(offset);

	if (value)
		mt7530_set(priv, MT7530_LED_GPIO_DATA, bit);
	else
		mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit);
}
static int
mt7530_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
{
	struct mt7530_priv *priv = gpiochip_get_data(gc);
	u32 bit = mt7530_gpio_to_bit(offset);

	return (mt7530_read(priv, MT7530_LED_GPIO_DIR) & bit) ?
		GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
}

static int
mt7530_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
{
	struct mt7530_priv *priv = gpiochip_get_data(gc);
	u32 bit = mt7530_gpio_to_bit(offset);

	mt7530_clear(priv, MT7530_LED_GPIO_OE, bit);
	mt7530_clear(priv, MT7530_LED_GPIO_DIR, bit);

	return 0;
}

static int
mt7530_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct mt7530_priv *priv = gpiochip_get_data(gc);
	u32 bit = mt7530_gpio_to_bit(offset);

	mt7530_set(priv, MT7530_LED_GPIO_DIR, bit);

	if (value)
		mt7530_set(priv, MT7530_LED_GPIO_DATA, bit);
	else
		mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit);

	mt7530_set(priv, MT7530_LED_GPIO_OE, bit);

	return 0;
}

static int
mt7530_setup_gpio(struct mt7530_priv *priv)
{
	struct device *dev = priv->dev;
	struct gpio_chip *gc;

	gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
	if (!gc)
		return -ENOMEM;

	mt7530_write(priv, MT7530_LED_GPIO_OE, 0);
	mt7530_write(priv, MT7530_LED_GPIO_DIR, 0);
	mt7530_write(priv, MT7530_LED_IO_MODE, 0);

	gc->label = "mt7530";
	gc->parent = dev;
	gc->owner = THIS_MODULE;
	gc->get_direction = mt7530_gpio_get_direction;
	gc->direction_input = mt7530_gpio_direction_input;
	gc->direction_output = mt7530_gpio_direction_output;
	gc->get = mt7530_gpio_get;
	gc->set = mt7530_gpio_set;
	gc->base = -1;
	gc->ngpio = 15;
	gc->can_sleep = true;

	return devm_gpiochip_add_data(dev, gc, priv);
}
#endif /* CONFIG_GPIOLIB */

static irqreturn_t
mt7530_irq_thread_fn(int irq, void *dev_id)
{
	struct mt7530_priv *priv = dev_id;
	bool handled = false;
	u32 val;
	int p;

	mt7530_mutex_lock(priv);
	val = mt7530_mii_read(priv, MT7530_SYS_INT_STS);
	mt7530_mii_write(priv, MT7530_SYS_INT_STS, val);
	mt7530_mutex_unlock(priv);

	for (p = 0; p < MT7530_NUM_PHYS; p++) {
		if (BIT(p) & val) {
			unsigned int irq;

			irq = irq_find_mapping(priv->irq_domain, p);
			handle_nested_irq(irq);
			handled = true;
		}
	}

	return IRQ_RETVAL(handled);
}

static void
mt7530_irq_mask(struct irq_data *d)
{
	struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);

	priv->irq_enable &= ~BIT(d->hwirq);
}

static void
mt7530_irq_unmask(struct irq_data *d)
{
	struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);

	priv->irq_enable |= BIT(d->hwirq);
}

static void
mt7530_irq_bus_lock(struct irq_data *d)
{
	struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);

	mt7530_mutex_lock(priv);
}

static void
mt7530_irq_bus_sync_unlock(struct irq_data *d)
{
	struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);

	mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
	mt7530_mutex_unlock(priv);
}

static struct irq_chip mt7530_irq_chip = {
	.name = KBUILD_MODNAME,
	.irq_mask = mt7530_irq_mask,
	.irq_unmask = mt7530_irq_unmask,
	.irq_bus_lock = mt7530_irq_bus_lock,
	.irq_bus_sync_unlock = mt7530_irq_bus_sync_unlock,
};
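/* mt7530_irq_mask()/unmask() only update the irq_enable shadow word; since
 * the interrupt enable register can only be reached over the (sleeping) MDIO
 * bus, the actual write to MT7530_SYS_INT_EN is deferred to
 * mt7530_irq_bus_sync_unlock().
 */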
irq_set_chip_data(irq, domain->host_data); 2008 irq_set_chip_and_handler(irq, &mt7530_irq_chip, handle_simple_irq); 2009 irq_set_nested_thread(irq, true); 2010 irq_set_noprobe(irq); 2011 2012 return 0; 2013 } 2014 2015 static const struct irq_domain_ops mt7530_irq_domain_ops = { 2016 .map = mt7530_irq_map, 2017 .xlate = irq_domain_xlate_onecell, 2018 }; 2019 2020 static void 2021 mt7988_irq_mask(struct irq_data *d) 2022 { 2023 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); 2024 2025 priv->irq_enable &= ~BIT(d->hwirq); 2026 mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable); 2027 } 2028 2029 static void 2030 mt7988_irq_unmask(struct irq_data *d) 2031 { 2032 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); 2033 2034 priv->irq_enable |= BIT(d->hwirq); 2035 mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable); 2036 } 2037 2038 static struct irq_chip mt7988_irq_chip = { 2039 .name = KBUILD_MODNAME, 2040 .irq_mask = mt7988_irq_mask, 2041 .irq_unmask = mt7988_irq_unmask, 2042 }; 2043 2044 static int 2045 mt7988_irq_map(struct irq_domain *domain, unsigned int irq, 2046 irq_hw_number_t hwirq) 2047 { 2048 irq_set_chip_data(irq, domain->host_data); 2049 irq_set_chip_and_handler(irq, &mt7988_irq_chip, handle_simple_irq); 2050 irq_set_nested_thread(irq, true); 2051 irq_set_noprobe(irq); 2052 2053 return 0; 2054 } 2055 2056 static const struct irq_domain_ops mt7988_irq_domain_ops = { 2057 .map = mt7988_irq_map, 2058 .xlate = irq_domain_xlate_onecell, 2059 }; 2060 2061 static void 2062 mt7530_setup_mdio_irq(struct mt7530_priv *priv) 2063 { 2064 struct dsa_switch *ds = priv->ds; 2065 int p; 2066 2067 for (p = 0; p < MT7530_NUM_PHYS; p++) { 2068 if (BIT(p) & ds->phys_mii_mask) { 2069 unsigned int irq; 2070 2071 irq = irq_create_mapping(priv->irq_domain, p); 2072 ds->user_mii_bus->irq[p] = irq; 2073 } 2074 } 2075 } 2076 2077 static int 2078 mt7530_setup_irq(struct mt7530_priv *priv) 2079 { 2080 struct device *dev = priv->dev; 2081 struct device_node *np = dev->of_node; 2082 int ret; 2083 2084 if (!of_property_read_bool(np, "interrupt-controller")) { 2085 dev_info(dev, "no interrupt support\n"); 2086 return 0; 2087 } 2088 2089 priv->irq = of_irq_get(np, 0); 2090 if (priv->irq <= 0) { 2091 dev_err(dev, "failed to get parent IRQ: %d\n", priv->irq); 2092 return priv->irq ? 
: -EINVAL; 2093 } 2094 2095 if (priv->id == ID_MT7988) 2096 priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS, 2097 &mt7988_irq_domain_ops, 2098 priv); 2099 else 2100 priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS, 2101 &mt7530_irq_domain_ops, 2102 priv); 2103 2104 if (!priv->irq_domain) { 2105 dev_err(dev, "failed to create IRQ domain\n"); 2106 return -ENOMEM; 2107 } 2108 2109 /* This register must be set for MT7530 to properly fire interrupts */ 2110 if (priv->id != ID_MT7531) 2111 mt7530_set(priv, MT7530_TOP_SIG_CTRL, TOP_SIG_CTRL_NORMAL); 2112 2113 ret = request_threaded_irq(priv->irq, NULL, mt7530_irq_thread_fn, 2114 IRQF_ONESHOT, KBUILD_MODNAME, priv); 2115 if (ret) { 2116 irq_domain_remove(priv->irq_domain); 2117 dev_err(dev, "failed to request IRQ: %d\n", ret); 2118 return ret; 2119 } 2120 2121 return 0; 2122 } 2123 2124 static void 2125 mt7530_free_mdio_irq(struct mt7530_priv *priv) 2126 { 2127 int p; 2128 2129 for (p = 0; p < MT7530_NUM_PHYS; p++) { 2130 if (BIT(p) & priv->ds->phys_mii_mask) { 2131 unsigned int irq; 2132 2133 irq = irq_find_mapping(priv->irq_domain, p); 2134 irq_dispose_mapping(irq); 2135 } 2136 } 2137 } 2138 2139 static void 2140 mt7530_free_irq_common(struct mt7530_priv *priv) 2141 { 2142 free_irq(priv->irq, priv); 2143 irq_domain_remove(priv->irq_domain); 2144 } 2145 2146 static void 2147 mt7530_free_irq(struct mt7530_priv *priv) 2148 { 2149 mt7530_free_mdio_irq(priv); 2150 mt7530_free_irq_common(priv); 2151 } 2152 2153 static int 2154 mt7530_setup_mdio(struct mt7530_priv *priv) 2155 { 2156 struct dsa_switch *ds = priv->ds; 2157 struct device *dev = priv->dev; 2158 struct mii_bus *bus; 2159 static int idx; 2160 int ret; 2161 2162 bus = devm_mdiobus_alloc(dev); 2163 if (!bus) 2164 return -ENOMEM; 2165 2166 ds->user_mii_bus = bus; 2167 bus->priv = priv; 2168 bus->name = KBUILD_MODNAME "-mii"; 2169 snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++); 2170 bus->read = mt753x_phy_read_c22; 2171 bus->write = mt753x_phy_write_c22; 2172 bus->read_c45 = mt753x_phy_read_c45; 2173 bus->write_c45 = mt753x_phy_write_c45; 2174 bus->parent = dev; 2175 bus->phy_mask = ~ds->phys_mii_mask; 2176 2177 if (priv->irq) 2178 mt7530_setup_mdio_irq(priv); 2179 2180 ret = devm_mdiobus_register(dev, bus); 2181 if (ret) { 2182 dev_err(dev, "failed to register MDIO bus: %d\n", ret); 2183 if (priv->irq) 2184 mt7530_free_mdio_irq(priv); 2185 } 2186 2187 return ret; 2188 } 2189 2190 static int 2191 mt7530_setup(struct dsa_switch *ds) 2192 { 2193 struct mt7530_priv *priv = ds->priv; 2194 struct device_node *dn = NULL; 2195 struct device_node *phy_node; 2196 struct device_node *mac_np; 2197 struct mt7530_dummy_poll p; 2198 phy_interface_t interface; 2199 struct dsa_port *cpu_dp; 2200 u32 id, val; 2201 int ret, i; 2202 2203 /* The parent node of conduit netdev which holds the common system 2204 * controller also is the container for two GMACs nodes representing 2205 * as two netdev instances. 
2206 */ 2207 dsa_switch_for_each_cpu_port(cpu_dp, ds) { 2208 dn = cpu_dp->conduit->dev.of_node->parent; 2209 /* It doesn't matter which CPU port is found first, 2210 * their conduits should share the same parent OF node 2211 */ 2212 break; 2213 } 2214 2215 if (!dn) { 2216 dev_err(ds->dev, "parent OF node of DSA conduit not found"); 2217 return -EINVAL; 2218 } 2219 2220 ds->assisted_learning_on_cpu_port = true; 2221 ds->mtu_enforcement_ingress = true; 2222 2223 if (priv->id == ID_MT7530) { 2224 regulator_set_voltage(priv->core_pwr, 1000000, 1000000); 2225 ret = regulator_enable(priv->core_pwr); 2226 if (ret < 0) { 2227 dev_err(priv->dev, 2228 "Failed to enable core power: %d\n", ret); 2229 return ret; 2230 } 2231 2232 regulator_set_voltage(priv->io_pwr, 3300000, 3300000); 2233 ret = regulator_enable(priv->io_pwr); 2234 if (ret < 0) { 2235 dev_err(priv->dev, "Failed to enable io pwr: %d\n", 2236 ret); 2237 return ret; 2238 } 2239 } 2240 2241 /* Reset whole chip through gpio pin or memory-mapped registers for 2242 * different type of hardware 2243 */ 2244 if (priv->mcm) { 2245 reset_control_assert(priv->rstc); 2246 usleep_range(1000, 1100); 2247 reset_control_deassert(priv->rstc); 2248 } else { 2249 gpiod_set_value_cansleep(priv->reset, 0); 2250 usleep_range(1000, 1100); 2251 gpiod_set_value_cansleep(priv->reset, 1); 2252 } 2253 2254 /* Waiting for MT7530 got to stable */ 2255 INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP); 2256 ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0, 2257 20, 1000000); 2258 if (ret < 0) { 2259 dev_err(priv->dev, "reset timeout\n"); 2260 return ret; 2261 } 2262 2263 id = mt7530_read(priv, MT7530_CREV); 2264 id >>= CHIP_NAME_SHIFT; 2265 if (id != MT7530_ID) { 2266 dev_err(priv->dev, "chip %x can't be supported\n", id); 2267 return -ENODEV; 2268 } 2269 2270 /* Reset the switch through internal reset */ 2271 mt7530_write(priv, MT7530_SYS_CTRL, 2272 SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST | 2273 SYS_CTRL_REG_RST); 2274 2275 mt7530_pll_setup(priv); 2276 2277 /* Lower Tx driving for TRGMII path */ 2278 for (i = 0; i < NUM_TRGMII_CTRL; i++) 2279 mt7530_write(priv, MT7530_TRGMII_TD_ODT(i), 2280 TD_DM_DRVP(8) | TD_DM_DRVN(8)); 2281 2282 for (i = 0; i < NUM_TRGMII_CTRL; i++) 2283 mt7530_rmw(priv, MT7530_TRGMII_RD(i), 2284 RD_TAP_MASK, RD_TAP(16)); 2285 2286 /* Enable port 6 */ 2287 val = mt7530_read(priv, MT7530_MHWTRAP); 2288 val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS; 2289 val |= MHWTRAP_MANUAL; 2290 mt7530_write(priv, MT7530_MHWTRAP, val); 2291 2292 priv->p6_interface = PHY_INTERFACE_MODE_NA; 2293 2294 mt753x_trap_frames(priv); 2295 2296 /* Enable and reset MIB counters */ 2297 mt7530_mib_reset(ds); 2298 2299 for (i = 0; i < MT7530_NUM_PORTS; i++) { 2300 /* Disable forwarding by default on all ports */ 2301 mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK, 2302 PCR_MATRIX_CLR); 2303 2304 /* Disable learning by default on all ports */ 2305 mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); 2306 2307 if (dsa_is_cpu_port(ds, i)) { 2308 ret = mt753x_cpu_port_enable(ds, i); 2309 if (ret) 2310 return ret; 2311 } else { 2312 mt7530_port_disable(ds, i); 2313 2314 /* Set default PVID to 0 on all user ports */ 2315 mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK, 2316 G0_PORT_VID_DEF); 2317 } 2318 /* Enable consistent egress tag */ 2319 mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK, 2320 PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); 2321 } 2322 2323 /* Setup VLAN ID 0 for VLAN-unaware bridges */ 2324 ret = mt7530_setup_vlan0(priv); 2325 if (ret) 2326 return ret; 2327 2328 /* Setup 
port 5 */ 2329 priv->p5_intf_sel = P5_DISABLED; 2330 interface = PHY_INTERFACE_MODE_NA; 2331 2332 if (!dsa_is_unused_port(ds, 5)) { 2333 priv->p5_intf_sel = P5_INTF_SEL_GMAC5; 2334 ret = of_get_phy_mode(dsa_to_port(ds, 5)->dn, &interface); 2335 if (ret && ret != -ENODEV) 2336 return ret; 2337 } else { 2338 /* Scan the ethernet nodes. look for GMAC1, lookup used phy */ 2339 for_each_child_of_node(dn, mac_np) { 2340 if (!of_device_is_compatible(mac_np, 2341 "mediatek,eth-mac")) 2342 continue; 2343 2344 ret = of_property_read_u32(mac_np, "reg", &id); 2345 if (ret < 0 || id != 1) 2346 continue; 2347 2348 phy_node = of_parse_phandle(mac_np, "phy-handle", 0); 2349 if (!phy_node) 2350 continue; 2351 2352 if (phy_node->parent == priv->dev->of_node->parent) { 2353 ret = of_get_phy_mode(mac_np, &interface); 2354 if (ret && ret != -ENODEV) { 2355 of_node_put(mac_np); 2356 of_node_put(phy_node); 2357 return ret; 2358 } 2359 id = of_mdio_parse_addr(ds->dev, phy_node); 2360 if (id == 0) 2361 priv->p5_intf_sel = P5_INTF_SEL_PHY_P0; 2362 if (id == 4) 2363 priv->p5_intf_sel = P5_INTF_SEL_PHY_P4; 2364 } 2365 of_node_put(mac_np); 2366 of_node_put(phy_node); 2367 break; 2368 } 2369 } 2370 2371 #ifdef CONFIG_GPIOLIB 2372 if (of_property_read_bool(priv->dev->of_node, "gpio-controller")) { 2373 ret = mt7530_setup_gpio(priv); 2374 if (ret) 2375 return ret; 2376 } 2377 #endif /* CONFIG_GPIOLIB */ 2378 2379 mt7530_setup_port5(ds, interface); 2380 2381 /* Flush the FDB table */ 2382 ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL); 2383 if (ret < 0) 2384 return ret; 2385 2386 return 0; 2387 } 2388 2389 static int 2390 mt7531_setup_common(struct dsa_switch *ds) 2391 { 2392 struct mt7530_priv *priv = ds->priv; 2393 int ret, i; 2394 2395 mt753x_trap_frames(priv); 2396 2397 /* Enable and reset MIB counters */ 2398 mt7530_mib_reset(ds); 2399 2400 /* Disable flooding on all ports */ 2401 mt7530_clear(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK | 2402 UNU_FFP_MASK); 2403 2404 for (i = 0; i < MT7530_NUM_PORTS; i++) { 2405 /* Disable forwarding by default on all ports */ 2406 mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK, 2407 PCR_MATRIX_CLR); 2408 2409 /* Disable learning by default on all ports */ 2410 mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); 2411 2412 mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR); 2413 2414 if (dsa_is_cpu_port(ds, i)) { 2415 ret = mt753x_cpu_port_enable(ds, i); 2416 if (ret) 2417 return ret; 2418 } else { 2419 mt7530_port_disable(ds, i); 2420 2421 /* Set default PVID to 0 on all user ports */ 2422 mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK, 2423 G0_PORT_VID_DEF); 2424 } 2425 2426 /* Enable consistent egress tag */ 2427 mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK, 2428 PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); 2429 } 2430 2431 /* Flush the FDB table */ 2432 ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL); 2433 if (ret < 0) 2434 return ret; 2435 2436 return 0; 2437 } 2438 2439 static int 2440 mt7531_setup(struct dsa_switch *ds) 2441 { 2442 struct mt7530_priv *priv = ds->priv; 2443 struct mt7530_dummy_poll p; 2444 u32 val, id; 2445 int ret, i; 2446 2447 /* Reset whole chip through gpio pin or memory-mapped registers for 2448 * different type of hardware 2449 */ 2450 if (priv->mcm) { 2451 reset_control_assert(priv->rstc); 2452 usleep_range(1000, 1100); 2453 reset_control_deassert(priv->rstc); 2454 } else { 2455 gpiod_set_value_cansleep(priv->reset, 0); 2456 usleep_range(1000, 1100); 2457 gpiod_set_value_cansleep(priv->reset, 1); 2458 } 2459 2460 /* Waiting for MT7530 got to 
stable */
	INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP);
	ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
				 20, 1000000);
	if (ret < 0) {
		dev_err(priv->dev, "reset timeout\n");
		return ret;
	}

	id = mt7530_read(priv, MT7531_CREV);
	id >>= CHIP_NAME_SHIFT;

	if (id != MT7531_ID) {
		dev_err(priv->dev, "chip %x can't be supported\n", id);
		return -ENODEV;
	}

	/* All MACs must be forced link-down before the software reset */
	for (i = 0; i < MT7530_NUM_PORTS; i++)
		mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK);

	/* Reset the switch through internal reset */
	mt7530_write(priv, MT7530_SYS_CTRL,
		     SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
		     SYS_CTRL_REG_RST);

	mt7531_pll_setup(priv);

	if (mt7531_dual_sgmii_supported(priv)) {
		priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;

		/* Let ds->user_mii_bus be able to access the external PHY. */
		mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK,
			   MT7531_EXT_P_MDC_11);
		mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK,
			   MT7531_EXT_P_MDIO_12);
	} else {
		priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
	}
	dev_dbg(ds->dev, "P5 support %s interface\n",
		p5_intf_modes(priv->p5_intf_sel));

	mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK,
		   MT7531_GPIO0_INTERRUPT);

	/* Let phylink decide the interface later. */
	priv->p5_interface = PHY_INTERFACE_MODE_NA;
	priv->p6_interface = PHY_INTERFACE_MODE_NA;

	/* Enable the PHY core PLL. Since phy_device has not yet been created
	 * and phy_{read,write}_mmd_indirect is not available, we use our own
	 * mt7531_ind_c45_phy_{read,write} helpers to access the PHY here.
	 */
	val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR,
				      MDIO_MMD_VEND2, CORE_PLL_GROUP4);
	val |= MT7531_PHY_PLL_BYPASS_MODE;
	val &= ~MT7531_PHY_PLL_OFF;
	mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
				 CORE_PLL_GROUP4, val);

	mt7531_setup_common(ds);

	/* Setup VLAN ID 0 for VLAN-unaware bridges */
	ret = mt7530_setup_vlan0(priv);
	if (ret)
		return ret;

	ds->assisted_learning_on_cpu_port = true;
	ds->mtu_enforcement_ingress = true;

	return 0;
}

static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
				     struct phylink_config *config)
{
	switch (port) {
	case 0 ... 4: /* Internal phy */
		__set_bit(PHY_INTERFACE_MODE_GMII,
			  config->supported_interfaces);
		break;

	case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
		phy_interface_set_rgmii(config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_MII,
			  config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_GMII,
			  config->supported_interfaces);
		break;

	case 6: /* 1st cpu port */
		__set_bit(PHY_INTERFACE_MODE_RGMII,
			  config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_TRGMII,
			  config->supported_interfaces);
		break;
	}
}

static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
{
	return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII);
}

static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
				     struct phylink_config *config)
{
	struct mt7530_priv *priv = ds->priv;

	switch (port) {
	case 0 ...
4: /* Internal phy */ 2572 __set_bit(PHY_INTERFACE_MODE_GMII, 2573 config->supported_interfaces); 2574 break; 2575 2576 case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */ 2577 if (mt7531_is_rgmii_port(priv, port)) { 2578 phy_interface_set_rgmii(config->supported_interfaces); 2579 break; 2580 } 2581 fallthrough; 2582 2583 case 6: /* 1st cpu port supports sgmii/8023z only */ 2584 __set_bit(PHY_INTERFACE_MODE_SGMII, 2585 config->supported_interfaces); 2586 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 2587 config->supported_interfaces); 2588 __set_bit(PHY_INTERFACE_MODE_2500BASEX, 2589 config->supported_interfaces); 2590 2591 config->mac_capabilities |= MAC_2500FD; 2592 break; 2593 } 2594 } 2595 2596 static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port, 2597 struct phylink_config *config) 2598 { 2599 phy_interface_zero(config->supported_interfaces); 2600 2601 switch (port) { 2602 case 0 ... 4: /* Internal phy */ 2603 __set_bit(PHY_INTERFACE_MODE_INTERNAL, 2604 config->supported_interfaces); 2605 break; 2606 2607 case 6: 2608 __set_bit(PHY_INTERFACE_MODE_INTERNAL, 2609 config->supported_interfaces); 2610 config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | 2611 MAC_10000FD; 2612 } 2613 } 2614 2615 static int 2616 mt753x_pad_setup(struct dsa_switch *ds, const struct phylink_link_state *state) 2617 { 2618 struct mt7530_priv *priv = ds->priv; 2619 2620 return priv->info->pad_setup(ds, state->interface); 2621 } 2622 2623 static int 2624 mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode, 2625 phy_interface_t interface) 2626 { 2627 struct mt7530_priv *priv = ds->priv; 2628 2629 /* Only need to setup port5. */ 2630 if (port != 5) 2631 return 0; 2632 2633 mt7530_setup_port5(priv->ds, interface); 2634 2635 return 0; 2636 } 2637 2638 static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port, 2639 phy_interface_t interface, 2640 struct phy_device *phydev) 2641 { 2642 u32 val; 2643 2644 if (!mt7531_is_rgmii_port(priv, port)) { 2645 dev_err(priv->dev, "RGMII mode is not available for port %d\n", 2646 port); 2647 return -EINVAL; 2648 } 2649 2650 val = mt7530_read(priv, MT7531_CLKGEN_CTRL); 2651 val |= GP_CLK_EN; 2652 val &= ~GP_MODE_MASK; 2653 val |= GP_MODE(MT7531_GP_MODE_RGMII); 2654 val &= ~CLK_SKEW_IN_MASK; 2655 val |= CLK_SKEW_IN(MT7531_CLK_SKEW_NO_CHG); 2656 val &= ~CLK_SKEW_OUT_MASK; 2657 val |= CLK_SKEW_OUT(MT7531_CLK_SKEW_NO_CHG); 2658 val |= TXCLK_NO_REVERSE | RXCLK_NO_DELAY; 2659 2660 /* Do not adjust rgmii delay when vendor phy driver presents. 
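 * A vendor PHY driver is expected to program the RGMII delays on the PHY
 * side itself, so the switch-side clock skew below is only touched when
 * the generic PHY driver is in use (or when no PHY device is attached).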
*/ 2661 if (!phydev || phy_driver_is_genphy(phydev)) { 2662 val &= ~(TXCLK_NO_REVERSE | RXCLK_NO_DELAY); 2663 switch (interface) { 2664 case PHY_INTERFACE_MODE_RGMII: 2665 val |= TXCLK_NO_REVERSE; 2666 val |= RXCLK_NO_DELAY; 2667 break; 2668 case PHY_INTERFACE_MODE_RGMII_RXID: 2669 val |= TXCLK_NO_REVERSE; 2670 break; 2671 case PHY_INTERFACE_MODE_RGMII_TXID: 2672 val |= RXCLK_NO_DELAY; 2673 break; 2674 case PHY_INTERFACE_MODE_RGMII_ID: 2675 break; 2676 default: 2677 return -EINVAL; 2678 } 2679 } 2680 mt7530_write(priv, MT7531_CLKGEN_CTRL, val); 2681 2682 return 0; 2683 } 2684 2685 static bool mt753x_is_mac_port(u32 port) 2686 { 2687 return (port == 5 || port == 6); 2688 } 2689 2690 static int 2691 mt7988_mac_config(struct dsa_switch *ds, int port, unsigned int mode, 2692 phy_interface_t interface) 2693 { 2694 if (dsa_is_cpu_port(ds, port) && 2695 interface == PHY_INTERFACE_MODE_INTERNAL) 2696 return 0; 2697 2698 return -EINVAL; 2699 } 2700 2701 static int 2702 mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode, 2703 phy_interface_t interface) 2704 { 2705 struct mt7530_priv *priv = ds->priv; 2706 struct phy_device *phydev; 2707 struct dsa_port *dp; 2708 2709 if (!mt753x_is_mac_port(port)) { 2710 dev_err(priv->dev, "port %d is not a MAC port\n", port); 2711 return -EINVAL; 2712 } 2713 2714 switch (interface) { 2715 case PHY_INTERFACE_MODE_RGMII: 2716 case PHY_INTERFACE_MODE_RGMII_ID: 2717 case PHY_INTERFACE_MODE_RGMII_RXID: 2718 case PHY_INTERFACE_MODE_RGMII_TXID: 2719 dp = dsa_to_port(ds, port); 2720 phydev = dp->user->phydev; 2721 return mt7531_rgmii_setup(priv, port, interface, phydev); 2722 case PHY_INTERFACE_MODE_SGMII: 2723 case PHY_INTERFACE_MODE_NA: 2724 case PHY_INTERFACE_MODE_1000BASEX: 2725 case PHY_INTERFACE_MODE_2500BASEX: 2726 /* handled in SGMII PCS driver */ 2727 return 0; 2728 default: 2729 return -EINVAL; 2730 } 2731 2732 return -EINVAL; 2733 } 2734 2735 static int 2736 mt753x_mac_config(struct dsa_switch *ds, int port, unsigned int mode, 2737 const struct phylink_link_state *state) 2738 { 2739 struct mt7530_priv *priv = ds->priv; 2740 2741 return priv->info->mac_port_config(ds, port, mode, state->interface); 2742 } 2743 2744 static struct phylink_pcs * 2745 mt753x_phylink_mac_select_pcs(struct dsa_switch *ds, int port, 2746 phy_interface_t interface) 2747 { 2748 struct mt7530_priv *priv = ds->priv; 2749 2750 switch (interface) { 2751 case PHY_INTERFACE_MODE_TRGMII: 2752 return &priv->pcs[port].pcs; 2753 case PHY_INTERFACE_MODE_SGMII: 2754 case PHY_INTERFACE_MODE_1000BASEX: 2755 case PHY_INTERFACE_MODE_2500BASEX: 2756 return priv->ports[port].sgmii_pcs; 2757 default: 2758 return NULL; 2759 } 2760 } 2761 2762 static void 2763 mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, 2764 const struct phylink_link_state *state) 2765 { 2766 struct mt7530_priv *priv = ds->priv; 2767 u32 mcr_cur, mcr_new; 2768 2769 switch (port) { 2770 case 0 ... 
4: /* Internal phy */ 2771 if (state->interface != PHY_INTERFACE_MODE_GMII && 2772 state->interface != PHY_INTERFACE_MODE_INTERNAL) 2773 goto unsupported; 2774 break; 2775 case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */ 2776 if (priv->p5_interface == state->interface) 2777 break; 2778 2779 if (mt753x_mac_config(ds, port, mode, state) < 0) 2780 goto unsupported; 2781 2782 if (priv->p5_intf_sel != P5_DISABLED) 2783 priv->p5_interface = state->interface; 2784 break; 2785 case 6: /* 1st cpu port */ 2786 if (priv->p6_interface == state->interface) 2787 break; 2788 2789 mt753x_pad_setup(ds, state); 2790 2791 if (mt753x_mac_config(ds, port, mode, state) < 0) 2792 goto unsupported; 2793 2794 priv->p6_interface = state->interface; 2795 break; 2796 default: 2797 unsupported: 2798 dev_err(ds->dev, "%s: unsupported %s port: %i\n", 2799 __func__, phy_modes(state->interface), port); 2800 return; 2801 } 2802 2803 mcr_cur = mt7530_read(priv, MT7530_PMCR_P(port)); 2804 mcr_new = mcr_cur; 2805 mcr_new &= ~PMCR_LINK_SETTINGS_MASK; 2806 mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN | 2807 PMCR_BACKPR_EN | PMCR_FORCE_MODE_ID(priv->id); 2808 2809 /* Are we connected to external phy */ 2810 if (port == 5 && dsa_is_user_port(ds, 5)) 2811 mcr_new |= PMCR_EXT_PHY; 2812 2813 if (mcr_new != mcr_cur) 2814 mt7530_write(priv, MT7530_PMCR_P(port), mcr_new); 2815 } 2816 2817 static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port, 2818 unsigned int mode, 2819 phy_interface_t interface) 2820 { 2821 struct mt7530_priv *priv = ds->priv; 2822 2823 mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK); 2824 } 2825 2826 static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port, 2827 unsigned int mode, 2828 phy_interface_t interface, 2829 struct phy_device *phydev, 2830 int speed, int duplex, 2831 bool tx_pause, bool rx_pause) 2832 { 2833 struct mt7530_priv *priv = ds->priv; 2834 u32 mcr; 2835 2836 mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK; 2837 2838 /* MT753x MAC works in 1G full duplex mode for all up-clocked 2839 * variants. 
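 * For those interface modes (internal, TRGMII and 802.3z) the speed and
 * duplex handed in by phylink are overridden below and the MAC is forced
 * to 1G full duplex.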
2840 */ 2841 if (interface == PHY_INTERFACE_MODE_INTERNAL || 2842 interface == PHY_INTERFACE_MODE_TRGMII || 2843 (phy_interface_mode_is_8023z(interface))) { 2844 speed = SPEED_1000; 2845 duplex = DUPLEX_FULL; 2846 } 2847 2848 switch (speed) { 2849 case SPEED_1000: 2850 mcr |= PMCR_FORCE_SPEED_1000; 2851 break; 2852 case SPEED_100: 2853 mcr |= PMCR_FORCE_SPEED_100; 2854 break; 2855 } 2856 if (duplex == DUPLEX_FULL) { 2857 mcr |= PMCR_FORCE_FDX; 2858 if (tx_pause) 2859 mcr |= PMCR_TX_FC_EN; 2860 if (rx_pause) 2861 mcr |= PMCR_RX_FC_EN; 2862 } 2863 2864 if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) { 2865 switch (speed) { 2866 case SPEED_1000: 2867 mcr |= PMCR_FORCE_EEE1G; 2868 break; 2869 case SPEED_100: 2870 mcr |= PMCR_FORCE_EEE100; 2871 break; 2872 } 2873 } 2874 2875 mt7530_set(priv, MT7530_PMCR_P(port), mcr); 2876 } 2877 2878 static int 2879 mt7531_cpu_port_config(struct dsa_switch *ds, int port) 2880 { 2881 struct mt7530_priv *priv = ds->priv; 2882 phy_interface_t interface; 2883 int speed; 2884 int ret; 2885 2886 switch (port) { 2887 case 5: 2888 if (mt7531_is_rgmii_port(priv, port)) 2889 interface = PHY_INTERFACE_MODE_RGMII; 2890 else 2891 interface = PHY_INTERFACE_MODE_2500BASEX; 2892 2893 priv->p5_interface = interface; 2894 break; 2895 case 6: 2896 interface = PHY_INTERFACE_MODE_2500BASEX; 2897 2898 priv->p6_interface = interface; 2899 break; 2900 default: 2901 return -EINVAL; 2902 } 2903 2904 if (interface == PHY_INTERFACE_MODE_2500BASEX) 2905 speed = SPEED_2500; 2906 else 2907 speed = SPEED_1000; 2908 2909 ret = mt7531_mac_config(ds, port, MLO_AN_FIXED, interface); 2910 if (ret) 2911 return ret; 2912 mt7530_write(priv, MT7530_PMCR_P(port), 2913 PMCR_CPU_PORT_SETTING(priv->id)); 2914 mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL, 2915 speed, DUPLEX_FULL, true, true); 2916 2917 return 0; 2918 } 2919 2920 static int 2921 mt7988_cpu_port_config(struct dsa_switch *ds, int port) 2922 { 2923 struct mt7530_priv *priv = ds->priv; 2924 2925 mt7530_write(priv, MT7530_PMCR_P(port), 2926 PMCR_CPU_PORT_SETTING(priv->id)); 2927 2928 mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, 2929 PHY_INTERFACE_MODE_INTERNAL, NULL, 2930 SPEED_10000, DUPLEX_FULL, true, true); 2931 2932 return 0; 2933 } 2934 2935 static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port, 2936 struct phylink_config *config) 2937 { 2938 struct mt7530_priv *priv = ds->priv; 2939 2940 /* This switch only supports full-duplex at 1Gbps */ 2941 config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | 2942 MAC_10 | MAC_100 | MAC_1000FD; 2943 2944 priv->info->mac_port_get_caps(ds, port, config); 2945 } 2946 2947 static int mt753x_pcs_validate(struct phylink_pcs *pcs, 2948 unsigned long *supported, 2949 const struct phylink_link_state *state) 2950 { 2951 /* Autonegotiation is not supported in TRGMII nor 802.3z modes */ 2952 if (state->interface == PHY_INTERFACE_MODE_TRGMII || 2953 phy_interface_mode_is_8023z(state->interface)) 2954 phylink_clear(supported, Autoneg); 2955 2956 return 0; 2957 } 2958 2959 static void mt7530_pcs_get_state(struct phylink_pcs *pcs, 2960 struct phylink_link_state *state) 2961 { 2962 struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv; 2963 int port = pcs_to_mt753x_pcs(pcs)->port; 2964 u32 pmsr; 2965 2966 pmsr = mt7530_read(priv, MT7530_PMSR_P(port)); 2967 2968 state->link = (pmsr & PMSR_LINK); 2969 state->an_complete = state->link; 2970 state->duplex = !!(pmsr & PMSR_DPX); 2971 2972 switch (pmsr & PMSR_SPEED_MASK) { 2973 case PMSR_SPEED_10: 2974 
state->speed = SPEED_10; 2975 break; 2976 case PMSR_SPEED_100: 2977 state->speed = SPEED_100; 2978 break; 2979 case PMSR_SPEED_1000: 2980 state->speed = SPEED_1000; 2981 break; 2982 default: 2983 state->speed = SPEED_UNKNOWN; 2984 break; 2985 } 2986 2987 state->pause &= ~(MLO_PAUSE_RX | MLO_PAUSE_TX); 2988 if (pmsr & PMSR_RX_FC) 2989 state->pause |= MLO_PAUSE_RX; 2990 if (pmsr & PMSR_TX_FC) 2991 state->pause |= MLO_PAUSE_TX; 2992 } 2993 2994 static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, 2995 phy_interface_t interface, 2996 const unsigned long *advertising, 2997 bool permit_pause_to_mac) 2998 { 2999 return 0; 3000 } 3001 3002 static void mt7530_pcs_an_restart(struct phylink_pcs *pcs) 3003 { 3004 } 3005 3006 static const struct phylink_pcs_ops mt7530_pcs_ops = { 3007 .pcs_validate = mt753x_pcs_validate, 3008 .pcs_get_state = mt7530_pcs_get_state, 3009 .pcs_config = mt753x_pcs_config, 3010 .pcs_an_restart = mt7530_pcs_an_restart, 3011 }; 3012 3013 static int 3014 mt753x_setup(struct dsa_switch *ds) 3015 { 3016 struct mt7530_priv *priv = ds->priv; 3017 int i, ret; 3018 3019 /* Initialise the PCS devices */ 3020 for (i = 0; i < priv->ds->num_ports; i++) { 3021 priv->pcs[i].pcs.ops = priv->info->pcs_ops; 3022 priv->pcs[i].pcs.neg_mode = true; 3023 priv->pcs[i].priv = priv; 3024 priv->pcs[i].port = i; 3025 } 3026 3027 ret = priv->info->sw_setup(ds); 3028 if (ret) 3029 return ret; 3030 3031 ret = mt7530_setup_irq(priv); 3032 if (ret) 3033 return ret; 3034 3035 ret = mt7530_setup_mdio(priv); 3036 if (ret && priv->irq) 3037 mt7530_free_irq_common(priv); 3038 3039 if (priv->create_sgmii) { 3040 ret = priv->create_sgmii(priv, mt7531_dual_sgmii_supported(priv)); 3041 if (ret && priv->irq) 3042 mt7530_free_irq(priv); 3043 } 3044 3045 return ret; 3046 } 3047 3048 static int mt753x_get_mac_eee(struct dsa_switch *ds, int port, 3049 struct ethtool_eee *e) 3050 { 3051 struct mt7530_priv *priv = ds->priv; 3052 u32 eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port)); 3053 3054 e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN); 3055 e->tx_lpi_timer = GET_LPI_THRESH(eeecr); 3056 3057 return 0; 3058 } 3059 3060 static int mt753x_set_mac_eee(struct dsa_switch *ds, int port, 3061 struct ethtool_eee *e) 3062 { 3063 struct mt7530_priv *priv = ds->priv; 3064 u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN; 3065 3066 if (e->tx_lpi_timer > 0xFFF) 3067 return -EINVAL; 3068 3069 set = SET_LPI_THRESH(e->tx_lpi_timer); 3070 if (!e->tx_lpi_enabled) 3071 /* Force LPI Mode without a delay */ 3072 set |= LPI_MODE_EN; 3073 mt7530_rmw(priv, MT7530_PMEEECR_P(port), mask, set); 3074 3075 return 0; 3076 } 3077 3078 static int mt7988_pad_setup(struct dsa_switch *ds, phy_interface_t interface) 3079 { 3080 return 0; 3081 } 3082 3083 static int mt7988_setup(struct dsa_switch *ds) 3084 { 3085 struct mt7530_priv *priv = ds->priv; 3086 3087 /* Reset the switch */ 3088 reset_control_assert(priv->rstc); 3089 usleep_range(20, 50); 3090 reset_control_deassert(priv->rstc); 3091 usleep_range(20, 50); 3092 3093 /* Reset the switch PHYs */ 3094 mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_PHY_RST); 3095 3096 return mt7531_setup_common(ds); 3097 } 3098 3099 const struct dsa_switch_ops mt7530_switch_ops = { 3100 .get_tag_protocol = mtk_get_tag_protocol, 3101 .setup = mt753x_setup, 3102 .preferred_default_local_cpu_port = mt753x_preferred_default_local_cpu_port, 3103 .get_strings = mt7530_get_strings, 3104 .get_ethtool_stats = mt7530_get_ethtool_stats, 3105 .get_sset_count = mt7530_get_sset_count, 3106 .set_ageing_time = 
mt7530_set_ageing_time, 3107 .port_enable = mt7530_port_enable, 3108 .port_disable = mt7530_port_disable, 3109 .port_change_mtu = mt7530_port_change_mtu, 3110 .port_max_mtu = mt7530_port_max_mtu, 3111 .port_stp_state_set = mt7530_stp_state_set, 3112 .port_pre_bridge_flags = mt7530_port_pre_bridge_flags, 3113 .port_bridge_flags = mt7530_port_bridge_flags, 3114 .port_bridge_join = mt7530_port_bridge_join, 3115 .port_bridge_leave = mt7530_port_bridge_leave, 3116 .port_fdb_add = mt7530_port_fdb_add, 3117 .port_fdb_del = mt7530_port_fdb_del, 3118 .port_fdb_dump = mt7530_port_fdb_dump, 3119 .port_mdb_add = mt7530_port_mdb_add, 3120 .port_mdb_del = mt7530_port_mdb_del, 3121 .port_vlan_filtering = mt7530_port_vlan_filtering, 3122 .port_vlan_add = mt7530_port_vlan_add, 3123 .port_vlan_del = mt7530_port_vlan_del, 3124 .port_mirror_add = mt753x_port_mirror_add, 3125 .port_mirror_del = mt753x_port_mirror_del, 3126 .phylink_get_caps = mt753x_phylink_get_caps, 3127 .phylink_mac_select_pcs = mt753x_phylink_mac_select_pcs, 3128 .phylink_mac_config = mt753x_phylink_mac_config, 3129 .phylink_mac_link_down = mt753x_phylink_mac_link_down, 3130 .phylink_mac_link_up = mt753x_phylink_mac_link_up, 3131 .get_mac_eee = mt753x_get_mac_eee, 3132 .set_mac_eee = mt753x_set_mac_eee, 3133 }; 3134 EXPORT_SYMBOL_GPL(mt7530_switch_ops); 3135 3136 const struct mt753x_info mt753x_table[] = { 3137 [ID_MT7621] = { 3138 .id = ID_MT7621, 3139 .pcs_ops = &mt7530_pcs_ops, 3140 .sw_setup = mt7530_setup, 3141 .phy_read_c22 = mt7530_phy_read_c22, 3142 .phy_write_c22 = mt7530_phy_write_c22, 3143 .phy_read_c45 = mt7530_phy_read_c45, 3144 .phy_write_c45 = mt7530_phy_write_c45, 3145 .pad_setup = mt7530_pad_clk_setup, 3146 .mac_port_get_caps = mt7530_mac_port_get_caps, 3147 .mac_port_config = mt7530_mac_config, 3148 }, 3149 [ID_MT7530] = { 3150 .id = ID_MT7530, 3151 .pcs_ops = &mt7530_pcs_ops, 3152 .sw_setup = mt7530_setup, 3153 .phy_read_c22 = mt7530_phy_read_c22, 3154 .phy_write_c22 = mt7530_phy_write_c22, 3155 .phy_read_c45 = mt7530_phy_read_c45, 3156 .phy_write_c45 = mt7530_phy_write_c45, 3157 .pad_setup = mt7530_pad_clk_setup, 3158 .mac_port_get_caps = mt7530_mac_port_get_caps, 3159 .mac_port_config = mt7530_mac_config, 3160 }, 3161 [ID_MT7531] = { 3162 .id = ID_MT7531, 3163 .pcs_ops = &mt7530_pcs_ops, 3164 .sw_setup = mt7531_setup, 3165 .phy_read_c22 = mt7531_ind_c22_phy_read, 3166 .phy_write_c22 = mt7531_ind_c22_phy_write, 3167 .phy_read_c45 = mt7531_ind_c45_phy_read, 3168 .phy_write_c45 = mt7531_ind_c45_phy_write, 3169 .pad_setup = mt7531_pad_setup, 3170 .cpu_port_config = mt7531_cpu_port_config, 3171 .mac_port_get_caps = mt7531_mac_port_get_caps, 3172 .mac_port_config = mt7531_mac_config, 3173 }, 3174 [ID_MT7988] = { 3175 .id = ID_MT7988, 3176 .pcs_ops = &mt7530_pcs_ops, 3177 .sw_setup = mt7988_setup, 3178 .phy_read_c22 = mt7531_ind_c22_phy_read, 3179 .phy_write_c22 = mt7531_ind_c22_phy_write, 3180 .phy_read_c45 = mt7531_ind_c45_phy_read, 3181 .phy_write_c45 = mt7531_ind_c45_phy_write, 3182 .pad_setup = mt7988_pad_setup, 3183 .cpu_port_config = mt7988_cpu_port_config, 3184 .mac_port_get_caps = mt7988_mac_port_get_caps, 3185 .mac_port_config = mt7988_mac_config, 3186 }, 3187 }; 3188 EXPORT_SYMBOL_GPL(mt753x_table); 3189 3190 int 3191 mt7530_probe_common(struct mt7530_priv *priv) 3192 { 3193 struct device *dev = priv->dev; 3194 3195 priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL); 3196 if (!priv->ds) 3197 return -ENOMEM; 3198 3199 priv->ds->dev = dev; 3200 priv->ds->num_ports = MT7530_NUM_PORTS; 3201 3202 /* Get the 
hardware identifier from the devicetree node.
 * We will need it for some of the clock and regulator setup.
 */
	priv->info = of_device_get_match_data(dev);
	if (!priv->info)
		return -EINVAL;

	/* Sanity check that the required device operations have been
	 * provided.
	 */
	if (!priv->info->sw_setup || !priv->info->pad_setup ||
	    !priv->info->phy_read_c22 || !priv->info->phy_write_c22 ||
	    !priv->info->mac_port_get_caps ||
	    !priv->info->mac_port_config)
		return -EINVAL;

	priv->id = priv->info->id;
	priv->dev = dev;
	priv->ds->priv = priv;
	priv->ds->ops = &mt7530_switch_ops;
	mutex_init(&priv->reg_mutex);
	dev_set_drvdata(dev, priv);

	return 0;
}
EXPORT_SYMBOL_GPL(mt7530_probe_common);

void
mt7530_remove_common(struct mt7530_priv *priv)
{
	if (priv->irq)
		mt7530_free_irq(priv);

	dsa_unregister_switch(priv->ds);

	mutex_destroy(&priv->reg_mutex);
}
EXPORT_SYMBOL_GPL(mt7530_remove_common);

MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch");
MODULE_LICENSE("GPL");
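/* Usage sketch (illustrative only, not part of the driver): the common
 * helpers exported above are intended to be called from a small bus-glue
 * driver.  The names mt753x_example_probe, mt753x_example_remove and
 * mt753x_example_regmap_config below are hypothetical and assumed for this
 * sketch; the in-tree MDIO and MMIO glue drivers do more (regulators,
 * reset GPIO, SGMII PCS creation, IRQ wiring) and may differ in detail.
 */
#if 0	/* illustrative sketch, not compiled */
static const struct regmap_config mt753x_example_regmap_config = {
	/* Assumed 32-bit, word-aligned register layout. */
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

static int mt753x_example_probe(struct platform_device *pdev)
{
	struct mt7530_priv *priv;
	void __iomem *base;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = &pdev->dev;

	/* All register access in this file goes through priv->regmap (see
	 * mt7530_mii_read/mt7530_mii_write), so a memory-mapped variant
	 * wraps its register window in a regmap before anything else
	 * touches the switch.
	 */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
					     &mt753x_example_regmap_config);
	if (IS_ERR(priv->regmap))
		return PTR_ERR(priv->regmap);

	/* Allocates priv->ds, picks up the matching mt753x_table entry via
	 * the OF match data and hooks up mt7530_switch_ops.
	 */
	ret = mt7530_probe_common(priv);
	if (ret)
		return ret;

	priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(priv->rstc))
		return PTR_ERR(priv->rstc);

	/* Error handling after this point is trimmed for brevity. */
	return dsa_register_switch(priv->ds);
}

static void mt753x_example_remove(struct platform_device *pdev)
{
	struct mt7530_priv *priv = platform_get_drvdata(pdev);

	/* Frees the IRQ domain (if one was created), unregisters the switch
	 * and destroys the register mutex.
	 */
	mt7530_remove_common(priv);
}
#endif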