1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Mediatek MT7530 DSA Switch driver 4 * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com> 5 */ 6 #include <linux/etherdevice.h> 7 #include <linux/if_bridge.h> 8 #include <linux/iopoll.h> 9 #include <linux/mdio.h> 10 #include <linux/mfd/syscon.h> 11 #include <linux/module.h> 12 #include <linux/netdevice.h> 13 #include <linux/of_irq.h> 14 #include <linux/of_mdio.h> 15 #include <linux/of_net.h> 16 #include <linux/of_platform.h> 17 #include <linux/phylink.h> 18 #include <linux/regmap.h> 19 #include <linux/regulator/consumer.h> 20 #include <linux/reset.h> 21 #include <linux/gpio/consumer.h> 22 #include <linux/gpio/driver.h> 23 #include <net/dsa.h> 24 25 #include "mt7530.h" 26 27 static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs) 28 { 29 return container_of(pcs, struct mt753x_pcs, pcs); 30 } 31 32 /* String, offset, and register size in bytes if different from 4 bytes */ 33 static const struct mt7530_mib_desc mt7530_mib[] = { 34 MIB_DESC(1, 0x00, "TxDrop"), 35 MIB_DESC(1, 0x04, "TxCrcErr"), 36 MIB_DESC(1, 0x08, "TxUnicast"), 37 MIB_DESC(1, 0x0c, "TxMulticast"), 38 MIB_DESC(1, 0x10, "TxBroadcast"), 39 MIB_DESC(1, 0x14, "TxCollision"), 40 MIB_DESC(1, 0x18, "TxSingleCollision"), 41 MIB_DESC(1, 0x1c, "TxMultipleCollision"), 42 MIB_DESC(1, 0x20, "TxDeferred"), 43 MIB_DESC(1, 0x24, "TxLateCollision"), 44 MIB_DESC(1, 0x28, "TxExcessiveCollistion"), 45 MIB_DESC(1, 0x2c, "TxPause"), 46 MIB_DESC(1, 0x30, "TxPktSz64"), 47 MIB_DESC(1, 0x34, "TxPktSz65To127"), 48 MIB_DESC(1, 0x38, "TxPktSz128To255"), 49 MIB_DESC(1, 0x3c, "TxPktSz256To511"), 50 MIB_DESC(1, 0x40, "TxPktSz512To1023"), 51 MIB_DESC(1, 0x44, "Tx1024ToMax"), 52 MIB_DESC(2, 0x48, "TxBytes"), 53 MIB_DESC(1, 0x60, "RxDrop"), 54 MIB_DESC(1, 0x64, "RxFiltering"), 55 MIB_DESC(1, 0x68, "RxUnicast"), 56 MIB_DESC(1, 0x6c, "RxMulticast"), 57 MIB_DESC(1, 0x70, "RxBroadcast"), 58 MIB_DESC(1, 0x74, "RxAlignErr"), 59 MIB_DESC(1, 0x78, "RxCrcErr"), 60 MIB_DESC(1, 0x7c, "RxUnderSizeErr"), 61 MIB_DESC(1, 0x80, "RxFragErr"), 62 MIB_DESC(1, 0x84, "RxOverSzErr"), 63 MIB_DESC(1, 0x88, "RxJabberErr"), 64 MIB_DESC(1, 0x8c, "RxPause"), 65 MIB_DESC(1, 0x90, "RxPktSz64"), 66 MIB_DESC(1, 0x94, "RxPktSz65To127"), 67 MIB_DESC(1, 0x98, "RxPktSz128To255"), 68 MIB_DESC(1, 0x9c, "RxPktSz256To511"), 69 MIB_DESC(1, 0xa0, "RxPktSz512To1023"), 70 MIB_DESC(1, 0xa4, "RxPktSz1024ToMax"), 71 MIB_DESC(2, 0xa8, "RxBytes"), 72 MIB_DESC(1, 0xb0, "RxCtrlDrop"), 73 MIB_DESC(1, 0xb4, "RxIngressDrop"), 74 MIB_DESC(1, 0xb8, "RxArlDrop"), 75 }; 76 77 /* Since phy_device has not yet been created and 78 * phy_{read,write}_mmd_indirect is not available, we provide our own 79 * core_{read,write}_mmd_indirect with core_{clear,write,set} wrappers 80 * to complete this function. 
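* These indirect accesses all go through PHY address 0 on the parent MDIO
* bus, using the standard Clause 22 MMD access registers MII_MMD_CTRL and
* MII_MMD_DATA.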
81 */ 82 static int 83 core_read_mmd_indirect(struct mt7530_priv *priv, int prtad, int devad) 84 { 85 struct mii_bus *bus = priv->bus; 86 int value, ret; 87 88 /* Write the desired MMD Devad */ 89 ret = bus->write(bus, 0, MII_MMD_CTRL, devad); 90 if (ret < 0) 91 goto err; 92 93 /* Write the desired MMD register address */ 94 ret = bus->write(bus, 0, MII_MMD_DATA, prtad); 95 if (ret < 0) 96 goto err; 97 98 /* Select the Function : DATA with no post increment */ 99 ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR)); 100 if (ret < 0) 101 goto err; 102 103 /* Read the content of the MMD's selected register */ 104 value = bus->read(bus, 0, MII_MMD_DATA); 105 106 return value; 107 err: 108 dev_err(&bus->dev, "failed to read mmd register\n"); 109 110 return ret; 111 } 112 113 static int 114 core_write_mmd_indirect(struct mt7530_priv *priv, int prtad, 115 int devad, u32 data) 116 { 117 struct mii_bus *bus = priv->bus; 118 int ret; 119 120 /* Write the desired MMD Devad */ 121 ret = bus->write(bus, 0, MII_MMD_CTRL, devad); 122 if (ret < 0) 123 goto err; 124 125 /* Write the desired MMD register address */ 126 ret = bus->write(bus, 0, MII_MMD_DATA, prtad); 127 if (ret < 0) 128 goto err; 129 130 /* Select the Function : DATA with no post increment */ 131 ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR)); 132 if (ret < 0) 133 goto err; 134 135 /* Write the data into MMD's selected register */ 136 ret = bus->write(bus, 0, MII_MMD_DATA, data); 137 err: 138 if (ret < 0) 139 dev_err(&bus->dev, 140 "failed to write mmd register\n"); 141 return ret; 142 } 143 144 static void 145 mt7530_mutex_lock(struct mt7530_priv *priv) 146 { 147 if (priv->bus) 148 mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED); 149 } 150 151 static void 152 mt7530_mutex_unlock(struct mt7530_priv *priv) 153 { 154 if (priv->bus) 155 mutex_unlock(&priv->bus->mdio_lock); 156 } 157 158 static void 159 core_write(struct mt7530_priv *priv, u32 reg, u32 val) 160 { 161 mt7530_mutex_lock(priv); 162 163 core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val); 164 165 mt7530_mutex_unlock(priv); 166 } 167 168 static void 169 core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set) 170 { 171 u32 val; 172 173 mt7530_mutex_lock(priv); 174 175 val = core_read_mmd_indirect(priv, reg, MDIO_MMD_VEND2); 176 val &= ~mask; 177 val |= set; 178 core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val); 179 180 mt7530_mutex_unlock(priv); 181 } 182 183 static void 184 core_set(struct mt7530_priv *priv, u32 reg, u32 val) 185 { 186 core_rmw(priv, reg, 0, val); 187 } 188 189 static void 190 core_clear(struct mt7530_priv *priv, u32 reg, u32 val) 191 { 192 core_rmw(priv, reg, val, 0); 193 } 194 195 static int 196 mt7530_mii_write(struct mt7530_priv *priv, u32 reg, u32 val) 197 { 198 int ret; 199 200 ret = regmap_write(priv->regmap, reg, val); 201 202 if (ret < 0) 203 dev_err(priv->dev, 204 "failed to write mt7530 register\n"); 205 206 return ret; 207 } 208 209 static u32 210 mt7530_mii_read(struct mt7530_priv *priv, u32 reg) 211 { 212 int ret; 213 u32 val; 214 215 ret = regmap_read(priv->regmap, reg, &val); 216 if (ret) { 217 WARN_ON_ONCE(1); 218 dev_err(priv->dev, 219 "failed to read mt7530 register\n"); 220 return 0; 221 } 222 223 return val; 224 } 225 226 static void 227 mt7530_write(struct mt7530_priv *priv, u32 reg, u32 val) 228 { 229 mt7530_mutex_lock(priv); 230 231 mt7530_mii_write(priv, reg, val); 232 233 mt7530_mutex_unlock(priv); 234 } 235 236 static u32 237 _mt7530_unlocked_read(struct mt7530_dummy_poll 
*p) 238 { 239 return mt7530_mii_read(p->priv, p->reg); 240 } 241 242 static u32 243 _mt7530_read(struct mt7530_dummy_poll *p) 244 { 245 u32 val; 246 247 mt7530_mutex_lock(p->priv); 248 249 val = mt7530_mii_read(p->priv, p->reg); 250 251 mt7530_mutex_unlock(p->priv); 252 253 return val; 254 } 255 256 static u32 257 mt7530_read(struct mt7530_priv *priv, u32 reg) 258 { 259 struct mt7530_dummy_poll p; 260 261 INIT_MT7530_DUMMY_POLL(&p, priv, reg); 262 return _mt7530_read(&p); 263 } 264 265 static void 266 mt7530_rmw(struct mt7530_priv *priv, u32 reg, 267 u32 mask, u32 set) 268 { 269 mt7530_mutex_lock(priv); 270 271 regmap_update_bits(priv->regmap, reg, mask, set); 272 273 mt7530_mutex_unlock(priv); 274 } 275 276 static void 277 mt7530_set(struct mt7530_priv *priv, u32 reg, u32 val) 278 { 279 mt7530_rmw(priv, reg, val, val); 280 } 281 282 static void 283 mt7530_clear(struct mt7530_priv *priv, u32 reg, u32 val) 284 { 285 mt7530_rmw(priv, reg, val, 0); 286 } 287 288 static int 289 mt7530_fdb_cmd(struct mt7530_priv *priv, enum mt7530_fdb_cmd cmd, u32 *rsp) 290 { 291 u32 val; 292 int ret; 293 struct mt7530_dummy_poll p; 294 295 /* Set the command operating upon the MAC address entries */ 296 val = ATC_BUSY | ATC_MAT(0) | cmd; 297 mt7530_write(priv, MT7530_ATC, val); 298 299 INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_ATC); 300 ret = readx_poll_timeout(_mt7530_read, &p, val, 301 !(val & ATC_BUSY), 20, 20000); 302 if (ret < 0) { 303 dev_err(priv->dev, "reset timeout\n"); 304 return ret; 305 } 306 307 /* Additional sanity for read command if the specified 308 * entry is invalid 309 */ 310 val = mt7530_read(priv, MT7530_ATC); 311 if ((cmd == MT7530_FDB_READ) && (val & ATC_INVALID)) 312 return -EINVAL; 313 314 if (rsp) 315 *rsp = val; 316 317 return 0; 318 } 319 320 static void 321 mt7530_fdb_read(struct mt7530_priv *priv, struct mt7530_fdb *fdb) 322 { 323 u32 reg[3]; 324 int i; 325 326 /* Read from ARL table into an array */ 327 for (i = 0; i < 3; i++) { 328 reg[i] = mt7530_read(priv, MT7530_TSRA1 + (i * 4)); 329 330 dev_dbg(priv->dev, "%s(%d) reg[%d]=0x%x\n", 331 __func__, __LINE__, i, reg[i]); 332 } 333 334 fdb->vid = (reg[1] >> CVID) & CVID_MASK; 335 fdb->aging = (reg[2] >> AGE_TIMER) & AGE_TIMER_MASK; 336 fdb->port_mask = (reg[2] >> PORT_MAP) & PORT_MAP_MASK; 337 fdb->mac[0] = (reg[0] >> MAC_BYTE_0) & MAC_BYTE_MASK; 338 fdb->mac[1] = (reg[0] >> MAC_BYTE_1) & MAC_BYTE_MASK; 339 fdb->mac[2] = (reg[0] >> MAC_BYTE_2) & MAC_BYTE_MASK; 340 fdb->mac[3] = (reg[0] >> MAC_BYTE_3) & MAC_BYTE_MASK; 341 fdb->mac[4] = (reg[1] >> MAC_BYTE_4) & MAC_BYTE_MASK; 342 fdb->mac[5] = (reg[1] >> MAC_BYTE_5) & MAC_BYTE_MASK; 343 fdb->noarp = ((reg[2] >> ENT_STATUS) & ENT_STATUS_MASK) == STATIC_ENT; 344 } 345 346 static void 347 mt7530_fdb_write(struct mt7530_priv *priv, u16 vid, 348 u8 port_mask, const u8 *mac, 349 u8 aging, u8 type) 350 { 351 u32 reg[3] = { 0 }; 352 int i; 353 354 reg[1] |= vid & CVID_MASK; 355 reg[1] |= ATA2_IVL; 356 reg[1] |= ATA2_FID(FID_BRIDGED); 357 reg[2] |= (aging & AGE_TIMER_MASK) << AGE_TIMER; 358 reg[2] |= (port_mask & PORT_MAP_MASK) << PORT_MAP; 359 /* STATIC_ENT indicate that entry is static wouldn't 360 * be aged out and STATIC_EMP specified as erasing an 361 * entry 362 */ 363 reg[2] |= (type & ENT_STATUS_MASK) << ENT_STATUS; 364 reg[1] |= mac[5] << MAC_BYTE_5; 365 reg[1] |= mac[4] << MAC_BYTE_4; 366 reg[0] |= mac[3] << MAC_BYTE_3; 367 reg[0] |= mac[2] << MAC_BYTE_2; 368 reg[0] |= mac[1] << MAC_BYTE_1; 369 reg[0] |= mac[0] << MAC_BYTE_0; 370 371 /* Write array into the ARL table */ 372 for (i = 
0; i < 3; i++) 373 mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]); 374 } 375 376 /* Set up switch core clock for MT7530 */ 377 static void mt7530_pll_setup(struct mt7530_priv *priv) 378 { 379 /* Disable core clock */ 380 core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); 381 382 /* Disable PLL */ 383 core_write(priv, CORE_GSWPLL_GRP1, 0); 384 385 /* Set core clock into 500Mhz */ 386 core_write(priv, CORE_GSWPLL_GRP2, 387 RG_GSWPLL_POSDIV_500M(1) | 388 RG_GSWPLL_FBKDIV_500M(25)); 389 390 /* Enable PLL */ 391 core_write(priv, CORE_GSWPLL_GRP1, 392 RG_GSWPLL_EN_PRE | 393 RG_GSWPLL_POSDIV_200M(2) | 394 RG_GSWPLL_FBKDIV_200M(32)); 395 396 udelay(20); 397 398 /* Enable core clock */ 399 core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); 400 } 401 402 /* If port 6 is available as a CPU port, always prefer that as the default, 403 * otherwise don't care. 404 */ 405 static struct dsa_port * 406 mt753x_preferred_default_local_cpu_port(struct dsa_switch *ds) 407 { 408 struct dsa_port *cpu_dp = dsa_to_port(ds, 6); 409 410 if (dsa_port_is_cpu(cpu_dp)) 411 return cpu_dp; 412 413 return NULL; 414 } 415 416 /* Setup port 6 interface mode and TRGMII TX circuit */ 417 static void 418 mt7530_setup_port6(struct dsa_switch *ds, phy_interface_t interface) 419 { 420 struct mt7530_priv *priv = ds->priv; 421 u32 ncpo1, ssc_delta, xtal; 422 423 /* Disable the MT7530 TRGMII clocks */ 424 core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN); 425 426 if (interface == PHY_INTERFACE_MODE_RGMII) { 427 mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK, 428 P6_INTF_MODE(0)); 429 return; 430 } 431 432 mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK, P6_INTF_MODE(1)); 433 434 xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK; 435 436 if (xtal == HWTRAP_XTAL_25MHZ) 437 ssc_delta = 0x57; 438 else 439 ssc_delta = 0x87; 440 441 if (priv->id == ID_MT7621) { 442 /* PLL frequency: 125MHz: 1.0GBit */ 443 if (xtal == HWTRAP_XTAL_40MHZ) 444 ncpo1 = 0x0640; 445 if (xtal == HWTRAP_XTAL_25MHZ) 446 ncpo1 = 0x0a00; 447 } else { /* PLL frequency: 250MHz: 2.0Gbit */ 448 if (xtal == HWTRAP_XTAL_40MHZ) 449 ncpo1 = 0x0c80; 450 if (xtal == HWTRAP_XTAL_25MHZ) 451 ncpo1 = 0x1400; 452 } 453 454 /* Setup the MT7530 TRGMII Tx Clock */ 455 core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1)); 456 core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0)); 457 core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta)); 458 core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta)); 459 core_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN | 460 RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN); 461 core_write(priv, CORE_PLL_GROUP2, RG_SYSPLL_EN_NORMAL | 462 RG_SYSPLL_VODEN | RG_SYSPLL_POSDIV(1)); 463 core_write(priv, CORE_PLL_GROUP7, RG_LCDDS_PCW_NCPO_CHG | 464 RG_LCCDS_C(3) | RG_LCDDS_PWDB | RG_LCDDS_ISO_EN); 465 466 /* Enable the MT7530 TRGMII clocks */ 467 core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN); 468 } 469 470 static void 471 mt7531_pll_setup(struct mt7530_priv *priv) 472 { 473 u32 top_sig; 474 u32 hwstrap; 475 u32 xtal; 476 u32 val; 477 478 val = mt7530_read(priv, MT7531_CREV); 479 top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR); 480 hwstrap = mt7530_read(priv, MT7531_HWTRAP); 481 if ((val & CHIP_REV_M) > 0) 482 xtal = (top_sig & PAD_MCM_SMI_EN) ? 
HWTRAP_XTAL_FSEL_40MHZ : 483 HWTRAP_XTAL_FSEL_25MHZ; 484 else 485 xtal = hwstrap & HWTRAP_XTAL_FSEL_MASK; 486 487 /* Step 1 : Disable MT7531 COREPLL */ 488 val = mt7530_read(priv, MT7531_PLLGP_EN); 489 val &= ~EN_COREPLL; 490 mt7530_write(priv, MT7531_PLLGP_EN, val); 491 492 /* Step 2: switch to XTAL output */ 493 val = mt7530_read(priv, MT7531_PLLGP_EN); 494 val |= SW_CLKSW; 495 mt7530_write(priv, MT7531_PLLGP_EN, val); 496 497 val = mt7530_read(priv, MT7531_PLLGP_CR0); 498 val &= ~RG_COREPLL_EN; 499 mt7530_write(priv, MT7531_PLLGP_CR0, val); 500 501 /* Step 3: disable PLLGP and enable program PLLGP */ 502 val = mt7530_read(priv, MT7531_PLLGP_EN); 503 val |= SW_PLLGP; 504 mt7530_write(priv, MT7531_PLLGP_EN, val); 505 506 /* Step 4: program COREPLL output frequency to 500MHz */ 507 val = mt7530_read(priv, MT7531_PLLGP_CR0); 508 val &= ~RG_COREPLL_POSDIV_M; 509 val |= 2 << RG_COREPLL_POSDIV_S; 510 mt7530_write(priv, MT7531_PLLGP_CR0, val); 511 usleep_range(25, 35); 512 513 switch (xtal) { 514 case HWTRAP_XTAL_FSEL_25MHZ: 515 val = mt7530_read(priv, MT7531_PLLGP_CR0); 516 val &= ~RG_COREPLL_SDM_PCW_M; 517 val |= 0x140000 << RG_COREPLL_SDM_PCW_S; 518 mt7530_write(priv, MT7531_PLLGP_CR0, val); 519 break; 520 case HWTRAP_XTAL_FSEL_40MHZ: 521 val = mt7530_read(priv, MT7531_PLLGP_CR0); 522 val &= ~RG_COREPLL_SDM_PCW_M; 523 val |= 0x190000 << RG_COREPLL_SDM_PCW_S; 524 mt7530_write(priv, MT7531_PLLGP_CR0, val); 525 break; 526 } 527 528 /* Set feedback divide ratio update signal to high */ 529 val = mt7530_read(priv, MT7531_PLLGP_CR0); 530 val |= RG_COREPLL_SDM_PCW_CHG; 531 mt7530_write(priv, MT7531_PLLGP_CR0, val); 532 /* Wait for at least 16 XTAL clocks */ 533 usleep_range(10, 20); 534 535 /* Step 5: set feedback divide ratio update signal to low */ 536 val = mt7530_read(priv, MT7531_PLLGP_CR0); 537 val &= ~RG_COREPLL_SDM_PCW_CHG; 538 mt7530_write(priv, MT7531_PLLGP_CR0, val); 539 540 /* Enable 325M clock for SGMII */ 541 mt7530_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000); 542 543 /* Enable 250SSC clock for RGMII */ 544 mt7530_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000); 545 546 /* Step 6: Enable MT7531 PLL */ 547 val = mt7530_read(priv, MT7531_PLLGP_CR0); 548 val |= RG_COREPLL_EN; 549 mt7530_write(priv, MT7531_PLLGP_CR0, val); 550 551 val = mt7530_read(priv, MT7531_PLLGP_EN); 552 val |= EN_COREPLL; 553 mt7530_write(priv, MT7531_PLLGP_EN, val); 554 usleep_range(25, 35); 555 } 556 557 static void 558 mt7530_mib_reset(struct dsa_switch *ds) 559 { 560 struct mt7530_priv *priv = ds->priv; 561 562 mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_FLUSH); 563 mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE); 564 } 565 566 static int mt7530_phy_read_c22(struct mt7530_priv *priv, int port, int regnum) 567 { 568 return mdiobus_read_nested(priv->bus, port, regnum); 569 } 570 571 static int mt7530_phy_write_c22(struct mt7530_priv *priv, int port, int regnum, 572 u16 val) 573 { 574 return mdiobus_write_nested(priv->bus, port, regnum, val); 575 } 576 577 static int mt7530_phy_read_c45(struct mt7530_priv *priv, int port, 578 int devad, int regnum) 579 { 580 return mdiobus_c45_read_nested(priv->bus, port, devad, regnum); 581 } 582 583 static int mt7530_phy_write_c45(struct mt7530_priv *priv, int port, int devad, 584 int regnum, u16 val) 585 { 586 return mdiobus_c45_write_nested(priv->bus, port, devad, regnum, val); 587 } 588 589 static int 590 mt7531_ind_c45_phy_read(struct mt7530_priv *priv, int port, int devad, 591 int regnum) 592 { 593 struct mt7530_dummy_poll p; 594 u32 reg, val; 595 int ret; 596 597 
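/* A Clause 45 access is issued as separate IAC commands (address phase,
 * then data phase); MT7531_PHY_ACS_ST must be polled until it clears
 * before and after each command.
 */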
INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC); 598 599 mt7530_mutex_lock(priv); 600 601 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 602 !(val & MT7531_PHY_ACS_ST), 20, 100000); 603 if (ret < 0) { 604 dev_err(priv->dev, "poll timeout\n"); 605 goto out; 606 } 607 608 reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) | 609 MT7531_MDIO_DEV_ADDR(devad) | regnum; 610 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST); 611 612 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 613 !(val & MT7531_PHY_ACS_ST), 20, 100000); 614 if (ret < 0) { 615 dev_err(priv->dev, "poll timeout\n"); 616 goto out; 617 } 618 619 reg = MT7531_MDIO_CL45_READ | MT7531_MDIO_PHY_ADDR(port) | 620 MT7531_MDIO_DEV_ADDR(devad); 621 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST); 622 623 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 624 !(val & MT7531_PHY_ACS_ST), 20, 100000); 625 if (ret < 0) { 626 dev_err(priv->dev, "poll timeout\n"); 627 goto out; 628 } 629 630 ret = val & MT7531_MDIO_RW_DATA_MASK; 631 out: 632 mt7530_mutex_unlock(priv); 633 634 return ret; 635 } 636 637 static int 638 mt7531_ind_c45_phy_write(struct mt7530_priv *priv, int port, int devad, 639 int regnum, u16 data) 640 { 641 struct mt7530_dummy_poll p; 642 u32 val, reg; 643 int ret; 644 645 INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC); 646 647 mt7530_mutex_lock(priv); 648 649 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 650 !(val & MT7531_PHY_ACS_ST), 20, 100000); 651 if (ret < 0) { 652 dev_err(priv->dev, "poll timeout\n"); 653 goto out; 654 } 655 656 reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) | 657 MT7531_MDIO_DEV_ADDR(devad) | regnum; 658 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST); 659 660 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 661 !(val & MT7531_PHY_ACS_ST), 20, 100000); 662 if (ret < 0) { 663 dev_err(priv->dev, "poll timeout\n"); 664 goto out; 665 } 666 667 reg = MT7531_MDIO_CL45_WRITE | MT7531_MDIO_PHY_ADDR(port) | 668 MT7531_MDIO_DEV_ADDR(devad) | data; 669 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST); 670 671 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 672 !(val & MT7531_PHY_ACS_ST), 20, 100000); 673 if (ret < 0) { 674 dev_err(priv->dev, "poll timeout\n"); 675 goto out; 676 } 677 678 out: 679 mt7530_mutex_unlock(priv); 680 681 return ret; 682 } 683 684 static int 685 mt7531_ind_c22_phy_read(struct mt7530_priv *priv, int port, int regnum) 686 { 687 struct mt7530_dummy_poll p; 688 int ret; 689 u32 val; 690 691 INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC); 692 693 mt7530_mutex_lock(priv); 694 695 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 696 !(val & MT7531_PHY_ACS_ST), 20, 100000); 697 if (ret < 0) { 698 dev_err(priv->dev, "poll timeout\n"); 699 goto out; 700 } 701 702 val = MT7531_MDIO_CL22_READ | MT7531_MDIO_PHY_ADDR(port) | 703 MT7531_MDIO_REG_ADDR(regnum); 704 705 mt7530_mii_write(priv, MT7531_PHY_IAC, val | MT7531_PHY_ACS_ST); 706 707 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val, 708 !(val & MT7531_PHY_ACS_ST), 20, 100000); 709 if (ret < 0) { 710 dev_err(priv->dev, "poll timeout\n"); 711 goto out; 712 } 713 714 ret = val & MT7531_MDIO_RW_DATA_MASK; 715 out: 716 mt7530_mutex_unlock(priv); 717 718 return ret; 719 } 720 721 static int 722 mt7531_ind_c22_phy_write(struct mt7530_priv *priv, int port, int regnum, 723 u16 data) 724 { 725 struct mt7530_dummy_poll p; 726 int ret; 727 u32 reg; 728 729 INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC); 730 731 
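/* Unlike Clause 45, a Clause 22 write is a single IAC command that
 * carries both the register address and the data.
 */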
mt7530_mutex_lock(priv); 732 733 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg, 734 !(reg & MT7531_PHY_ACS_ST), 20, 100000); 735 if (ret < 0) { 736 dev_err(priv->dev, "poll timeout\n"); 737 goto out; 738 } 739 740 reg = MT7531_MDIO_CL22_WRITE | MT7531_MDIO_PHY_ADDR(port) | 741 MT7531_MDIO_REG_ADDR(regnum) | data; 742 743 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST); 744 745 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg, 746 !(reg & MT7531_PHY_ACS_ST), 20, 100000); 747 if (ret < 0) { 748 dev_err(priv->dev, "poll timeout\n"); 749 goto out; 750 } 751 752 out: 753 mt7530_mutex_unlock(priv); 754 755 return ret; 756 } 757 758 static int 759 mt753x_phy_read_c22(struct mii_bus *bus, int port, int regnum) 760 { 761 struct mt7530_priv *priv = bus->priv; 762 763 return priv->info->phy_read_c22(priv, port, regnum); 764 } 765 766 static int 767 mt753x_phy_read_c45(struct mii_bus *bus, int port, int devad, int regnum) 768 { 769 struct mt7530_priv *priv = bus->priv; 770 771 return priv->info->phy_read_c45(priv, port, devad, regnum); 772 } 773 774 static int 775 mt753x_phy_write_c22(struct mii_bus *bus, int port, int regnum, u16 val) 776 { 777 struct mt7530_priv *priv = bus->priv; 778 779 return priv->info->phy_write_c22(priv, port, regnum, val); 780 } 781 782 static int 783 mt753x_phy_write_c45(struct mii_bus *bus, int port, int devad, int regnum, 784 u16 val) 785 { 786 struct mt7530_priv *priv = bus->priv; 787 788 return priv->info->phy_write_c45(priv, port, devad, regnum, val); 789 } 790 791 static void 792 mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset, 793 uint8_t *data) 794 { 795 int i; 796 797 if (stringset != ETH_SS_STATS) 798 return; 799 800 for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) 801 ethtool_puts(&data, mt7530_mib[i].name); 802 } 803 804 static void 805 mt7530_get_ethtool_stats(struct dsa_switch *ds, int port, 806 uint64_t *data) 807 { 808 struct mt7530_priv *priv = ds->priv; 809 const struct mt7530_mib_desc *mib; 810 u32 reg, i; 811 u64 hi; 812 813 for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) { 814 mib = &mt7530_mib[i]; 815 reg = MT7530_PORT_MIB_COUNTER(port) + mib->offset; 816 817 data[i] = mt7530_read(priv, reg); 818 if (mib->size == 2) { 819 hi = mt7530_read(priv, reg + 4); 820 data[i] |= hi << 32; 821 } 822 } 823 } 824 825 static int 826 mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset) 827 { 828 if (sset != ETH_SS_STATS) 829 return 0; 830 831 return ARRAY_SIZE(mt7530_mib); 832 } 833 834 static int 835 mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) 836 { 837 struct mt7530_priv *priv = ds->priv; 838 unsigned int secs = msecs / 1000; 839 unsigned int tmp_age_count; 840 unsigned int error = -1; 841 unsigned int age_count; 842 unsigned int age_unit; 843 844 /* Applied timer is (AGE_CNT + 1) * (AGE_UNIT + 1) seconds */ 845 if (secs < 1 || secs > (AGE_CNT_MAX + 1) * (AGE_UNIT_MAX + 1)) 846 return -ERANGE; 847 848 /* iterate through all possible age_count to find the closest pair */ 849 for (tmp_age_count = 0; tmp_age_count <= AGE_CNT_MAX; ++tmp_age_count) { 850 unsigned int tmp_age_unit = secs / (tmp_age_count + 1) - 1; 851 852 if (tmp_age_unit <= AGE_UNIT_MAX) { 853 unsigned int tmp_error = secs - 854 (tmp_age_count + 1) * (tmp_age_unit + 1); 855 856 /* found a closer pair */ 857 if (error > tmp_error) { 858 error = tmp_error; 859 age_count = tmp_age_count; 860 age_unit = tmp_age_unit; 861 } 862 863 /* found the exact match, so break the loop */ 864 if (!error) 865 break; 866 } 867 } 868 869 
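/* Program the closest (AGE_CNT, AGE_UNIT) pair found above */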
mt7530_write(priv, MT7530_AAC, AGE_CNT(age_count) | AGE_UNIT(age_unit)); 870 871 return 0; 872 } 873 874 static const char *p5_intf_modes(unsigned int p5_interface) 875 { 876 switch (p5_interface) { 877 case P5_DISABLED: 878 return "DISABLED"; 879 case P5_INTF_SEL_PHY_P0: 880 return "PHY P0"; 881 case P5_INTF_SEL_PHY_P4: 882 return "PHY P4"; 883 case P5_INTF_SEL_GMAC5: 884 return "GMAC5"; 885 default: 886 return "unknown"; 887 } 888 } 889 890 static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface) 891 { 892 struct mt7530_priv *priv = ds->priv; 893 u8 tx_delay = 0; 894 int val; 895 896 mutex_lock(&priv->reg_mutex); 897 898 val = mt7530_read(priv, MT7530_MHWTRAP); 899 900 val |= MHWTRAP_MANUAL | MHWTRAP_P5_MAC_SEL | MHWTRAP_P5_DIS; 901 val &= ~MHWTRAP_P5_RGMII_MODE & ~MHWTRAP_PHY0_SEL; 902 903 switch (priv->p5_intf_sel) { 904 case P5_INTF_SEL_PHY_P0: 905 /* MT7530_P5_MODE_GPHY_P0: 2nd GMAC -> P5 -> P0 */ 906 val |= MHWTRAP_PHY0_SEL; 907 fallthrough; 908 case P5_INTF_SEL_PHY_P4: 909 /* MT7530_P5_MODE_GPHY_P4: 2nd GMAC -> P5 -> P4 */ 910 val &= ~MHWTRAP_P5_MAC_SEL & ~MHWTRAP_P5_DIS; 911 912 /* Setup the MAC by default for the cpu port */ 913 mt7530_write(priv, MT7530_PMCR_P(5), 0x56300); 914 break; 915 case P5_INTF_SEL_GMAC5: 916 /* MT7530_P5_MODE_GMAC: P5 -> External phy or 2nd GMAC */ 917 val &= ~MHWTRAP_P5_DIS; 918 break; 919 default: 920 break; 921 } 922 923 /* Setup RGMII settings */ 924 if (phy_interface_mode_is_rgmii(interface)) { 925 val |= MHWTRAP_P5_RGMII_MODE; 926 927 /* P5 RGMII RX Clock Control: delay setting for 1000M */ 928 mt7530_write(priv, MT7530_P5RGMIIRXCR, CSR_RGMII_EDGE_ALIGN); 929 930 /* Don't set delay in DSA mode */ 931 if (!dsa_is_dsa_port(priv->ds, 5) && 932 (interface == PHY_INTERFACE_MODE_RGMII_TXID || 933 interface == PHY_INTERFACE_MODE_RGMII_ID)) 934 tx_delay = 4; /* n * 0.5 ns */ 935 936 /* P5 RGMII TX Clock Control: delay x */ 937 mt7530_write(priv, MT7530_P5RGMIITXCR, 938 CSR_RGMII_TXC_CFG(0x10 + tx_delay)); 939 940 /* reduce P5 RGMII Tx driving, 8mA */ 941 mt7530_write(priv, MT7530_IO_DRV_CR, 942 P5_IO_CLK_DRV(1) | P5_IO_DATA_DRV(1)); 943 } 944 945 mt7530_write(priv, MT7530_MHWTRAP, val); 946 947 dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, intf_sel=%s, phy-mode=%s\n", 948 val, p5_intf_modes(priv->p5_intf_sel), phy_modes(interface)); 949 950 mutex_unlock(&priv->reg_mutex); 951 } 952 953 static void 954 mt753x_trap_frames(struct mt7530_priv *priv) 955 { 956 /* Trap BPDUs to the CPU port(s) */ 957 mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK, 958 MT753X_BPDU_CPU_ONLY); 959 960 /* Trap 802.1X PAE frames to the CPU port(s) */ 961 mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_PORT_FW_MASK, 962 MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY)); 963 964 /* Trap LLDP frames with :0E MAC DA to the CPU port(s) */ 965 mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK, 966 MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY)); 967 } 968 969 static void 970 mt753x_cpu_port_enable(struct dsa_switch *ds, int port) 971 { 972 struct mt7530_priv *priv = ds->priv; 973 974 /* Enable Mediatek header mode on the cpu port */ 975 mt7530_write(priv, MT7530_PVC_P(port), 976 PORT_SPEC_TAG); 977 978 /* Enable flooding on the CPU port */ 979 mt7530_set(priv, MT7530_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) | 980 UNU_FFP(BIT(port))); 981 982 /* Add the CPU port to the CPU port bitmap for MT7531 and the switch on 983 * the MT7988 SoC. Trapped frames will be forwarded to the CPU port that 984 * is affine to the inbound user port. 
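* mt7530_set() only sets bits, so the bitmap accumulates one bit per
* enabled CPU port.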
985 */ 986 if (priv->id == ID_MT7531 || priv->id == ID_MT7988) 987 mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port))); 988 989 /* CPU port gets connected to all user ports of 990 * the switch. 991 */ 992 mt7530_write(priv, MT7530_PCR_P(port), 993 PCR_MATRIX(dsa_user_ports(priv->ds))); 994 995 /* Set to fallback mode for independent VLAN learning */ 996 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, 997 MT7530_PORT_FALLBACK_MODE); 998 } 999 1000 static int 1001 mt7530_port_enable(struct dsa_switch *ds, int port, 1002 struct phy_device *phy) 1003 { 1004 struct dsa_port *dp = dsa_to_port(ds, port); 1005 struct mt7530_priv *priv = ds->priv; 1006 1007 mutex_lock(&priv->reg_mutex); 1008 1009 /* Allow the user port to get connected to the CPU port and also 1010 * restore the port matrix if the port is a member of a certain 1011 * bridge. 1012 */ 1013 if (dsa_port_is_user(dp)) { 1014 struct dsa_port *cpu_dp = dp->cpu_dp; 1015 1016 priv->ports[port].pm |= PCR_MATRIX(BIT(cpu_dp->index)); 1017 } 1018 priv->ports[port].enable = true; 1019 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, 1020 priv->ports[port].pm); 1021 1022 mutex_unlock(&priv->reg_mutex); 1023 1024 return 0; 1025 } 1026 1027 static void 1028 mt7530_port_disable(struct dsa_switch *ds, int port) 1029 { 1030 struct mt7530_priv *priv = ds->priv; 1031 1032 mutex_lock(&priv->reg_mutex); 1033 1034 /* Clear the port matrix; the saved matrix will be restored the next 1035 * time the port is enabled. 1036 */ 1037 priv->ports[port].enable = false; 1038 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, 1039 PCR_MATRIX_CLR); 1040 1041 mutex_unlock(&priv->reg_mutex); 1042 } 1043 1044 static int 1045 mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) 1046 { 1047 struct mt7530_priv *priv = ds->priv; 1048 int length; 1049 u32 val; 1050 1051 /* When a new MTU is set, DSA always sets the CPU port's MTU to the 1052 * largest MTU of the user ports. Because the switch only has a global 1053 * RX length register, handling only the CPU port here is enough.
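* The maximum MTU reported to DSA is MT7530_MAX_MTU, see
* mt7530_port_max_mtu() below.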
1054 */ 1055 if (!dsa_is_cpu_port(ds, port)) 1056 return 0; 1057 1058 mt7530_mutex_lock(priv); 1059 1060 val = mt7530_mii_read(priv, MT7530_GMACCR); 1061 val &= ~MAX_RX_PKT_LEN_MASK; 1062 1063 /* RX length also includes Ethernet header, MTK tag, and FCS length */ 1064 length = new_mtu + ETH_HLEN + MTK_HDR_LEN + ETH_FCS_LEN; 1065 if (length <= 1522) { 1066 val |= MAX_RX_PKT_LEN_1522; 1067 } else if (length <= 1536) { 1068 val |= MAX_RX_PKT_LEN_1536; 1069 } else if (length <= 1552) { 1070 val |= MAX_RX_PKT_LEN_1552; 1071 } else { 1072 val &= ~MAX_RX_JUMBO_MASK; 1073 val |= MAX_RX_JUMBO(DIV_ROUND_UP(length, 1024)); 1074 val |= MAX_RX_PKT_LEN_JUMBO; 1075 } 1076 1077 mt7530_mii_write(priv, MT7530_GMACCR, val); 1078 1079 mt7530_mutex_unlock(priv); 1080 1081 return 0; 1082 } 1083 1084 static int 1085 mt7530_port_max_mtu(struct dsa_switch *ds, int port) 1086 { 1087 return MT7530_MAX_MTU; 1088 } 1089 1090 static void 1091 mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state) 1092 { 1093 struct mt7530_priv *priv = ds->priv; 1094 u32 stp_state; 1095 1096 switch (state) { 1097 case BR_STATE_DISABLED: 1098 stp_state = MT7530_STP_DISABLED; 1099 break; 1100 case BR_STATE_BLOCKING: 1101 stp_state = MT7530_STP_BLOCKING; 1102 break; 1103 case BR_STATE_LISTENING: 1104 stp_state = MT7530_STP_LISTENING; 1105 break; 1106 case BR_STATE_LEARNING: 1107 stp_state = MT7530_STP_LEARNING; 1108 break; 1109 case BR_STATE_FORWARDING: 1110 default: 1111 stp_state = MT7530_STP_FORWARDING; 1112 break; 1113 } 1114 1115 mt7530_rmw(priv, MT7530_SSP_P(port), FID_PST_MASK(FID_BRIDGED), 1116 FID_PST(FID_BRIDGED, stp_state)); 1117 } 1118 1119 static int 1120 mt7530_port_pre_bridge_flags(struct dsa_switch *ds, int port, 1121 struct switchdev_brport_flags flags, 1122 struct netlink_ext_ack *extack) 1123 { 1124 if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | 1125 BR_BCAST_FLOOD)) 1126 return -EINVAL; 1127 1128 return 0; 1129 } 1130 1131 static int 1132 mt7530_port_bridge_flags(struct dsa_switch *ds, int port, 1133 struct switchdev_brport_flags flags, 1134 struct netlink_ext_ack *extack) 1135 { 1136 struct mt7530_priv *priv = ds->priv; 1137 1138 if (flags.mask & BR_LEARNING) 1139 mt7530_rmw(priv, MT7530_PSC_P(port), SA_DIS, 1140 flags.val & BR_LEARNING ? 0 : SA_DIS); 1141 1142 if (flags.mask & BR_FLOOD) 1143 mt7530_rmw(priv, MT7530_MFC, UNU_FFP(BIT(port)), 1144 flags.val & BR_FLOOD ? UNU_FFP(BIT(port)) : 0); 1145 1146 if (flags.mask & BR_MCAST_FLOOD) 1147 mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)), 1148 flags.val & BR_MCAST_FLOOD ? UNM_FFP(BIT(port)) : 0); 1149 1150 if (flags.mask & BR_BCAST_FLOOD) 1151 mt7530_rmw(priv, MT7530_MFC, BC_FFP(BIT(port)), 1152 flags.val & BR_BCAST_FLOOD ? BC_FFP(BIT(port)) : 0); 1153 1154 return 0; 1155 } 1156 1157 static int 1158 mt7530_port_bridge_join(struct dsa_switch *ds, int port, 1159 struct dsa_bridge bridge, bool *tx_fwd_offload, 1160 struct netlink_ext_ack *extack) 1161 { 1162 struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; 1163 struct dsa_port *cpu_dp = dp->cpu_dp; 1164 u32 port_bitmap = BIT(cpu_dp->index); 1165 struct mt7530_priv *priv = ds->priv; 1166 1167 mutex_lock(&priv->reg_mutex); 1168 1169 dsa_switch_for_each_user_port(other_dp, ds) { 1170 int other_port = other_dp->index; 1171 1172 if (dp == other_dp) 1173 continue; 1174 1175 /* Add this port to the port matrix of the other ports in the 1176 * same bridge. If the port is disabled, port matrix is kept 1177 * and not being setup until the port becomes enabled. 
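* The stored pm mask is written to the port's PCR register once
* mt7530_port_enable() runs.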
1178 */ 1179 if (!dsa_port_offloads_bridge(other_dp, &bridge)) 1180 continue; 1181 1182 if (priv->ports[other_port].enable) 1183 mt7530_set(priv, MT7530_PCR_P(other_port), 1184 PCR_MATRIX(BIT(port))); 1185 priv->ports[other_port].pm |= PCR_MATRIX(BIT(port)); 1186 1187 port_bitmap |= BIT(other_port); 1188 } 1189 1190 /* Add all the other ports to this port's matrix. */ 1191 if (priv->ports[port].enable) 1192 mt7530_rmw(priv, MT7530_PCR_P(port), 1193 PCR_MATRIX_MASK, PCR_MATRIX(port_bitmap)); 1194 priv->ports[port].pm |= PCR_MATRIX(port_bitmap); 1195 1196 /* Set to fallback mode for independent VLAN learning */ 1197 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, 1198 MT7530_PORT_FALLBACK_MODE); 1199 1200 mutex_unlock(&priv->reg_mutex); 1201 1202 return 0; 1203 } 1204 1205 static void 1206 mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port) 1207 { 1208 struct mt7530_priv *priv = ds->priv; 1209 bool all_user_ports_removed = true; 1210 int i; 1211 1212 /* This is called after .port_bridge_leave when leaving a VLAN-aware 1213 * bridge. Don't set standalone ports to fallback mode. 1214 */ 1215 if (dsa_port_bridge_dev_get(dsa_to_port(ds, port))) 1216 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, 1217 MT7530_PORT_FALLBACK_MODE); 1218 1219 mt7530_rmw(priv, MT7530_PVC_P(port), 1220 VLAN_ATTR_MASK | PVC_EG_TAG_MASK | ACC_FRM_MASK, 1221 VLAN_ATTR(MT7530_VLAN_TRANSPARENT) | 1222 PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT) | 1223 MT7530_VLAN_ACC_ALL); 1224 1225 /* Set PVID to 0 */ 1226 mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, 1227 G0_PORT_VID_DEF); 1228 1229 for (i = 0; i < MT7530_NUM_PORTS; i++) { 1230 if (dsa_is_user_port(ds, i) && 1231 dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) { 1232 all_user_ports_removed = false; 1233 break; 1234 } 1235 } 1236 1237 /* The CPU port is set back to the same VLAN-unaware defaults once all 1238 * user ports belonging to it have left VLAN filtering mode. 1239 */ 1240 if (all_user_ports_removed) { 1241 struct dsa_port *dp = dsa_to_port(ds, port); 1242 struct dsa_port *cpu_dp = dp->cpu_dp; 1243 1244 mt7530_write(priv, MT7530_PCR_P(cpu_dp->index), 1245 PCR_MATRIX(dsa_user_ports(priv->ds))); 1246 mt7530_write(priv, MT7530_PVC_P(cpu_dp->index), PORT_SPEC_TAG 1247 | PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); 1248 } 1249 } 1250 1251 static void 1252 mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port) 1253 { 1254 struct mt7530_priv *priv = ds->priv; 1255 1256 /* Putting the port into security mode makes packet forwarding subject 1257 * to the VLAN table lookup. 1258 */ 1259 if (dsa_is_user_port(ds, port)) { 1260 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, 1261 MT7530_PORT_SECURITY_MODE); 1262 mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, 1263 G0_PORT_VID(priv->ports[port].pvid)); 1264 1265 /* Only accept tagged frames if PVID is not set */ 1266 if (!priv->ports[port].pvid) 1267 mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK, 1268 MT7530_VLAN_ACC_TAGGED); 1269 1270 /* Set the port as a user port so that the VID of incoming 1271 * packets is recognized before the entry is fetched from the 1272 * VLAN table. 1273 */ 1274 mt7530_rmw(priv, MT7530_PVC_P(port), 1275 VLAN_ATTR_MASK | PVC_EG_TAG_MASK, 1276 VLAN_ATTR(MT7530_VLAN_USER) | 1277 PVC_EG_TAG(MT7530_VLAN_EG_DISABLED)); 1278 } else { 1279 /* Also set CPU ports to the "user" VLAN port attribute, to 1280 * allow VLAN classification, but keep the EG_TAG attribute as 1281 * "consistent" (i.o.w.
don't change its value) for packets 1282 * received by the switch from the CPU, so that tagged packets 1283 * are forwarded to user ports as tagged, and untagged as 1284 * untagged. 1285 */ 1286 mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK, 1287 VLAN_ATTR(MT7530_VLAN_USER)); 1288 } 1289 } 1290 1291 static void 1292 mt7530_port_bridge_leave(struct dsa_switch *ds, int port, 1293 struct dsa_bridge bridge) 1294 { 1295 struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; 1296 struct dsa_port *cpu_dp = dp->cpu_dp; 1297 struct mt7530_priv *priv = ds->priv; 1298 1299 mutex_lock(&priv->reg_mutex); 1300 1301 dsa_switch_for_each_user_port(other_dp, ds) { 1302 int other_port = other_dp->index; 1303 1304 if (dp == other_dp) 1305 continue; 1306 1307 /* Remove this port from the port matrix of the other ports 1308 * in the same bridge. If the port is disabled, port matrix 1309 * is kept and not being setup until the port becomes enabled. 1310 */ 1311 if (!dsa_port_offloads_bridge(other_dp, &bridge)) 1312 continue; 1313 1314 if (priv->ports[other_port].enable) 1315 mt7530_clear(priv, MT7530_PCR_P(other_port), 1316 PCR_MATRIX(BIT(port))); 1317 priv->ports[other_port].pm &= ~PCR_MATRIX(BIT(port)); 1318 } 1319 1320 /* Set the cpu port to be the only one in the port matrix of 1321 * this port. 1322 */ 1323 if (priv->ports[port].enable) 1324 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, 1325 PCR_MATRIX(BIT(cpu_dp->index))); 1326 priv->ports[port].pm = PCR_MATRIX(BIT(cpu_dp->index)); 1327 1328 /* When a port is removed from the bridge, the port would be set up 1329 * back to the default as is at initial boot which is a VLAN-unaware 1330 * port. 1331 */ 1332 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, 1333 MT7530_PORT_MATRIX_MODE); 1334 1335 mutex_unlock(&priv->reg_mutex); 1336 } 1337 1338 static int 1339 mt7530_port_fdb_add(struct dsa_switch *ds, int port, 1340 const unsigned char *addr, u16 vid, 1341 struct dsa_db db) 1342 { 1343 struct mt7530_priv *priv = ds->priv; 1344 int ret; 1345 u8 port_mask = BIT(port); 1346 1347 mutex_lock(&priv->reg_mutex); 1348 mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT); 1349 ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); 1350 mutex_unlock(&priv->reg_mutex); 1351 1352 return ret; 1353 } 1354 1355 static int 1356 mt7530_port_fdb_del(struct dsa_switch *ds, int port, 1357 const unsigned char *addr, u16 vid, 1358 struct dsa_db db) 1359 { 1360 struct mt7530_priv *priv = ds->priv; 1361 int ret; 1362 u8 port_mask = BIT(port); 1363 1364 mutex_lock(&priv->reg_mutex); 1365 mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_EMP); 1366 ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); 1367 mutex_unlock(&priv->reg_mutex); 1368 1369 return ret; 1370 } 1371 1372 static int 1373 mt7530_port_fdb_dump(struct dsa_switch *ds, int port, 1374 dsa_fdb_dump_cb_t *cb, void *data) 1375 { 1376 struct mt7530_priv *priv = ds->priv; 1377 struct mt7530_fdb _fdb = { 0 }; 1378 int cnt = MT7530_NUM_FDB_RECORDS; 1379 int ret = 0; 1380 u32 rsp = 0; 1381 1382 mutex_lock(&priv->reg_mutex); 1383 1384 ret = mt7530_fdb_cmd(priv, MT7530_FDB_START, &rsp); 1385 if (ret < 0) 1386 goto err; 1387 1388 do { 1389 if (rsp & ATC_SRCH_HIT) { 1390 mt7530_fdb_read(priv, &_fdb); 1391 if (_fdb.port_mask & BIT(port)) { 1392 ret = cb(_fdb.mac, _fdb.vid, _fdb.noarp, 1393 data); 1394 if (ret < 0) 1395 break; 1396 } 1397 } 1398 } while (--cnt && 1399 !(rsp & ATC_SRCH_END) && 1400 !mt7530_fdb_cmd(priv, MT7530_FDB_NEXT, &rsp)); 1401 err: 1402 mutex_unlock(&priv->reg_mutex); 1403 1404 
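/* The dump callback always reports success; a failed ATC search or a
 * callback error only ends the walk early.
 */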
return 0; 1405 } 1406 1407 static int 1408 mt7530_port_mdb_add(struct dsa_switch *ds, int port, 1409 const struct switchdev_obj_port_mdb *mdb, 1410 struct dsa_db db) 1411 { 1412 struct mt7530_priv *priv = ds->priv; 1413 const u8 *addr = mdb->addr; 1414 u16 vid = mdb->vid; 1415 u8 port_mask = 0; 1416 int ret; 1417 1418 mutex_lock(&priv->reg_mutex); 1419 1420 mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP); 1421 if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL)) 1422 port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP) 1423 & PORT_MAP_MASK; 1424 1425 port_mask |= BIT(port); 1426 mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT); 1427 ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); 1428 1429 mutex_unlock(&priv->reg_mutex); 1430 1431 return ret; 1432 } 1433 1434 static int 1435 mt7530_port_mdb_del(struct dsa_switch *ds, int port, 1436 const struct switchdev_obj_port_mdb *mdb, 1437 struct dsa_db db) 1438 { 1439 struct mt7530_priv *priv = ds->priv; 1440 const u8 *addr = mdb->addr; 1441 u16 vid = mdb->vid; 1442 u8 port_mask = 0; 1443 int ret; 1444 1445 mutex_lock(&priv->reg_mutex); 1446 1447 mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP); 1448 if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL)) 1449 port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP) 1450 & PORT_MAP_MASK; 1451 1452 port_mask &= ~BIT(port); 1453 mt7530_fdb_write(priv, vid, port_mask, addr, -1, 1454 port_mask ? STATIC_ENT : STATIC_EMP); 1455 ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); 1456 1457 mutex_unlock(&priv->reg_mutex); 1458 1459 return ret; 1460 } 1461 1462 static int 1463 mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid) 1464 { 1465 struct mt7530_dummy_poll p; 1466 u32 val; 1467 int ret; 1468 1469 val = VTCR_BUSY | VTCR_FUNC(cmd) | vid; 1470 mt7530_write(priv, MT7530_VTCR, val); 1471 1472 INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_VTCR); 1473 ret = readx_poll_timeout(_mt7530_read, &p, val, 1474 !(val & VTCR_BUSY), 20, 20000); 1475 if (ret < 0) { 1476 dev_err(priv->dev, "poll timeout\n"); 1477 return ret; 1478 } 1479 1480 val = mt7530_read(priv, MT7530_VTCR); 1481 if (val & VTCR_INVALID) { 1482 dev_err(priv->dev, "read VTCR invalid\n"); 1483 return -EINVAL; 1484 } 1485 1486 return 0; 1487 } 1488 1489 static int 1490 mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, 1491 struct netlink_ext_ack *extack) 1492 { 1493 struct dsa_port *dp = dsa_to_port(ds, port); 1494 struct dsa_port *cpu_dp = dp->cpu_dp; 1495 1496 if (vlan_filtering) { 1497 /* The port is kept as a VLAN-unaware port when the bridge is 1498 * set up with vlan_filtering not being set. Otherwise, the 1499 * port and the corresponding CPU port need to be set up as 1500 * VLAN-aware ports. 1501 */ 1502 mt7530_port_set_vlan_aware(ds, port); 1503 mt7530_port_set_vlan_aware(ds, cpu_dp->index); 1504 } else { 1505 mt7530_port_set_vlan_unaware(ds, port); 1506 } 1507 1508 return 0; 1509 } 1510 1511 static void 1512 mt7530_hw_vlan_add(struct mt7530_priv *priv, 1513 struct mt7530_hw_vlan_entry *entry) 1514 { 1515 struct dsa_port *dp = dsa_to_port(priv->ds, entry->port); 1516 u8 new_members; 1517 u32 val; 1518 1519 new_members = entry->old_members | BIT(entry->port); 1520 1521 /* Validate the entry with independent learning, create the egress tag 1522 * per VLAN, and join the port as one of the port members.
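* FID(FID_BRIDGED) ties the VLAN to the filter ID used for bridged
* traffic.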
1523 */ 1524 val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | FID(FID_BRIDGED) | 1525 VLAN_VALID; 1526 mt7530_write(priv, MT7530_VAWD1, val); 1527 1528 /* Decide whether or not to tag the outgoing packets from the port 1529 * inside the VLAN. 1530 * The CPU port is always treated as a tagged port since it serves more 1531 * than one VLAN, and the "stack" egress mode is applied so that the 1532 * VLAN tag is appended after the hardware special tag used as the 1533 * DSA tag. 1534 */ 1535 if (dsa_port_is_cpu(dp)) 1536 val = MT7530_VLAN_EGRESS_STACK; 1537 else if (entry->untagged) 1538 val = MT7530_VLAN_EGRESS_UNTAG; 1539 else 1540 val = MT7530_VLAN_EGRESS_TAG; 1541 mt7530_rmw(priv, MT7530_VAWD2, 1542 ETAG_CTRL_P_MASK(entry->port), 1543 ETAG_CTRL_P(entry->port, val)); 1544 } 1545 1546 static void 1547 mt7530_hw_vlan_del(struct mt7530_priv *priv, 1548 struct mt7530_hw_vlan_entry *entry) 1549 { 1550 u8 new_members; 1551 u32 val; 1552 1553 new_members = entry->old_members & ~BIT(entry->port); 1554 1555 val = mt7530_read(priv, MT7530_VAWD1); 1556 if (!(val & VLAN_VALID)) { 1557 dev_err(priv->dev, 1558 "Cannot be deleted due to invalid entry\n"); 1559 return; 1560 } 1561 1562 if (new_members) { 1563 val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | 1564 VLAN_VALID; 1565 mt7530_write(priv, MT7530_VAWD1, val); 1566 } else { 1567 mt7530_write(priv, MT7530_VAWD1, 0); 1568 mt7530_write(priv, MT7530_VAWD2, 0); 1569 } 1570 } 1571 1572 static void 1573 mt7530_hw_vlan_update(struct mt7530_priv *priv, u16 vid, 1574 struct mt7530_hw_vlan_entry *entry, 1575 mt7530_vlan_op vlan_op) 1576 { 1577 u32 val; 1578 1579 /* Fetch entry */ 1580 mt7530_vlan_cmd(priv, MT7530_VTCR_RD_VID, vid); 1581 1582 val = mt7530_read(priv, MT7530_VAWD1); 1583 1584 entry->old_members = (val >> PORT_MEM_SHFT) & PORT_MEM_MASK; 1585 1586 /* Manipulate entry */ 1587 vlan_op(priv, entry); 1588 1589 /* Flush result to hardware */ 1590 mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, vid); 1591 } 1592 1593 static int 1594 mt7530_setup_vlan0(struct mt7530_priv *priv) 1595 { 1596 u32 val; 1597 1598 /* Validate the entry with independent learning, keep the original 1599 * ingress tag attribute.
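* All ports are made members of this VID 0 entry.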
1600 */ 1601 val = IVL_MAC | EG_CON | PORT_MEM(MT7530_ALL_MEMBERS) | FID(FID_BRIDGED) | 1602 VLAN_VALID; 1603 mt7530_write(priv, MT7530_VAWD1, val); 1604 1605 return mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, 0); 1606 } 1607 1608 static int 1609 mt7530_port_vlan_add(struct dsa_switch *ds, int port, 1610 const struct switchdev_obj_port_vlan *vlan, 1611 struct netlink_ext_ack *extack) 1612 { 1613 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 1614 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; 1615 struct mt7530_hw_vlan_entry new_entry; 1616 struct mt7530_priv *priv = ds->priv; 1617 1618 mutex_lock(&priv->reg_mutex); 1619 1620 mt7530_hw_vlan_entry_init(&new_entry, port, untagged); 1621 mt7530_hw_vlan_update(priv, vlan->vid, &new_entry, mt7530_hw_vlan_add); 1622 1623 if (pvid) { 1624 priv->ports[port].pvid = vlan->vid; 1625 1626 /* Accept all frames if PVID is set */ 1627 mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK, 1628 MT7530_VLAN_ACC_ALL); 1629 1630 /* Only configure PVID if VLAN filtering is enabled */ 1631 if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) 1632 mt7530_rmw(priv, MT7530_PPBV1_P(port), 1633 G0_PORT_VID_MASK, 1634 G0_PORT_VID(vlan->vid)); 1635 } else if (vlan->vid && priv->ports[port].pvid == vlan->vid) { 1636 /* This VLAN is overwritten without PVID, so unset it */ 1637 priv->ports[port].pvid = G0_PORT_VID_DEF; 1638 1639 /* Only accept tagged frames if the port is VLAN-aware */ 1640 if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) 1641 mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK, 1642 MT7530_VLAN_ACC_TAGGED); 1643 1644 mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, 1645 G0_PORT_VID_DEF); 1646 } 1647 1648 mutex_unlock(&priv->reg_mutex); 1649 1650 return 0; 1651 } 1652 1653 static int 1654 mt7530_port_vlan_del(struct dsa_switch *ds, int port, 1655 const struct switchdev_obj_port_vlan *vlan) 1656 { 1657 struct mt7530_hw_vlan_entry target_entry; 1658 struct mt7530_priv *priv = ds->priv; 1659 1660 mutex_lock(&priv->reg_mutex); 1661 1662 mt7530_hw_vlan_entry_init(&target_entry, port, 0); 1663 mt7530_hw_vlan_update(priv, vlan->vid, &target_entry, 1664 mt7530_hw_vlan_del); 1665 1666 /* PVID is being restored to the default whenever the PVID port 1667 * is being removed from the VLAN. 1668 */ 1669 if (priv->ports[port].pvid == vlan->vid) { 1670 priv->ports[port].pvid = G0_PORT_VID_DEF; 1671 1672 /* Only accept tagged frames if the port is VLAN-aware */ 1673 if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) 1674 mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK, 1675 MT7530_VLAN_ACC_TAGGED); 1676 1677 mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, 1678 G0_PORT_VID_DEF); 1679 } 1680 1681 1682 mutex_unlock(&priv->reg_mutex); 1683 1684 return 0; 1685 } 1686 1687 static int mt753x_mirror_port_get(unsigned int id, u32 val) 1688 { 1689 return (id == ID_MT7531) ? MT7531_MIRROR_PORT_GET(val) : 1690 MIRROR_PORT(val); 1691 } 1692 1693 static int mt753x_mirror_port_set(unsigned int id, u32 val) 1694 { 1695 return (id == ID_MT7531) ? MT7531_MIRROR_PORT_SET(val) : 1696 MIRROR_PORT(val); 1697 } 1698 1699 static int mt753x_port_mirror_add(struct dsa_switch *ds, int port, 1700 struct dsa_mall_mirror_tc_entry *mirror, 1701 bool ingress, struct netlink_ext_ack *extack) 1702 { 1703 struct mt7530_priv *priv = ds->priv; 1704 int monitor_port; 1705 u32 val; 1706 1707 /* Check for existent entry */ 1708 if ((ingress ? 
priv->mirror_rx : priv->mirror_tx) & BIT(port)) 1709 return -EEXIST; 1710 1711 val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id)); 1712 1713 /* MT7530 only supports one monitor port */ 1714 monitor_port = mt753x_mirror_port_get(priv->id, val); 1715 if (val & MT753X_MIRROR_EN(priv->id) && 1716 monitor_port != mirror->to_local_port) 1717 return -EEXIST; 1718 1719 val |= MT753X_MIRROR_EN(priv->id); 1720 val &= ~MT753X_MIRROR_MASK(priv->id); 1721 val |= mt753x_mirror_port_set(priv->id, mirror->to_local_port); 1722 mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val); 1723 1724 val = mt7530_read(priv, MT7530_PCR_P(port)); 1725 if (ingress) { 1726 val |= PORT_RX_MIR; 1727 priv->mirror_rx |= BIT(port); 1728 } else { 1729 val |= PORT_TX_MIR; 1730 priv->mirror_tx |= BIT(port); 1731 } 1732 mt7530_write(priv, MT7530_PCR_P(port), val); 1733 1734 return 0; 1735 } 1736 1737 static void mt753x_port_mirror_del(struct dsa_switch *ds, int port, 1738 struct dsa_mall_mirror_tc_entry *mirror) 1739 { 1740 struct mt7530_priv *priv = ds->priv; 1741 u32 val; 1742 1743 val = mt7530_read(priv, MT7530_PCR_P(port)); 1744 if (mirror->ingress) { 1745 val &= ~PORT_RX_MIR; 1746 priv->mirror_rx &= ~BIT(port); 1747 } else { 1748 val &= ~PORT_TX_MIR; 1749 priv->mirror_tx &= ~BIT(port); 1750 } 1751 mt7530_write(priv, MT7530_PCR_P(port), val); 1752 1753 if (!priv->mirror_rx && !priv->mirror_tx) { 1754 val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id)); 1755 val &= ~MT753X_MIRROR_EN(priv->id); 1756 mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val); 1757 } 1758 } 1759 1760 static enum dsa_tag_protocol 1761 mtk_get_tag_protocol(struct dsa_switch *ds, int port, 1762 enum dsa_tag_protocol mp) 1763 { 1764 return DSA_TAG_PROTO_MTK; 1765 } 1766 1767 #ifdef CONFIG_GPIOLIB 1768 static inline u32 1769 mt7530_gpio_to_bit(unsigned int offset) 1770 { 1771 /* Map GPIO offset to register bit 1772 * [ 2: 0] port 0 LED 0..2 as GPIO 0..2 1773 * [ 6: 4] port 1 LED 0..2 as GPIO 3..5 1774 * [10: 8] port 2 LED 0..2 as GPIO 6..8 1775 * [14:12] port 3 LED 0..2 as GPIO 9..11 1776 * [18:16] port 4 LED 0..2 as GPIO 12..14 1777 */ 1778 return BIT(offset + offset / 3); 1779 } 1780 1781 static int 1782 mt7530_gpio_get(struct gpio_chip *gc, unsigned int offset) 1783 { 1784 struct mt7530_priv *priv = gpiochip_get_data(gc); 1785 u32 bit = mt7530_gpio_to_bit(offset); 1786 1787 return !!(mt7530_read(priv, MT7530_LED_GPIO_DATA) & bit); 1788 } 1789 1790 static void 1791 mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) 1792 { 1793 struct mt7530_priv *priv = gpiochip_get_data(gc); 1794 u32 bit = mt7530_gpio_to_bit(offset); 1795 1796 if (value) 1797 mt7530_set(priv, MT7530_LED_GPIO_DATA, bit); 1798 else 1799 mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit); 1800 } 1801 1802 static int 1803 mt7530_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) 1804 { 1805 struct mt7530_priv *priv = gpiochip_get_data(gc); 1806 u32 bit = mt7530_gpio_to_bit(offset); 1807 1808 return (mt7530_read(priv, MT7530_LED_GPIO_DIR) & bit) ? 
1809 GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN; 1810 } 1811 1812 static int 1813 mt7530_gpio_direction_input(struct gpio_chip *gc, unsigned int offset) 1814 { 1815 struct mt7530_priv *priv = gpiochip_get_data(gc); 1816 u32 bit = mt7530_gpio_to_bit(offset); 1817 1818 mt7530_clear(priv, MT7530_LED_GPIO_OE, bit); 1819 mt7530_clear(priv, MT7530_LED_GPIO_DIR, bit); 1820 1821 return 0; 1822 } 1823 1824 static int 1825 mt7530_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value) 1826 { 1827 struct mt7530_priv *priv = gpiochip_get_data(gc); 1828 u32 bit = mt7530_gpio_to_bit(offset); 1829 1830 mt7530_set(priv, MT7530_LED_GPIO_DIR, bit); 1831 1832 if (value) 1833 mt7530_set(priv, MT7530_LED_GPIO_DATA, bit); 1834 else 1835 mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit); 1836 1837 mt7530_set(priv, MT7530_LED_GPIO_OE, bit); 1838 1839 return 0; 1840 } 1841 1842 static int 1843 mt7530_setup_gpio(struct mt7530_priv *priv) 1844 { 1845 struct device *dev = priv->dev; 1846 struct gpio_chip *gc; 1847 1848 gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL); 1849 if (!gc) 1850 return -ENOMEM; 1851 1852 mt7530_write(priv, MT7530_LED_GPIO_OE, 0); 1853 mt7530_write(priv, MT7530_LED_GPIO_DIR, 0); 1854 mt7530_write(priv, MT7530_LED_IO_MODE, 0); 1855 1856 gc->label = "mt7530"; 1857 gc->parent = dev; 1858 gc->owner = THIS_MODULE; 1859 gc->get_direction = mt7530_gpio_get_direction; 1860 gc->direction_input = mt7530_gpio_direction_input; 1861 gc->direction_output = mt7530_gpio_direction_output; 1862 gc->get = mt7530_gpio_get; 1863 gc->set = mt7530_gpio_set; 1864 gc->base = -1; 1865 gc->ngpio = 15; 1866 gc->can_sleep = true; 1867 1868 return devm_gpiochip_add_data(dev, gc, priv); 1869 } 1870 #endif /* CONFIG_GPIOLIB */ 1871 1872 static irqreturn_t 1873 mt7530_irq_thread_fn(int irq, void *dev_id) 1874 { 1875 struct mt7530_priv *priv = dev_id; 1876 bool handled = false; 1877 u32 val; 1878 int p; 1879 1880 mt7530_mutex_lock(priv); 1881 val = mt7530_mii_read(priv, MT7530_SYS_INT_STS); 1882 mt7530_mii_write(priv, MT7530_SYS_INT_STS, val); 1883 mt7530_mutex_unlock(priv); 1884 1885 for (p = 0; p < MT7530_NUM_PHYS; p++) { 1886 if (BIT(p) & val) { 1887 unsigned int irq; 1888 1889 irq = irq_find_mapping(priv->irq_domain, p); 1890 handle_nested_irq(irq); 1891 handled = true; 1892 } 1893 } 1894 1895 return IRQ_RETVAL(handled); 1896 } 1897 1898 static void 1899 mt7530_irq_mask(struct irq_data *d) 1900 { 1901 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); 1902 1903 priv->irq_enable &= ~BIT(d->hwirq); 1904 } 1905 1906 static void 1907 mt7530_irq_unmask(struct irq_data *d) 1908 { 1909 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); 1910 1911 priv->irq_enable |= BIT(d->hwirq); 1912 } 1913 1914 static void 1915 mt7530_irq_bus_lock(struct irq_data *d) 1916 { 1917 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); 1918 1919 mt7530_mutex_lock(priv); 1920 } 1921 1922 static void 1923 mt7530_irq_bus_sync_unlock(struct irq_data *d) 1924 { 1925 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); 1926 1927 mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable); 1928 mt7530_mutex_unlock(priv); 1929 } 1930 1931 static struct irq_chip mt7530_irq_chip = { 1932 .name = KBUILD_MODNAME, 1933 .irq_mask = mt7530_irq_mask, 1934 .irq_unmask = mt7530_irq_unmask, 1935 .irq_bus_lock = mt7530_irq_bus_lock, 1936 .irq_bus_sync_unlock = mt7530_irq_bus_sync_unlock, 1937 }; 1938 1939 static int 1940 mt7530_irq_map(struct irq_domain *domain, unsigned int irq, 1941 irq_hw_number_t hwirq) 1942 { 1943 
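/* The demultiplexed PHY interrupts are serviced from the parent's
 * threaded handler, so they are set up as nested threaded IRQs.
 */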
irq_set_chip_data(irq, domain->host_data); 1944 irq_set_chip_and_handler(irq, &mt7530_irq_chip, handle_simple_irq); 1945 irq_set_nested_thread(irq, true); 1946 irq_set_noprobe(irq); 1947 1948 return 0; 1949 } 1950 1951 static const struct irq_domain_ops mt7530_irq_domain_ops = { 1952 .map = mt7530_irq_map, 1953 .xlate = irq_domain_xlate_onecell, 1954 }; 1955 1956 static void 1957 mt7988_irq_mask(struct irq_data *d) 1958 { 1959 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); 1960 1961 priv->irq_enable &= ~BIT(d->hwirq); 1962 mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable); 1963 } 1964 1965 static void 1966 mt7988_irq_unmask(struct irq_data *d) 1967 { 1968 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); 1969 1970 priv->irq_enable |= BIT(d->hwirq); 1971 mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable); 1972 } 1973 1974 static struct irq_chip mt7988_irq_chip = { 1975 .name = KBUILD_MODNAME, 1976 .irq_mask = mt7988_irq_mask, 1977 .irq_unmask = mt7988_irq_unmask, 1978 }; 1979 1980 static int 1981 mt7988_irq_map(struct irq_domain *domain, unsigned int irq, 1982 irq_hw_number_t hwirq) 1983 { 1984 irq_set_chip_data(irq, domain->host_data); 1985 irq_set_chip_and_handler(irq, &mt7988_irq_chip, handle_simple_irq); 1986 irq_set_nested_thread(irq, true); 1987 irq_set_noprobe(irq); 1988 1989 return 0; 1990 } 1991 1992 static const struct irq_domain_ops mt7988_irq_domain_ops = { 1993 .map = mt7988_irq_map, 1994 .xlate = irq_domain_xlate_onecell, 1995 }; 1996 1997 static void 1998 mt7530_setup_mdio_irq(struct mt7530_priv *priv) 1999 { 2000 struct dsa_switch *ds = priv->ds; 2001 int p; 2002 2003 for (p = 0; p < MT7530_NUM_PHYS; p++) { 2004 if (BIT(p) & ds->phys_mii_mask) { 2005 unsigned int irq; 2006 2007 irq = irq_create_mapping(priv->irq_domain, p); 2008 ds->user_mii_bus->irq[p] = irq; 2009 } 2010 } 2011 } 2012 2013 static int 2014 mt7530_setup_irq(struct mt7530_priv *priv) 2015 { 2016 struct device *dev = priv->dev; 2017 struct device_node *np = dev->of_node; 2018 int ret; 2019 2020 if (!of_property_read_bool(np, "interrupt-controller")) { 2021 dev_info(dev, "no interrupt support\n"); 2022 return 0; 2023 } 2024 2025 priv->irq = of_irq_get(np, 0); 2026 if (priv->irq <= 0) { 2027 dev_err(dev, "failed to get parent IRQ: %d\n", priv->irq); 2028 return priv->irq ? 
: -EINVAL; 2029 } 2030 2031 if (priv->id == ID_MT7988) 2032 priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS, 2033 &mt7988_irq_domain_ops, 2034 priv); 2035 else 2036 priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS, 2037 &mt7530_irq_domain_ops, 2038 priv); 2039 2040 if (!priv->irq_domain) { 2041 dev_err(dev, "failed to create IRQ domain\n"); 2042 return -ENOMEM; 2043 } 2044 2045 /* This register must be set for MT7530 to properly fire interrupts */ 2046 if (priv->id == ID_MT7530 || priv->id == ID_MT7621) 2047 mt7530_set(priv, MT7530_TOP_SIG_CTRL, TOP_SIG_CTRL_NORMAL); 2048 2049 ret = request_threaded_irq(priv->irq, NULL, mt7530_irq_thread_fn, 2050 IRQF_ONESHOT, KBUILD_MODNAME, priv); 2051 if (ret) { 2052 irq_domain_remove(priv->irq_domain); 2053 dev_err(dev, "failed to request IRQ: %d\n", ret); 2054 return ret; 2055 } 2056 2057 return 0; 2058 } 2059 2060 static void 2061 mt7530_free_mdio_irq(struct mt7530_priv *priv) 2062 { 2063 int p; 2064 2065 for (p = 0; p < MT7530_NUM_PHYS; p++) { 2066 if (BIT(p) & priv->ds->phys_mii_mask) { 2067 unsigned int irq; 2068 2069 irq = irq_find_mapping(priv->irq_domain, p); 2070 irq_dispose_mapping(irq); 2071 } 2072 } 2073 } 2074 2075 static void 2076 mt7530_free_irq_common(struct mt7530_priv *priv) 2077 { 2078 free_irq(priv->irq, priv); 2079 irq_domain_remove(priv->irq_domain); 2080 } 2081 2082 static void 2083 mt7530_free_irq(struct mt7530_priv *priv) 2084 { 2085 struct device_node *mnp, *np = priv->dev->of_node; 2086 2087 mnp = of_get_child_by_name(np, "mdio"); 2088 if (!mnp) 2089 mt7530_free_mdio_irq(priv); 2090 of_node_put(mnp); 2091 2092 mt7530_free_irq_common(priv); 2093 } 2094 2095 static int 2096 mt7530_setup_mdio(struct mt7530_priv *priv) 2097 { 2098 struct device_node *mnp, *np = priv->dev->of_node; 2099 struct dsa_switch *ds = priv->ds; 2100 struct device *dev = priv->dev; 2101 struct mii_bus *bus; 2102 static int idx; 2103 int ret = 0; 2104 2105 mnp = of_get_child_by_name(np, "mdio"); 2106 2107 if (mnp && !of_device_is_available(mnp)) 2108 goto out; 2109 2110 bus = devm_mdiobus_alloc(dev); 2111 if (!bus) { 2112 ret = -ENOMEM; 2113 goto out; 2114 } 2115 2116 if (!mnp) 2117 ds->user_mii_bus = bus; 2118 2119 bus->priv = priv; 2120 bus->name = KBUILD_MODNAME "-mii"; 2121 snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++); 2122 bus->read = mt753x_phy_read_c22; 2123 bus->write = mt753x_phy_write_c22; 2124 bus->read_c45 = mt753x_phy_read_c45; 2125 bus->write_c45 = mt753x_phy_write_c45; 2126 bus->parent = dev; 2127 bus->phy_mask = ~ds->phys_mii_mask; 2128 2129 if (priv->irq && !mnp) 2130 mt7530_setup_mdio_irq(priv); 2131 2132 ret = devm_of_mdiobus_register(dev, bus, mnp); 2133 if (ret) { 2134 dev_err(dev, "failed to register MDIO bus: %d\n", ret); 2135 if (priv->irq && !mnp) 2136 mt7530_free_mdio_irq(priv); 2137 } 2138 2139 out: 2140 of_node_put(mnp); 2141 return ret; 2142 } 2143 2144 static int 2145 mt7530_setup(struct dsa_switch *ds) 2146 { 2147 struct mt7530_priv *priv = ds->priv; 2148 struct device_node *dn = NULL; 2149 struct device_node *phy_node; 2150 struct device_node *mac_np; 2151 struct mt7530_dummy_poll p; 2152 phy_interface_t interface; 2153 struct dsa_port *cpu_dp; 2154 u32 id, val; 2155 int ret, i; 2156 2157 /* The parent node of conduit netdev which holds the common system 2158 * controller also is the container for two GMACs nodes representing 2159 * as two netdev instances. 
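 * The node is scanned below for a GMAC1 child and its PHY in order to
 * decide whether port 5 is wired for PHY muxing.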
2160 */ 2161 dsa_switch_for_each_cpu_port(cpu_dp, ds) { 2162 dn = cpu_dp->conduit->dev.of_node->parent; 2163 /* It doesn't matter which CPU port is found first, 2164 * as their conduits should share the same parent OF node 2165 */ 2166 break; 2167 } 2168 2169 if (!dn) { 2170 dev_err(ds->dev, "parent OF node of DSA conduit not found"); 2171 return -EINVAL; 2172 } 2173 2174 ds->assisted_learning_on_cpu_port = true; 2175 ds->mtu_enforcement_ingress = true; 2176 2177 if (priv->id == ID_MT7530) { 2178 regulator_set_voltage(priv->core_pwr, 1000000, 1000000); 2179 ret = regulator_enable(priv->core_pwr); 2180 if (ret < 0) { 2181 dev_err(priv->dev, 2182 "Failed to enable core power: %d\n", ret); 2183 return ret; 2184 } 2185 2186 regulator_set_voltage(priv->io_pwr, 3300000, 3300000); 2187 ret = regulator_enable(priv->io_pwr); 2188 if (ret < 0) { 2189 dev_err(priv->dev, "Failed to enable io pwr: %d\n", 2190 ret); 2191 return ret; 2192 } 2193 } 2194 2195 /* Disable LEDs before reset to prevent the MT7530 sampling a 2196 * potentially incorrect HT_XTAL_FSEL value. 2197 */ 2198 mt7530_write(priv, MT7530_LED_EN, 0); 2199 usleep_range(1000, 1100); 2200 2201 /* Reset the whole chip through the GPIO pin or the memory-mapped 2202 * registers, depending on the type of hardware 2203 */ 2204 if (priv->mcm) { 2205 reset_control_assert(priv->rstc); 2206 usleep_range(1000, 1100); 2207 reset_control_deassert(priv->rstc); 2208 } else { 2209 gpiod_set_value_cansleep(priv->reset, 0); 2210 usleep_range(1000, 1100); 2211 gpiod_set_value_cansleep(priv->reset, 1); 2212 } 2213 2214 /* Wait for the MT7530 to become stable */ 2215 INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP); 2216 ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0, 2217 20, 1000000); 2218 if (ret < 0) { 2219 dev_err(priv->dev, "reset timeout\n"); 2220 return ret; 2221 } 2222 2223 id = mt7530_read(priv, MT7530_CREV); 2224 id >>= CHIP_NAME_SHIFT; 2225 if (id != MT7530_ID) { 2226 dev_err(priv->dev, "chip %x can't be supported\n", id); 2227 return -ENODEV; 2228 } 2229 2230 if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_20MHZ) { 2231 dev_err(priv->dev, 2232 "MT7530 with a 20MHz XTAL is not supported!\n"); 2233 return -EINVAL; 2234 } 2235 2236 /* Reset the switch through internal reset */ 2237 mt7530_write(priv, MT7530_SYS_CTRL, 2238 SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST | 2239 SYS_CTRL_REG_RST); 2240 2241 mt7530_pll_setup(priv); 2242 2243 /* Lower Tx driving for TRGMII path */ 2244 for (i = 0; i < NUM_TRGMII_CTRL; i++) 2245 mt7530_write(priv, MT7530_TRGMII_TD_ODT(i), 2246 TD_DM_DRVP(8) | TD_DM_DRVN(8)); 2247 2248 for (i = 0; i < NUM_TRGMII_CTRL; i++) 2249 mt7530_rmw(priv, MT7530_TRGMII_RD(i), 2250 RD_TAP_MASK, RD_TAP(16)); 2251 2252 /* Enable port 6 */ 2253 val = mt7530_read(priv, MT7530_MHWTRAP); 2254 val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS; 2255 val |= MHWTRAP_MANUAL; 2256 mt7530_write(priv, MT7530_MHWTRAP, val); 2257 2258 mt753x_trap_frames(priv); 2259 2260 /* Enable and reset MIB counters */ 2261 mt7530_mib_reset(ds); 2262 2263 for (i = 0; i < MT7530_NUM_PORTS; i++) { 2264 /* Clear link settings and enable force mode to force link down 2265 * on all ports until they're enabled later.
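 * (The link settings are programmed again from mt753x_phylink_mac_link_up()
 * once phylink brings a port up.)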
2266 */ 2267 mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK | 2268 PMCR_FORCE_MODE, PMCR_FORCE_MODE); 2269 2270 /* Disable forwarding by default on all ports */ 2271 mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK, 2272 PCR_MATRIX_CLR); 2273 2274 /* Disable learning by default on all ports */ 2275 mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); 2276 2277 if (dsa_is_cpu_port(ds, i)) { 2278 mt753x_cpu_port_enable(ds, i); 2279 } else { 2280 mt7530_port_disable(ds, i); 2281 2282 /* Set default PVID to 0 on all user ports */ 2283 mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK, 2284 G0_PORT_VID_DEF); 2285 } 2286 /* Enable consistent egress tag */ 2287 mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK, 2288 PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); 2289 } 2290 2291 /* Setup VLAN ID 0 for VLAN-unaware bridges */ 2292 ret = mt7530_setup_vlan0(priv); 2293 if (ret) 2294 return ret; 2295 2296 /* Setup port 5 */ 2297 if (!dsa_is_unused_port(ds, 5)) { 2298 priv->p5_intf_sel = P5_INTF_SEL_GMAC5; 2299 } else { 2300 /* Scan the ethernet nodes. Look for GMAC1, lookup the used PHY. 2301 * Set priv->p5_intf_sel to the appropriate value if PHY muxing 2302 * is detected. 2303 */ 2304 for_each_child_of_node(dn, mac_np) { 2305 if (!of_device_is_compatible(mac_np, 2306 "mediatek,eth-mac")) 2307 continue; 2308 2309 ret = of_property_read_u32(mac_np, "reg", &id); 2310 if (ret < 0 || id != 1) 2311 continue; 2312 2313 phy_node = of_parse_phandle(mac_np, "phy-handle", 0); 2314 if (!phy_node) 2315 continue; 2316 2317 if (phy_node->parent == priv->dev->of_node->parent) { 2318 ret = of_get_phy_mode(mac_np, &interface); 2319 if (ret && ret != -ENODEV) { 2320 of_node_put(mac_np); 2321 of_node_put(phy_node); 2322 return ret; 2323 } 2324 id = of_mdio_parse_addr(ds->dev, phy_node); 2325 if (id == 0) 2326 priv->p5_intf_sel = P5_INTF_SEL_PHY_P0; 2327 if (id == 4) 2328 priv->p5_intf_sel = P5_INTF_SEL_PHY_P4; 2329 } 2330 of_node_put(mac_np); 2331 of_node_put(phy_node); 2332 break; 2333 } 2334 2335 if (priv->p5_intf_sel == P5_INTF_SEL_PHY_P0 || 2336 priv->p5_intf_sel == P5_INTF_SEL_PHY_P4) 2337 mt7530_setup_port5(ds, interface); 2338 } 2339 2340 #ifdef CONFIG_GPIOLIB 2341 if (of_property_read_bool(priv->dev->of_node, "gpio-controller")) { 2342 ret = mt7530_setup_gpio(priv); 2343 if (ret) 2344 return ret; 2345 } 2346 #endif /* CONFIG_GPIOLIB */ 2347 2348 /* Flush the FDB table */ 2349 ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL); 2350 if (ret < 0) 2351 return ret; 2352 2353 return 0; 2354 } 2355 2356 static int 2357 mt7531_setup_common(struct dsa_switch *ds) 2358 { 2359 struct mt7530_priv *priv = ds->priv; 2360 int ret, i; 2361 2362 mt753x_trap_frames(priv); 2363 2364 /* Enable and reset MIB counters */ 2365 mt7530_mib_reset(ds); 2366 2367 /* Disable flooding on all ports */ 2368 mt7530_clear(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK | 2369 UNU_FFP_MASK); 2370 2371 for (i = 0; i < MT7530_NUM_PORTS; i++) { 2372 /* Clear link settings and enable force mode to force link down 2373 * on all ports until they're enabled later. 
2374 */ 2375 mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK | 2376 MT7531_FORCE_MODE, MT7531_FORCE_MODE); 2377 2378 /* Disable forwarding by default on all ports */ 2379 mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK, 2380 PCR_MATRIX_CLR); 2381 2382 /* Disable learning by default on all ports */ 2383 mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); 2384 2385 mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR); 2386 2387 if (dsa_is_cpu_port(ds, i)) { 2388 mt753x_cpu_port_enable(ds, i); 2389 } else { 2390 mt7530_port_disable(ds, i); 2391 2392 /* Set default PVID to 0 on all user ports */ 2393 mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK, 2394 G0_PORT_VID_DEF); 2395 } 2396 2397 /* Enable consistent egress tag */ 2398 mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK, 2399 PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); 2400 } 2401 2402 /* Flush the FDB table */ 2403 ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL); 2404 if (ret < 0) 2405 return ret; 2406 2407 return 0; 2408 } 2409 2410 static int 2411 mt7531_setup(struct dsa_switch *ds) 2412 { 2413 struct mt7530_priv *priv = ds->priv; 2414 struct mt7530_dummy_poll p; 2415 u32 val, id; 2416 int ret, i; 2417 2418 /* Reset whole chip through gpio pin or memory-mapped registers for 2419 * different type of hardware 2420 */ 2421 if (priv->mcm) { 2422 reset_control_assert(priv->rstc); 2423 usleep_range(1000, 1100); 2424 reset_control_deassert(priv->rstc); 2425 } else { 2426 gpiod_set_value_cansleep(priv->reset, 0); 2427 usleep_range(1000, 1100); 2428 gpiod_set_value_cansleep(priv->reset, 1); 2429 } 2430 2431 /* Waiting for MT7530 got to stable */ 2432 INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP); 2433 ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0, 2434 20, 1000000); 2435 if (ret < 0) { 2436 dev_err(priv->dev, "reset timeout\n"); 2437 return ret; 2438 } 2439 2440 id = mt7530_read(priv, MT7531_CREV); 2441 id >>= CHIP_NAME_SHIFT; 2442 2443 if (id != MT7531_ID) { 2444 dev_err(priv->dev, "chip %x can't be supported\n", id); 2445 return -ENODEV; 2446 } 2447 2448 /* MT7531AE has got two SGMII units. One for port 5, one for port 6. 2449 * MT7531BE has got only one SGMII unit which is for port 6. 2450 */ 2451 val = mt7530_read(priv, MT7531_TOP_SIG_SR); 2452 priv->p5_sgmii = !!(val & PAD_DUAL_SGMII_EN); 2453 2454 /* Force link down on all ports before internal reset */ 2455 for (i = 0; i < MT7530_NUM_PORTS; i++) 2456 mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK); 2457 2458 /* Reset the switch through internal reset */ 2459 mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_SW_RST | SYS_CTRL_REG_RST); 2460 2461 if (!priv->p5_sgmii) { 2462 mt7531_pll_setup(priv); 2463 } else { 2464 /* Let ds->user_mii_bus be able to access external phy. */ 2465 mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK, 2466 MT7531_EXT_P_MDC_11); 2467 mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK, 2468 MT7531_EXT_P_MDIO_12); 2469 } 2470 2471 if (!dsa_is_unused_port(ds, 5)) 2472 priv->p5_intf_sel = P5_INTF_SEL_GMAC5; 2473 2474 mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK, 2475 MT7531_GPIO0_INTERRUPT); 2476 2477 /* Enable PHY core PLL, since phy_device has not yet been created 2478 * provided for phy_[read,write]_mmd_indirect is called, we provide 2479 * our own mt7531_ind_mmd_phy_[read,write] to complete this 2480 * function. 
2481 */ 2482 val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR, 2483 MDIO_MMD_VEND2, CORE_PLL_GROUP4); 2484 val |= MT7531_PHY_PLL_BYPASS_MODE; 2485 val &= ~MT7531_PHY_PLL_OFF; 2486 mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2, 2487 CORE_PLL_GROUP4, val); 2488 2489 mt7531_setup_common(ds); 2490 2491 /* Setup VLAN ID 0 for VLAN-unaware bridges */ 2492 ret = mt7530_setup_vlan0(priv); 2493 if (ret) 2494 return ret; 2495 2496 ds->assisted_learning_on_cpu_port = true; 2497 ds->mtu_enforcement_ingress = true; 2498 2499 return 0; 2500 } 2501 2502 static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port, 2503 struct phylink_config *config) 2504 { 2505 switch (port) { 2506 /* Ports which are connected to switch PHYs. There is no MII pinout. */ 2507 case 0 ... 4: 2508 __set_bit(PHY_INTERFACE_MODE_GMII, 2509 config->supported_interfaces); 2510 break; 2511 2512 /* Port 5 supports rgmii with delays, mii, and gmii. */ 2513 case 5: 2514 phy_interface_set_rgmii(config->supported_interfaces); 2515 __set_bit(PHY_INTERFACE_MODE_MII, 2516 config->supported_interfaces); 2517 __set_bit(PHY_INTERFACE_MODE_GMII, 2518 config->supported_interfaces); 2519 break; 2520 2521 /* Port 6 supports rgmii and trgmii. */ 2522 case 6: 2523 __set_bit(PHY_INTERFACE_MODE_RGMII, 2524 config->supported_interfaces); 2525 __set_bit(PHY_INTERFACE_MODE_TRGMII, 2526 config->supported_interfaces); 2527 break; 2528 } 2529 } 2530 2531 static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port, 2532 struct phylink_config *config) 2533 { 2534 struct mt7530_priv *priv = ds->priv; 2535 2536 switch (port) { 2537 /* Ports which are connected to switch PHYs. There is no MII pinout. */ 2538 case 0 ... 4: 2539 __set_bit(PHY_INTERFACE_MODE_GMII, 2540 config->supported_interfaces); 2541 break; 2542 2543 /* Port 5 supports rgmii with delays on MT7531BE, sgmii/802.3z on 2544 * MT7531AE. 2545 */ 2546 case 5: 2547 if (!priv->p5_sgmii) { 2548 phy_interface_set_rgmii(config->supported_interfaces); 2549 break; 2550 } 2551 fallthrough; 2552 2553 /* Port 6 supports sgmii/802.3z. */ 2554 case 6: 2555 __set_bit(PHY_INTERFACE_MODE_SGMII, 2556 config->supported_interfaces); 2557 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 2558 config->supported_interfaces); 2559 __set_bit(PHY_INTERFACE_MODE_2500BASEX, 2560 config->supported_interfaces); 2561 2562 config->mac_capabilities |= MAC_2500FD; 2563 break; 2564 } 2565 } 2566 2567 static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port, 2568 struct phylink_config *config) 2569 { 2570 switch (port) { 2571 /* Ports which are connected to switch PHYs. There is no MII pinout. */ 2572 case 0 ... 3: 2573 __set_bit(PHY_INTERFACE_MODE_INTERNAL, 2574 config->supported_interfaces); 2575 break; 2576 2577 /* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. 
*/ 2578 case 6: 2579 __set_bit(PHY_INTERFACE_MODE_INTERNAL, 2580 config->supported_interfaces); 2581 config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | 2582 MAC_10000FD; 2583 } 2584 } 2585 2586 static void 2587 mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode, 2588 phy_interface_t interface) 2589 { 2590 struct mt7530_priv *priv = ds->priv; 2591 2592 if (port == 5) 2593 mt7530_setup_port5(priv->ds, interface); 2594 else if (port == 6) 2595 mt7530_setup_port6(priv->ds, interface); 2596 } 2597 2598 static void mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port, 2599 phy_interface_t interface, 2600 struct phy_device *phydev) 2601 { 2602 u32 val; 2603 2604 val = mt7530_read(priv, MT7531_CLKGEN_CTRL); 2605 val |= GP_CLK_EN; 2606 val &= ~GP_MODE_MASK; 2607 val |= GP_MODE(MT7531_GP_MODE_RGMII); 2608 val &= ~CLK_SKEW_IN_MASK; 2609 val |= CLK_SKEW_IN(MT7531_CLK_SKEW_NO_CHG); 2610 val &= ~CLK_SKEW_OUT_MASK; 2611 val |= CLK_SKEW_OUT(MT7531_CLK_SKEW_NO_CHG); 2612 val |= TXCLK_NO_REVERSE | RXCLK_NO_DELAY; 2613 2614 /* Do not adjust the RGMII delay when a vendor PHY driver is present. */ 2615 if (!phydev || phy_driver_is_genphy(phydev)) { 2616 val &= ~(TXCLK_NO_REVERSE | RXCLK_NO_DELAY); 2617 switch (interface) { 2618 case PHY_INTERFACE_MODE_RGMII: 2619 val |= TXCLK_NO_REVERSE; 2620 val |= RXCLK_NO_DELAY; 2621 break; 2622 case PHY_INTERFACE_MODE_RGMII_RXID: 2623 val |= TXCLK_NO_REVERSE; 2624 break; 2625 case PHY_INTERFACE_MODE_RGMII_TXID: 2626 val |= RXCLK_NO_DELAY; 2627 break; 2628 case PHY_INTERFACE_MODE_RGMII_ID: 2629 break; 2630 default: 2631 break; 2632 } 2633 } 2634 2635 mt7530_write(priv, MT7531_CLKGEN_CTRL, val); 2636 } 2637 2638 static void 2639 mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode, 2640 phy_interface_t interface) 2641 { 2642 struct mt7530_priv *priv = ds->priv; 2643 struct phy_device *phydev; 2644 struct dsa_port *dp; 2645 2646 if (phy_interface_mode_is_rgmii(interface)) { 2647 dp = dsa_to_port(ds, port); 2648 phydev = dp->user->phydev; 2649 mt7531_rgmii_setup(priv, port, interface, phydev); 2650 } 2651 } 2652 2653 static struct phylink_pcs * 2654 mt753x_phylink_mac_select_pcs(struct dsa_switch *ds, int port, 2655 phy_interface_t interface) 2656 { 2657 struct mt7530_priv *priv = ds->priv; 2658 2659 switch (interface) { 2660 case PHY_INTERFACE_MODE_TRGMII: 2661 return &priv->pcs[port].pcs; 2662 case PHY_INTERFACE_MODE_SGMII: 2663 case PHY_INTERFACE_MODE_1000BASEX: 2664 case PHY_INTERFACE_MODE_2500BASEX: 2665 return priv->ports[port].sgmii_pcs; 2666 default: 2667 return NULL; 2668 } 2669 } 2670 2671 static void 2672 mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, 2673 const struct phylink_link_state *state) 2674 { 2675 struct mt7530_priv *priv = ds->priv; 2676 2677 if ((port == 5 || port == 6) && priv->info->mac_port_config) 2678 priv->info->mac_port_config(ds, port, mode, state->interface); 2679 2680 /* Are we connected to an external PHY? */ 2681 if (port == 5 && dsa_is_user_port(ds, 5)) 2682 mt7530_set(priv, MT7530_PMCR_P(port), PMCR_EXT_PHY); 2683 } 2684 2685 static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port, 2686 unsigned int mode, 2687 phy_interface_t interface) 2688 { 2689 struct mt7530_priv *priv = ds->priv; 2690 2691 mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK); 2692 } 2693 2694 static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port, 2695 unsigned int mode, 2696 phy_interface_t interface, 2697 struct phy_device *phydev, 2698 int speed, int duplex,
2699 bool tx_pause, bool rx_pause) 2700 { 2701 struct mt7530_priv *priv = ds->priv; 2702 u32 mcr; 2703 2704 mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK; 2705 2706 switch (speed) { 2707 case SPEED_1000: 2708 case SPEED_2500: 2709 case SPEED_10000: 2710 mcr |= PMCR_FORCE_SPEED_1000; 2711 break; 2712 case SPEED_100: 2713 mcr |= PMCR_FORCE_SPEED_100; 2714 break; 2715 } 2716 if (duplex == DUPLEX_FULL) { 2717 mcr |= PMCR_FORCE_FDX; 2718 if (tx_pause) 2719 mcr |= PMCR_TX_FC_EN; 2720 if (rx_pause) 2721 mcr |= PMCR_RX_FC_EN; 2722 } 2723 2724 if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) { 2725 switch (speed) { 2726 case SPEED_1000: 2727 case SPEED_2500: 2728 mcr |= PMCR_FORCE_EEE1G; 2729 break; 2730 case SPEED_100: 2731 mcr |= PMCR_FORCE_EEE100; 2732 break; 2733 } 2734 } 2735 2736 mt7530_set(priv, MT7530_PMCR_P(port), mcr); 2737 } 2738 2739 static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port, 2740 struct phylink_config *config) 2741 { 2742 struct mt7530_priv *priv = ds->priv; 2743 2744 /* This switch only supports full-duplex at 1Gbps */ 2745 config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | 2746 MAC_10 | MAC_100 | MAC_1000FD; 2747 2748 priv->info->mac_port_get_caps(ds, port, config); 2749 } 2750 2751 static int mt753x_pcs_validate(struct phylink_pcs *pcs, 2752 unsigned long *supported, 2753 const struct phylink_link_state *state) 2754 { 2755 /* Autonegotiation is not supported in TRGMII nor 802.3z modes */ 2756 if (state->interface == PHY_INTERFACE_MODE_TRGMII || 2757 phy_interface_mode_is_8023z(state->interface)) 2758 phylink_clear(supported, Autoneg); 2759 2760 return 0; 2761 } 2762 2763 static void mt7530_pcs_get_state(struct phylink_pcs *pcs, 2764 struct phylink_link_state *state) 2765 { 2766 struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv; 2767 int port = pcs_to_mt753x_pcs(pcs)->port; 2768 u32 pmsr; 2769 2770 pmsr = mt7530_read(priv, MT7530_PMSR_P(port)); 2771 2772 state->link = (pmsr & PMSR_LINK); 2773 state->an_complete = state->link; 2774 state->duplex = !!(pmsr & PMSR_DPX); 2775 2776 switch (pmsr & PMSR_SPEED_MASK) { 2777 case PMSR_SPEED_10: 2778 state->speed = SPEED_10; 2779 break; 2780 case PMSR_SPEED_100: 2781 state->speed = SPEED_100; 2782 break; 2783 case PMSR_SPEED_1000: 2784 state->speed = SPEED_1000; 2785 break; 2786 default: 2787 state->speed = SPEED_UNKNOWN; 2788 break; 2789 } 2790 2791 state->pause &= ~(MLO_PAUSE_RX | MLO_PAUSE_TX); 2792 if (pmsr & PMSR_RX_FC) 2793 state->pause |= MLO_PAUSE_RX; 2794 if (pmsr & PMSR_TX_FC) 2795 state->pause |= MLO_PAUSE_TX; 2796 } 2797 2798 static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, 2799 phy_interface_t interface, 2800 const unsigned long *advertising, 2801 bool permit_pause_to_mac) 2802 { 2803 return 0; 2804 } 2805 2806 static void mt7530_pcs_an_restart(struct phylink_pcs *pcs) 2807 { 2808 } 2809 2810 static const struct phylink_pcs_ops mt7530_pcs_ops = { 2811 .pcs_validate = mt753x_pcs_validate, 2812 .pcs_get_state = mt7530_pcs_get_state, 2813 .pcs_config = mt753x_pcs_config, 2814 .pcs_an_restart = mt7530_pcs_an_restart, 2815 }; 2816 2817 static int 2818 mt753x_setup(struct dsa_switch *ds) 2819 { 2820 struct mt7530_priv *priv = ds->priv; 2821 int ret = priv->info->sw_setup(ds); 2822 int i; 2823 2824 if (ret) 2825 return ret; 2826 2827 ret = mt7530_setup_irq(priv); 2828 if (ret) 2829 return ret; 2830 2831 ret = mt7530_setup_mdio(priv); 2832 if (ret && priv->irq) 2833 mt7530_free_irq_common(priv); 2834 2835 /* Initialise the PCS devices */ 2836 for (i 
= 0; i < priv->ds->num_ports; i++) { 2837 priv->pcs[i].pcs.ops = priv->info->pcs_ops; 2838 priv->pcs[i].pcs.neg_mode = true; 2839 priv->pcs[i].priv = priv; 2840 priv->pcs[i].port = i; 2841 } 2842 2843 if (priv->create_sgmii) { 2844 ret = priv->create_sgmii(priv); 2845 if (ret && priv->irq) 2846 mt7530_free_irq(priv); 2847 } 2848 2849 return ret; 2850 } 2851 2852 static int mt753x_get_mac_eee(struct dsa_switch *ds, int port, 2853 struct ethtool_keee *e) 2854 { 2855 struct mt7530_priv *priv = ds->priv; 2856 u32 eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port)); 2857 2858 e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN); 2859 e->tx_lpi_timer = GET_LPI_THRESH(eeecr); 2860 2861 return 0; 2862 } 2863 2864 static int mt753x_set_mac_eee(struct dsa_switch *ds, int port, 2865 struct ethtool_keee *e) 2866 { 2867 struct mt7530_priv *priv = ds->priv; 2868 u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN; 2869 2870 if (e->tx_lpi_timer > 0xFFF) 2871 return -EINVAL; 2872 2873 set = SET_LPI_THRESH(e->tx_lpi_timer); 2874 if (!e->tx_lpi_enabled) 2875 /* Force LPI Mode without a delay */ 2876 set |= LPI_MODE_EN; 2877 mt7530_rmw(priv, MT7530_PMEEECR_P(port), mask, set); 2878 2879 return 0; 2880 } 2881 2882 static void 2883 mt753x_conduit_state_change(struct dsa_switch *ds, 2884 const struct net_device *conduit, 2885 bool operational) 2886 { 2887 struct dsa_port *cpu_dp = conduit->dsa_ptr; 2888 struct mt7530_priv *priv = ds->priv; 2889 int val = 0; 2890 u8 mask; 2891 2892 /* Set the CPU port to trap frames to for MT7530. Trapped frames will be 2893 * forwarded to the numerically smallest CPU port whose conduit 2894 * interface is up. 2895 */ 2896 if (priv->id != ID_MT7530 && priv->id != ID_MT7621) 2897 return; 2898 2899 mask = BIT(cpu_dp->index); 2900 2901 if (operational) 2902 priv->active_cpu_ports |= mask; 2903 else 2904 priv->active_cpu_ports &= ~mask; 2905 2906 if (priv->active_cpu_ports) 2907 val = CPU_EN | CPU_PORT(__ffs(priv->active_cpu_ports)); 2908 2909 mt7530_rmw(priv, MT7530_MFC, CPU_EN | CPU_PORT_MASK, val); 2910 } 2911 2912 static int mt7988_setup(struct dsa_switch *ds) 2913 { 2914 struct mt7530_priv *priv = ds->priv; 2915 2916 /* Reset the switch */ 2917 reset_control_assert(priv->rstc); 2918 usleep_range(20, 50); 2919 reset_control_deassert(priv->rstc); 2920 usleep_range(20, 50); 2921 2922 /* Reset the switch PHYs */ 2923 mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_PHY_RST); 2924 2925 return mt7531_setup_common(ds); 2926 } 2927 2928 const struct dsa_switch_ops mt7530_switch_ops = { 2929 .get_tag_protocol = mtk_get_tag_protocol, 2930 .setup = mt753x_setup, 2931 .preferred_default_local_cpu_port = mt753x_preferred_default_local_cpu_port, 2932 .get_strings = mt7530_get_strings, 2933 .get_ethtool_stats = mt7530_get_ethtool_stats, 2934 .get_sset_count = mt7530_get_sset_count, 2935 .set_ageing_time = mt7530_set_ageing_time, 2936 .port_enable = mt7530_port_enable, 2937 .port_disable = mt7530_port_disable, 2938 .port_change_mtu = mt7530_port_change_mtu, 2939 .port_max_mtu = mt7530_port_max_mtu, 2940 .port_stp_state_set = mt7530_stp_state_set, 2941 .port_pre_bridge_flags = mt7530_port_pre_bridge_flags, 2942 .port_bridge_flags = mt7530_port_bridge_flags, 2943 .port_bridge_join = mt7530_port_bridge_join, 2944 .port_bridge_leave = mt7530_port_bridge_leave, 2945 .port_fdb_add = mt7530_port_fdb_add, 2946 .port_fdb_del = mt7530_port_fdb_del, 2947 .port_fdb_dump = mt7530_port_fdb_dump, 2948 .port_mdb_add = mt7530_port_mdb_add, 2949 .port_mdb_del = mt7530_port_mdb_del, 2950 .port_vlan_filtering = 
mt7530_port_vlan_filtering, 2951 .port_vlan_add = mt7530_port_vlan_add, 2952 .port_vlan_del = mt7530_port_vlan_del, 2953 .port_mirror_add = mt753x_port_mirror_add, 2954 .port_mirror_del = mt753x_port_mirror_del, 2955 .phylink_get_caps = mt753x_phylink_get_caps, 2956 .phylink_mac_select_pcs = mt753x_phylink_mac_select_pcs, 2957 .phylink_mac_config = mt753x_phylink_mac_config, 2958 .phylink_mac_link_down = mt753x_phylink_mac_link_down, 2959 .phylink_mac_link_up = mt753x_phylink_mac_link_up, 2960 .get_mac_eee = mt753x_get_mac_eee, 2961 .set_mac_eee = mt753x_set_mac_eee, 2962 .conduit_state_change = mt753x_conduit_state_change, 2963 }; 2964 EXPORT_SYMBOL_GPL(mt7530_switch_ops); 2965 2966 const struct mt753x_info mt753x_table[] = { 2967 [ID_MT7621] = { 2968 .id = ID_MT7621, 2969 .pcs_ops = &mt7530_pcs_ops, 2970 .sw_setup = mt7530_setup, 2971 .phy_read_c22 = mt7530_phy_read_c22, 2972 .phy_write_c22 = mt7530_phy_write_c22, 2973 .phy_read_c45 = mt7530_phy_read_c45, 2974 .phy_write_c45 = mt7530_phy_write_c45, 2975 .mac_port_get_caps = mt7530_mac_port_get_caps, 2976 .mac_port_config = mt7530_mac_config, 2977 }, 2978 [ID_MT7530] = { 2979 .id = ID_MT7530, 2980 .pcs_ops = &mt7530_pcs_ops, 2981 .sw_setup = mt7530_setup, 2982 .phy_read_c22 = mt7530_phy_read_c22, 2983 .phy_write_c22 = mt7530_phy_write_c22, 2984 .phy_read_c45 = mt7530_phy_read_c45, 2985 .phy_write_c45 = mt7530_phy_write_c45, 2986 .mac_port_get_caps = mt7530_mac_port_get_caps, 2987 .mac_port_config = mt7530_mac_config, 2988 }, 2989 [ID_MT7531] = { 2990 .id = ID_MT7531, 2991 .pcs_ops = &mt7530_pcs_ops, 2992 .sw_setup = mt7531_setup, 2993 .phy_read_c22 = mt7531_ind_c22_phy_read, 2994 .phy_write_c22 = mt7531_ind_c22_phy_write, 2995 .phy_read_c45 = mt7531_ind_c45_phy_read, 2996 .phy_write_c45 = mt7531_ind_c45_phy_write, 2997 .mac_port_get_caps = mt7531_mac_port_get_caps, 2998 .mac_port_config = mt7531_mac_config, 2999 }, 3000 [ID_MT7988] = { 3001 .id = ID_MT7988, 3002 .pcs_ops = &mt7530_pcs_ops, 3003 .sw_setup = mt7988_setup, 3004 .phy_read_c22 = mt7531_ind_c22_phy_read, 3005 .phy_write_c22 = mt7531_ind_c22_phy_write, 3006 .phy_read_c45 = mt7531_ind_c45_phy_read, 3007 .phy_write_c45 = mt7531_ind_c45_phy_write, 3008 .mac_port_get_caps = mt7988_mac_port_get_caps, 3009 }, 3010 }; 3011 EXPORT_SYMBOL_GPL(mt753x_table); 3012 3013 int 3014 mt7530_probe_common(struct mt7530_priv *priv) 3015 { 3016 struct device *dev = priv->dev; 3017 3018 priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL); 3019 if (!priv->ds) 3020 return -ENOMEM; 3021 3022 priv->ds->dev = dev; 3023 priv->ds->num_ports = MT7530_NUM_PORTS; 3024 3025 /* Get the hardware identifier from the devicetree node. 3026 * We will need it for some of the clock and regulator setup. 3027 */ 3028 priv->info = of_device_get_match_data(dev); 3029 if (!priv->info) 3030 return -EINVAL; 3031 3032 /* Sanity check if these required device operations are filled 3033 * properly. 
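 * Every entry in mt753x_table is expected to provide at least the switch
 * setup hook, the clause-22 PHY accessors and the port capability callback.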
3034 */ 3035 if (!priv->info->sw_setup || !priv->info->phy_read_c22 || 3036 !priv->info->phy_write_c22 || !priv->info->mac_port_get_caps) 3037 return -EINVAL; 3038 3039 priv->id = priv->info->id; 3040 priv->dev = dev; 3041 priv->ds->priv = priv; 3042 priv->ds->ops = &mt7530_switch_ops; 3043 mutex_init(&priv->reg_mutex); 3044 dev_set_drvdata(dev, priv); 3045 3046 return 0; 3047 } 3048 EXPORT_SYMBOL_GPL(mt7530_probe_common); 3049 3050 void 3051 mt7530_remove_common(struct mt7530_priv *priv) 3052 { 3053 if (priv->irq) 3054 mt7530_free_irq(priv); 3055 3056 dsa_unregister_switch(priv->ds); 3057 3058 mutex_destroy(&priv->reg_mutex); 3059 } 3060 EXPORT_SYMBOL_GPL(mt7530_remove_common); 3061 3062 MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); 3063 MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch"); 3064 MODULE_LICENSE("GPL"); 3065