1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Mediatek MT7530 DSA Switch driver
4 * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
5 */
6 #include <linux/etherdevice.h>
7 #include <linux/if_bridge.h>
8 #include <linux/iopoll.h>
9 #include <linux/mdio.h>
10 #include <linux/mfd/syscon.h>
11 #include <linux/module.h>
12 #include <linux/netdevice.h>
13 #include <linux/of_irq.h>
14 #include <linux/of_mdio.h>
15 #include <linux/of_net.h>
16 #include <linux/of_platform.h>
17 #include <linux/phylink.h>
18 #include <linux/regmap.h>
19 #include <linux/regulator/consumer.h>
20 #include <linux/reset.h>
21 #include <linux/gpio/consumer.h>
22 #include <linux/gpio/driver.h>
23 #include <net/dsa.h>
24
25 #include "mt7530.h"
26
27 static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs)
28 {
29 return container_of(pcs, struct mt753x_pcs, pcs);
30 }
31
32 /* Size in 32-bit words (64-bit counters use two), offset, and counter name */
33 static const struct mt7530_mib_desc mt7530_mib[] = {
34 MIB_DESC(1, 0x00, "TxDrop"),
35 MIB_DESC(1, 0x04, "TxCrcErr"),
36 MIB_DESC(1, 0x08, "TxUnicast"),
37 MIB_DESC(1, 0x0c, "TxMulticast"),
38 MIB_DESC(1, 0x10, "TxBroadcast"),
39 MIB_DESC(1, 0x14, "TxCollision"),
40 MIB_DESC(1, 0x18, "TxSingleCollision"),
41 MIB_DESC(1, 0x1c, "TxMultipleCollision"),
42 MIB_DESC(1, 0x20, "TxDeferred"),
43 MIB_DESC(1, 0x24, "TxLateCollision"),
44 MIB_DESC(1, 0x28, "TxExcessiveCollistion"),
45 MIB_DESC(1, 0x2c, "TxPause"),
46 MIB_DESC(1, 0x30, "TxPktSz64"),
47 MIB_DESC(1, 0x34, "TxPktSz65To127"),
48 MIB_DESC(1, 0x38, "TxPktSz128To255"),
49 MIB_DESC(1, 0x3c, "TxPktSz256To511"),
50 MIB_DESC(1, 0x40, "TxPktSz512To1023"),
51 MIB_DESC(1, 0x44, "Tx1024ToMax"),
52 MIB_DESC(2, 0x48, "TxBytes"),
53 MIB_DESC(1, 0x60, "RxDrop"),
54 MIB_DESC(1, 0x64, "RxFiltering"),
55 MIB_DESC(1, 0x68, "RxUnicast"),
56 MIB_DESC(1, 0x6c, "RxMulticast"),
57 MIB_DESC(1, 0x70, "RxBroadcast"),
58 MIB_DESC(1, 0x74, "RxAlignErr"),
59 MIB_DESC(1, 0x78, "RxCrcErr"),
60 MIB_DESC(1, 0x7c, "RxUnderSizeErr"),
61 MIB_DESC(1, 0x80, "RxFragErr"),
62 MIB_DESC(1, 0x84, "RxOverSzErr"),
63 MIB_DESC(1, 0x88, "RxJabberErr"),
64 MIB_DESC(1, 0x8c, "RxPause"),
65 MIB_DESC(1, 0x90, "RxPktSz64"),
66 MIB_DESC(1, 0x94, "RxPktSz65To127"),
67 MIB_DESC(1, 0x98, "RxPktSz128To255"),
68 MIB_DESC(1, 0x9c, "RxPktSz256To511"),
69 MIB_DESC(1, 0xa0, "RxPktSz512To1023"),
70 MIB_DESC(1, 0xa4, "RxPktSz1024ToMax"),
71 MIB_DESC(2, 0xa8, "RxBytes"),
72 MIB_DESC(1, 0xb0, "RxCtrlDrop"),
73 MIB_DESC(1, 0xb4, "RxIngressDrop"),
74 MIB_DESC(1, 0xb8, "RxArlDrop"),
75 };
76
77 static void
78 mt7530_mutex_lock(struct mt7530_priv *priv)
79 {
80 if (priv->bus)
81 mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
82 }
83
84 static void
85 mt7530_mutex_unlock(struct mt7530_priv *priv)
86 {
87 if (priv->bus)
88 mutex_unlock(&priv->bus->mdio_lock);
89 }
90
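/* Note: core_write() and core_rmw() below reach the switch core registers
 * indirectly through the MMD access registers (MII_MMD_CTRL/MII_MMD_DATA)
 * of the PHY at MT753X_CTRL_PHY_ADDR: select the MMD devad, latch the
 * register address, switch to data mode with no post-increment, then
 * transfer the data.
 */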
91 static void
92 core_write(struct mt7530_priv *priv, u32 reg, u32 val)
93 {
94 struct mii_bus *bus = priv->bus;
95 int ret;
96
97 mt7530_mutex_lock(priv);
98
99 /* Write the desired MMD Devad */
100 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
101 MII_MMD_CTRL, MDIO_MMD_VEND2);
102 if (ret < 0)
103 goto err;
104
105 /* Write the desired MMD register address */
106 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
107 MII_MMD_DATA, reg);
108 if (ret < 0)
109 goto err;
110
111 /* Select the Function : DATA with no post increment */
112 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
113 MII_MMD_CTRL, MDIO_MMD_VEND2 | MII_MMD_CTRL_NOINCR);
114 if (ret < 0)
115 goto err;
116
117 /* Write the data into MMD's selected register */
118 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
119 MII_MMD_DATA, val);
120 err:
121 if (ret < 0)
122 dev_err(&bus->dev, "failed to write mmd register\n");
123
124 mt7530_mutex_unlock(priv);
125 }
126
127 static void
128 core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set)
129 {
130 struct mii_bus *bus = priv->bus;
131 u32 val;
132 int ret;
133
134 mt7530_mutex_lock(priv);
135
136 /* Write the desired MMD Devad */
137 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
138 MII_MMD_CTRL, MDIO_MMD_VEND2);
139 if (ret < 0)
140 goto err;
141
142 /* Write the desired MMD register address */
143 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
144 MII_MMD_DATA, reg);
145 if (ret < 0)
146 goto err;
147
148 /* Select the Function : DATA with no post increment */
149 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
150 MII_MMD_CTRL, MDIO_MMD_VEND2 | MII_MMD_CTRL_NOINCR);
151 if (ret < 0)
152 goto err;
153
154 /* Read the content of the MMD's selected register */
155 val = bus->read(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
156 MII_MMD_DATA);
157 val &= ~mask;
158 val |= set;
159 /* Write the data into MMD's selected register */
160 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
161 MII_MMD_DATA, val);
162 err:
163 if (ret < 0)
164 dev_err(&bus->dev, "failed to write mmd register\n");
165
166 mt7530_mutex_unlock(priv);
167 }
168
169 static void
170 core_set(struct mt7530_priv *priv, u32 reg, u32 val)
171 {
172 core_rmw(priv, reg, 0, val);
173 }
174
175 static void
176 core_clear(struct mt7530_priv *priv, u32 reg, u32 val)
177 {
178 core_rmw(priv, reg, val, 0);
179 }
180
181 static int
182 mt7530_mii_write(struct mt7530_priv *priv, u32 reg, u32 val)
183 {
184 int ret;
185
186 ret = regmap_write(priv->regmap, reg, val);
187
188 if (ret < 0)
189 dev_err(priv->dev,
190 "failed to write mt7530 register\n");
191
192 return ret;
193 }
194
195 static u32
196 mt7530_mii_read(struct mt7530_priv *priv, u32 reg)
197 {
198 int ret;
199 u32 val;
200
201 ret = regmap_read(priv->regmap, reg, &val);
202 if (ret) {
203 WARN_ON_ONCE(1);
204 dev_err(priv->dev,
205 "failed to read mt7530 register\n");
206 return 0;
207 }
208
209 return val;
210 }
211
212 static void
213 mt7530_write(struct mt7530_priv *priv, u32 reg, u32 val)
214 {
215 mt7530_mutex_lock(priv);
216
217 mt7530_mii_write(priv, reg, val);
218
219 mt7530_mutex_unlock(priv);
220 }
221
222 static u32
223 _mt7530_unlocked_read(struct mt7530_dummy_poll *p)
224 {
225 return mt7530_mii_read(p->priv, p->reg);
226 }
227
228 static u32
229 _mt7530_read(struct mt7530_dummy_poll *p)
230 {
231 u32 val;
232
233 mt7530_mutex_lock(p->priv);
234
235 val = mt7530_mii_read(p->priv, p->reg);
236
237 mt7530_mutex_unlock(p->priv);
238
239 return val;
240 }
241
242 static u32
243 mt7530_read(struct mt7530_priv *priv, u32 reg)
244 {
245 struct mt7530_dummy_poll p;
246
247 INIT_MT7530_DUMMY_POLL(&p, priv, reg);
248 return _mt7530_read(&p);
249 }
250
251 static void
252 mt7530_rmw(struct mt7530_priv *priv, u32 reg,
253 u32 mask, u32 set)
254 {
255 mt7530_mutex_lock(priv);
256
257 regmap_update_bits(priv->regmap, reg, mask, set);
258
259 mt7530_mutex_unlock(priv);
260 }
261
262 static void
263 mt7530_set(struct mt7530_priv *priv, u32 reg, u32 val)
264 {
265 mt7530_rmw(priv, reg, val, val);
266 }
267
268 static void
269 mt7530_clear(struct mt7530_priv *priv, u32 reg, u32 val)
270 {
271 mt7530_rmw(priv, reg, val, 0);
272 }
273
274 static int
275 mt7530_fdb_cmd(struct mt7530_priv *priv, enum mt7530_fdb_cmd cmd, u32 *rsp)
276 {
277 u32 val;
278 int ret;
279 struct mt7530_dummy_poll p;
280
281 /* Set the command operating upon the MAC address entries */
282 val = ATC_BUSY | ATC_MAT(0) | cmd;
283 mt7530_write(priv, MT7530_ATC, val);
284
285 INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_ATC);
286 ret = readx_poll_timeout(_mt7530_read, &p, val,
287 !(val & ATC_BUSY), 20, 20000);
288 if (ret < 0) {
289 dev_err(priv->dev, "reset timeout\n");
290 return ret;
291 }
292
293 /* Additional sanity check for the read command if the specified
294 * entry is invalid
295 */
296 val = mt7530_read(priv, MT7530_ATC);
297 if ((cmd == MT7530_FDB_READ) && (val & ATC_INVALID))
298 return -EINVAL;
299
300 if (rsp)
301 *rsp = val;
302
303 return 0;
304 }
305
306 static void
307 mt7530_fdb_read(struct mt7530_priv *priv, struct mt7530_fdb *fdb)
308 {
309 u32 reg[3];
310 int i;
311
312 /* Read from ARL table into an array */
313 for (i = 0; i < 3; i++) {
314 reg[i] = mt7530_read(priv, MT7530_TSRA1 + (i * 4));
315
316 dev_dbg(priv->dev, "%s(%d) reg[%d]=0x%x\n",
317 __func__, __LINE__, i, reg[i]);
318 }
319
320 fdb->vid = (reg[1] >> CVID) & CVID_MASK;
321 fdb->aging = (reg[2] >> AGE_TIMER) & AGE_TIMER_MASK;
322 fdb->port_mask = (reg[2] >> PORT_MAP) & PORT_MAP_MASK;
323 fdb->mac[0] = (reg[0] >> MAC_BYTE_0) & MAC_BYTE_MASK;
324 fdb->mac[1] = (reg[0] >> MAC_BYTE_1) & MAC_BYTE_MASK;
325 fdb->mac[2] = (reg[0] >> MAC_BYTE_2) & MAC_BYTE_MASK;
326 fdb->mac[3] = (reg[0] >> MAC_BYTE_3) & MAC_BYTE_MASK;
327 fdb->mac[4] = (reg[1] >> MAC_BYTE_4) & MAC_BYTE_MASK;
328 fdb->mac[5] = (reg[1] >> MAC_BYTE_5) & MAC_BYTE_MASK;
329 fdb->noarp = ((reg[2] >> ENT_STATUS) & ENT_STATUS_MASK) == STATIC_ENT;
330 }
331
332 static void
333 mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
334 u8 port_mask, const u8 *mac,
335 u8 aging, u8 type)
336 {
337 u32 reg[3] = { 0 };
338 int i;
339
340 reg[1] |= vid & CVID_MASK;
341 reg[1] |= ATA2_IVL;
342 reg[1] |= ATA2_FID(FID_BRIDGED);
343 reg[2] |= (aging & AGE_TIMER_MASK) << AGE_TIMER;
344 reg[2] |= (port_mask & PORT_MAP_MASK) << PORT_MAP;
345 /* STATIC_ENT indicates that the entry is static and won't
346 * be aged out, while STATIC_EMP specifies erasing an
347 * entry.
348 */
349 reg[2] |= (type & ENT_STATUS_MASK) << ENT_STATUS;
350 reg[1] |= mac[5] << MAC_BYTE_5;
351 reg[1] |= mac[4] << MAC_BYTE_4;
352 reg[0] |= mac[3] << MAC_BYTE_3;
353 reg[0] |= mac[2] << MAC_BYTE_2;
354 reg[0] |= mac[1] << MAC_BYTE_1;
355 reg[0] |= mac[0] << MAC_BYTE_0;
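/* Layout sketch: mac[0..3] end up in the first word (ATA1), mac[4..5]
 * share the second word (ATA2) with the CVID/IVL/FID fields, and the
 * third word carries the aging timer, port map and entry status.
 */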
356
357 /* Write array into the ARL table */
358 for (i = 0; i < 3; i++)
359 mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]);
360 }
361
362 /* Set up switch core clock for MT7530 */
363 static void mt7530_pll_setup(struct mt7530_priv *priv)
364 {
365 /* Disable core clock */
366 core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
367
368 /* Disable PLL */
369 core_write(priv, CORE_GSWPLL_GRP1, 0);
370
371 /* Set core clock to 500MHz */
372 core_write(priv, CORE_GSWPLL_GRP2,
373 RG_GSWPLL_POSDIV_500M(1) |
374 RG_GSWPLL_FBKDIV_500M(25));
375
376 /* Enable PLL */
377 core_write(priv, CORE_GSWPLL_GRP1,
378 RG_GSWPLL_EN_PRE |
379 RG_GSWPLL_POSDIV_200M(2) |
380 RG_GSWPLL_FBKDIV_200M(32));
381
382 udelay(20);
383
384 /* Enable core clock */
385 core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
386 }
387
388 /* If port 6 is available as a CPU port, always prefer that as the default,
389 * otherwise don't care.
390 */
391 static struct dsa_port *
392 mt753x_preferred_default_local_cpu_port(struct dsa_switch *ds)
393 {
394 struct dsa_port *cpu_dp = dsa_to_port(ds, 6);
395
396 if (dsa_port_is_cpu(cpu_dp))
397 return cpu_dp;
398
399 return NULL;
400 }
401
402 /* Setup port 6 interface mode and TRGMII TX circuit */
403 static void
404 mt7530_setup_port6(struct dsa_switch *ds, phy_interface_t interface)
405 {
406 struct mt7530_priv *priv = ds->priv;
407 u32 ncpo1, ssc_delta, xtal;
408
409 /* Disable the MT7530 TRGMII clocks */
410 core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
411
412 if (interface == PHY_INTERFACE_MODE_RGMII) {
413 mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
414 P6_INTF_MODE(0));
415 return;
416 }
417
418 mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK, P6_INTF_MODE(1));
419
420 xtal = mt7530_read(priv, MT753X_MTRAP) & MT7530_XTAL_MASK;
421
422 if (xtal == MT7530_XTAL_25MHZ)
423 ssc_delta = 0x57;
424 else
425 ssc_delta = 0x87;
426
427 if (priv->id == ID_MT7621) {
428 /* PLL frequency: 125MHz: 1.0GBit */
429 if (xtal == MT7530_XTAL_40MHZ)
430 ncpo1 = 0x0640;
431 if (xtal == MT7530_XTAL_25MHZ)
432 ncpo1 = 0x0a00;
433 } else { /* PLL frequency: 250MHz: 2.0Gbit */
434 if (xtal == MT7530_XTAL_40MHZ)
435 ncpo1 = 0x0c80;
436 if (xtal == MT7530_XTAL_25MHZ)
437 ncpo1 = 0x1400;
438 }
439
440 /* Setup the MT7530 TRGMII Tx Clock */
441 core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
442 core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
443 core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
444 core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
445 core_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
446 RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);
447 core_write(priv, CORE_PLL_GROUP2, RG_SYSPLL_EN_NORMAL |
448 RG_SYSPLL_VODEN | RG_SYSPLL_POSDIV(1));
449 core_write(priv, CORE_PLL_GROUP7, RG_LCDDS_PCW_NCPO_CHG |
450 RG_LCCDS_C(3) | RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
451
452 /* Enable the MT7530 TRGMII clocks */
453 core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
454 }
455
456 static void
457 mt7531_pll_setup(struct mt7530_priv *priv)
458 {
459 enum mt7531_xtal_fsel xtal;
460 u32 top_sig;
461 u32 hwstrap;
462 u32 val;
463
464 val = mt7530_read(priv, MT7531_CREV);
465 top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
466 hwstrap = mt7530_read(priv, MT753X_TRAP);
467 if ((val & CHIP_REV_M) > 0)
468 xtal = (top_sig & PAD_MCM_SMI_EN) ? MT7531_XTAL_FSEL_40MHZ :
469 MT7531_XTAL_FSEL_25MHZ;
470 else
471 xtal = (hwstrap & MT7531_XTAL25) ? MT7531_XTAL_FSEL_25MHZ :
472 MT7531_XTAL_FSEL_40MHZ;
473
474 /* Step 1 : Disable MT7531 COREPLL */
475 val = mt7530_read(priv, MT7531_PLLGP_EN);
476 val &= ~EN_COREPLL;
477 mt7530_write(priv, MT7531_PLLGP_EN, val);
478
479 /* Step 2: switch to XTAL output */
480 val = mt7530_read(priv, MT7531_PLLGP_EN);
481 val |= SW_CLKSW;
482 mt7530_write(priv, MT7531_PLLGP_EN, val);
483
484 val = mt7530_read(priv, MT7531_PLLGP_CR0);
485 val &= ~RG_COREPLL_EN;
486 mt7530_write(priv, MT7531_PLLGP_CR0, val);
487
488 /* Step 3: disable PLLGP and enable program PLLGP */
489 val = mt7530_read(priv, MT7531_PLLGP_EN);
490 val |= SW_PLLGP;
491 mt7530_write(priv, MT7531_PLLGP_EN, val);
492
493 /* Step 4: program COREPLL output frequency to 500MHz */
494 val = mt7530_read(priv, MT7531_PLLGP_CR0);
495 val &= ~RG_COREPLL_POSDIV_M;
496 val |= 2 << RG_COREPLL_POSDIV_S;
497 mt7530_write(priv, MT7531_PLLGP_CR0, val);
498 usleep_range(25, 35);
499
500 switch (xtal) {
501 case MT7531_XTAL_FSEL_25MHZ:
502 val = mt7530_read(priv, MT7531_PLLGP_CR0);
503 val &= ~RG_COREPLL_SDM_PCW_M;
504 val |= 0x140000 << RG_COREPLL_SDM_PCW_S;
505 mt7530_write(priv, MT7531_PLLGP_CR0, val);
506 break;
507 case MT7531_XTAL_FSEL_40MHZ:
508 val = mt7530_read(priv, MT7531_PLLGP_CR0);
509 val &= ~RG_COREPLL_SDM_PCW_M;
510 val |= 0x190000 << RG_COREPLL_SDM_PCW_S;
511 mt7530_write(priv, MT7531_PLLGP_CR0, val);
512 break;
513 }
514
515 /* Set feedback divide ratio update signal to high */
516 val = mt7530_read(priv, MT7531_PLLGP_CR0);
517 val |= RG_COREPLL_SDM_PCW_CHG;
518 mt7530_write(priv, MT7531_PLLGP_CR0, val);
519 /* Wait for at least 16 XTAL clocks */
520 usleep_range(10, 20);
521
522 /* Step 5: set feedback divide ratio update signal to low */
523 val = mt7530_read(priv, MT7531_PLLGP_CR0);
524 val &= ~RG_COREPLL_SDM_PCW_CHG;
525 mt7530_write(priv, MT7531_PLLGP_CR0, val);
526
527 /* Enable 325M clock for SGMII */
528 mt7530_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000);
529
530 /* Enable 250SSC clock for RGMII */
531 mt7530_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000);
532
533 /* Step 6: Enable MT7531 PLL */
534 val = mt7530_read(priv, MT7531_PLLGP_CR0);
535 val |= RG_COREPLL_EN;
536 mt7530_write(priv, MT7531_PLLGP_CR0, val);
537
538 val = mt7530_read(priv, MT7531_PLLGP_EN);
539 val |= EN_COREPLL;
540 mt7530_write(priv, MT7531_PLLGP_EN, val);
541 usleep_range(25, 35);
542 }
543
544 static void
545 mt7530_mib_reset(struct dsa_switch *ds)
546 {
547 struct mt7530_priv *priv = ds->priv;
548
549 mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_FLUSH);
550 mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE);
551 }
552
553 static int mt7530_phy_read_c22(struct mt7530_priv *priv, int port, int regnum)
554 {
555 return mdiobus_read_nested(priv->bus, port, regnum);
556 }
557
558 static int mt7530_phy_write_c22(struct mt7530_priv *priv, int port, int regnum,
559 u16 val)
560 {
561 return mdiobus_write_nested(priv->bus, port, regnum, val);
562 }
563
564 static int mt7530_phy_read_c45(struct mt7530_priv *priv, int port,
565 int devad, int regnum)
566 {
567 return mdiobus_c45_read_nested(priv->bus, port, devad, regnum);
568 }
569
570 static int mt7530_phy_write_c45(struct mt7530_priv *priv, int port, int devad,
571 int regnum, u16 val)
572 {
573 return mdiobus_c45_write_nested(priv->bus, port, devad, regnum, val);
574 }
575
576 static int
577 mt7531_ind_c45_phy_read(struct mt7530_priv *priv, int port, int devad,
578 int regnum)
579 {
580 struct mt7530_dummy_poll p;
581 u32 reg, val;
582 int ret;
583
584 INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
585
586 mt7530_mutex_lock(priv);
587
588 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
589 !(val & MT7531_PHY_ACS_ST), 20, 100000);
590 if (ret < 0) {
591 dev_err(priv->dev, "poll timeout\n");
592 goto out;
593 }
594
595 reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) |
596 MT7531_MDIO_DEV_ADDR(devad) | regnum;
597 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
598
599 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
600 !(val & MT7531_PHY_ACS_ST), 20, 100000);
601 if (ret < 0) {
602 dev_err(priv->dev, "poll timeout\n");
603 goto out;
604 }
605
606 reg = MT7531_MDIO_CL45_READ | MT7531_MDIO_PHY_ADDR(port) |
607 MT7531_MDIO_DEV_ADDR(devad);
608 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
609
610 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
611 !(val & MT7531_PHY_ACS_ST), 20, 100000);
612 if (ret < 0) {
613 dev_err(priv->dev, "poll timeout\n");
614 goto out;
615 }
616
617 ret = val & MT7531_MDIO_RW_DATA_MASK;
618 out:
619 mt7530_mutex_unlock(priv);
620
621 return ret;
622 }
623
624 static int
625 mt7531_ind_c45_phy_write(struct mt7530_priv *priv, int port, int devad,
626 int regnum, u16 data)
627 {
628 struct mt7530_dummy_poll p;
629 u32 val, reg;
630 int ret;
631
632 INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
633
634 mt7530_mutex_lock(priv);
635
636 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
637 !(val & MT7531_PHY_ACS_ST), 20, 100000);
638 if (ret < 0) {
639 dev_err(priv->dev, "poll timeout\n");
640 goto out;
641 }
642
643 reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) |
644 MT7531_MDIO_DEV_ADDR(devad) | regnum;
645 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
646
647 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
648 !(val & MT7531_PHY_ACS_ST), 20, 100000);
649 if (ret < 0) {
650 dev_err(priv->dev, "poll timeout\n");
651 goto out;
652 }
653
654 reg = MT7531_MDIO_CL45_WRITE | MT7531_MDIO_PHY_ADDR(port) |
655 MT7531_MDIO_DEV_ADDR(devad) | data;
656 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
657
658 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
659 !(val & MT7531_PHY_ACS_ST), 20, 100000);
660 if (ret < 0) {
661 dev_err(priv->dev, "poll timeout\n");
662 goto out;
663 }
664
665 out:
666 mt7530_mutex_unlock(priv);
667
668 return ret;
669 }
670
671 static int
672 mt7531_ind_c22_phy_read(struct mt7530_priv *priv, int port, int regnum)
673 {
674 struct mt7530_dummy_poll p;
675 int ret;
676 u32 val;
677
678 INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
679
680 mt7530_mutex_lock(priv);
681
682 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
683 !(val & MT7531_PHY_ACS_ST), 20, 100000);
684 if (ret < 0) {
685 dev_err(priv->dev, "poll timeout\n");
686 goto out;
687 }
688
689 val = MT7531_MDIO_CL22_READ | MT7531_MDIO_PHY_ADDR(port) |
690 MT7531_MDIO_REG_ADDR(regnum);
691
692 mt7530_mii_write(priv, MT7531_PHY_IAC, val | MT7531_PHY_ACS_ST);
693
694 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
695 !(val & MT7531_PHY_ACS_ST), 20, 100000);
696 if (ret < 0) {
697 dev_err(priv->dev, "poll timeout\n");
698 goto out;
699 }
700
701 ret = val & MT7531_MDIO_RW_DATA_MASK;
702 out:
703 mt7530_mutex_unlock(priv);
704
705 return ret;
706 }
707
708 static int
709 mt7531_ind_c22_phy_write(struct mt7530_priv *priv, int port, int regnum,
710 u16 data)
711 {
712 struct mt7530_dummy_poll p;
713 int ret;
714 u32 reg;
715
716 INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
717
718 mt7530_mutex_lock(priv);
719
720 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
721 !(reg & MT7531_PHY_ACS_ST), 20, 100000);
722 if (ret < 0) {
723 dev_err(priv->dev, "poll timeout\n");
724 goto out;
725 }
726
727 reg = MT7531_MDIO_CL22_WRITE | MT7531_MDIO_PHY_ADDR(port) |
728 MT7531_MDIO_REG_ADDR(regnum) | data;
729
730 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
731
732 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
733 !(reg & MT7531_PHY_ACS_ST), 20, 100000);
734 if (ret < 0) {
735 dev_err(priv->dev, "poll timeout\n");
736 goto out;
737 }
738
739 out:
740 mt7530_mutex_unlock(priv);
741
742 return ret;
743 }
744
745 static int
746 mt753x_phy_read_c22(struct mii_bus *bus, int port, int regnum)
747 {
748 struct mt7530_priv *priv = bus->priv;
749
750 return priv->info->phy_read_c22(priv, port, regnum);
751 }
752
753 static int
754 mt753x_phy_read_c45(struct mii_bus *bus, int port, int devad, int regnum)
755 {
756 struct mt7530_priv *priv = bus->priv;
757
758 return priv->info->phy_read_c45(priv, port, devad, regnum);
759 }
760
761 static int
762 mt753x_phy_write_c22(struct mii_bus *bus, int port, int regnum, u16 val)
763 {
764 struct mt7530_priv *priv = bus->priv;
765
766 return priv->info->phy_write_c22(priv, port, regnum, val);
767 }
768
769 static int
770 mt753x_phy_write_c45(struct mii_bus *bus, int port, int devad, int regnum,
771 u16 val)
772 {
773 struct mt7530_priv *priv = bus->priv;
774
775 return priv->info->phy_write_c45(priv, port, devad, regnum, val);
776 }
777
778 static void
779 mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset,
780 uint8_t *data)
781 {
782 int i;
783
784 if (stringset != ETH_SS_STATS)
785 return;
786
787 for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++)
788 ethtool_puts(&data, mt7530_mib[i].name);
789 }
790
791 static void
792 mt7530_get_ethtool_stats(struct dsa_switch *ds, int port,
793 uint64_t *data)
794 {
795 struct mt7530_priv *priv = ds->priv;
796 const struct mt7530_mib_desc *mib;
797 u32 reg, i;
798 u64 hi;
799
800 for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) {
801 mib = &mt7530_mib[i];
802 reg = MT7530_PORT_MIB_COUNTER(port) + mib->offset;
803
804 data[i] = mt7530_read(priv, reg);
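/* 64-bit counters (size == 2, e.g. TxBytes) span two consecutive
 * registers; fold in the high word read from offset + 4.
 */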
805 if (mib->size == 2) {
806 hi = mt7530_read(priv, reg + 4);
807 data[i] |= hi << 32;
808 }
809 }
810 }
811
812 static int
813 mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset)
814 {
815 if (sset != ETH_SS_STATS)
816 return 0;
817
818 return ARRAY_SIZE(mt7530_mib);
819 }
820
821 static int
822 mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
823 {
824 struct mt7530_priv *priv = ds->priv;
825 unsigned int secs = msecs / 1000;
826 unsigned int tmp_age_count;
827 unsigned int error = -1;
828 unsigned int age_count;
829 unsigned int age_unit;
830
831 /* Applied timer is (AGE_CNT + 1) * (AGE_UNIT + 1) seconds */
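/* As one possible encoding, AGE_CNT = 2 and AGE_UNIT = 99 give
 * (2 + 1) * (99 + 1) = 300 seconds; the loop below searches for the
 * pair with the smallest error against the requested time.
 */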
832 if (secs < 1 || secs > (AGE_CNT_MAX + 1) * (AGE_UNIT_MAX + 1))
833 return -ERANGE;
834
835 /* iterate through all possible age_count to find the closest pair */
836 for (tmp_age_count = 0; tmp_age_count <= AGE_CNT_MAX; ++tmp_age_count) {
837 unsigned int tmp_age_unit = secs / (tmp_age_count + 1) - 1;
838
839 if (tmp_age_unit <= AGE_UNIT_MAX) {
840 unsigned int tmp_error = secs -
841 (tmp_age_count + 1) * (tmp_age_unit + 1);
842
843 /* found a closer pair */
844 if (error > tmp_error) {
845 error = tmp_error;
846 age_count = tmp_age_count;
847 age_unit = tmp_age_unit;
848 }
849
850 /* found the exact match, so break the loop */
851 if (!error)
852 break;
853 }
854 }
855
856 mt7530_write(priv, MT7530_AAC, AGE_CNT(age_count) | AGE_UNIT(age_unit));
857
858 return 0;
859 }
860
861 static const char *mt7530_p5_mode_str(unsigned int mode)
862 {
863 switch (mode) {
864 case MUX_PHY_P0:
865 return "MUX PHY P0";
866 case MUX_PHY_P4:
867 return "MUX PHY P4";
868 default:
869 return "GMAC5";
870 }
871 }
872
873 static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
874 {
875 struct mt7530_priv *priv = ds->priv;
876 u8 tx_delay = 0;
877 int val;
878
879 mutex_lock(&priv->reg_mutex);
880
881 val = mt7530_read(priv, MT753X_MTRAP);
882
883 val &= ~MT7530_P5_PHY0_SEL & ~MT7530_P5_MAC_SEL & ~MT7530_P5_RGMII_MODE;
884
885 switch (priv->p5_mode) {
886 /* MUX_PHY_P0: P0 -> P5 -> SoC MAC */
887 case MUX_PHY_P0:
888 val |= MT7530_P5_PHY0_SEL;
889 fallthrough;
890
891 /* MUX_PHY_P4: P4 -> P5 -> SoC MAC */
892 case MUX_PHY_P4:
893 /* Setup the MAC by default for the cpu port */
894 mt7530_write(priv, MT753X_PMCR_P(5), 0x56300);
895 break;
896
897 /* GMAC5: P5 -> SoC MAC or external PHY */
898 default:
899 val |= MT7530_P5_MAC_SEL;
900 break;
901 }
902
903 /* Setup RGMII settings */
904 if (phy_interface_mode_is_rgmii(interface)) {
905 val |= MT7530_P5_RGMII_MODE;
906
907 /* P5 RGMII RX Clock Control: delay setting for 1000M */
908 mt7530_write(priv, MT7530_P5RGMIIRXCR, CSR_RGMII_EDGE_ALIGN);
909
910 /* Don't set delay in DSA mode */
911 if (!dsa_is_dsa_port(priv->ds, 5) &&
912 (interface == PHY_INTERFACE_MODE_RGMII_TXID ||
913 interface == PHY_INTERFACE_MODE_RGMII_ID))
914 tx_delay = 4; /* n * 0.5 ns */
915
916 /* P5 RGMII TX Clock Control: delay x */
917 mt7530_write(priv, MT7530_P5RGMIITXCR,
918 CSR_RGMII_TXC_CFG(0x10 + tx_delay));
919
920 /* reduce P5 RGMII Tx driving, 8mA */
921 mt7530_write(priv, MT7530_IO_DRV_CR,
922 P5_IO_CLK_DRV(1) | P5_IO_DATA_DRV(1));
923 }
924
925 mt7530_write(priv, MT753X_MTRAP, val);
926
927 dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, mode=%s, phy-mode=%s\n", val,
928 mt7530_p5_mode_str(priv->p5_mode), phy_modes(interface));
929
930 mutex_unlock(&priv->reg_mutex);
931 }
932
933 /* In Clause 5 of IEEE Std 802-2014, two sublayers of the data link layer (DLL)
934 * of the Open Systems Interconnection basic reference model (OSI/RM) are
935 * described; the medium access control (MAC) and logical link control (LLC)
936 * sublayers. The MAC sublayer is the one facing the physical layer.
937 *
938 * In 8.2 of IEEE Std 802.1Q-2022, the Bridge architecture is described. A
939 * Bridge component comprises a MAC Relay Entity for interconnecting the Ports
940 * of the Bridge, at least two Ports, and higher layer entities with at least a
941 * Spanning Tree Protocol Entity included.
942 *
943 * Each Bridge Port also functions as an end station and shall provide the MAC
944 * Service to an LLC Entity. Each instance of the MAC Service is provided to a
945 * distinct LLC Entity that supports protocol identification, multiplexing, and
946 * demultiplexing, for protocol data unit (PDU) transmission and reception by
947 * one or more higher layer entities.
948 *
949 * It is described in 8.13.9 of IEEE Std 802.1Q-2022 that in a Bridge, the LLC
950 * Entity associated with each Bridge Port is modeled as being directly
951 * connected to the attached Local Area Network (LAN).
952 *
953 * On the switch with CPU port architecture, CPU port functions as Management
954 * Port, and the Management Port functionality is provided by software which
955 * functions as an end station. Software is connected to an IEEE 802 LAN that is
956 * wholly contained within the system that incorporates the Bridge. Software
957 * provides access to the LLC Entity associated with each Bridge Port by the
958 * value of the source port field on the special tag on the frame received by
959 * software.
960 *
961 * We call frames that carry control information to determine the active
962 * topology and current extent of each Virtual Local Area Network (VLAN), i.e.,
963 * spanning tree or Shortest Path Bridging (SPB) and Multiple VLAN Registration
964 * Protocol Data Units (MVRPDUs), and frames from other link constrained
965 * protocols, such as Extensible Authentication Protocol over LAN (EAPOL) and
966 * Link Layer Discovery Protocol (LLDP), link-local frames. They are not
967 * forwarded by a Bridge. Permanently configured entries in the filtering
968 * database (FDB) ensure that such frames are discarded by the Forwarding
969 * Process. In 8.6.3 of IEEE Std 802.1Q-2022, this is described in detail:
970 *
971 * Each of the reserved MAC addresses specified in Table 8-1
972 * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]) shall be
973 * permanently configured in the FDB in C-VLAN components and ERs.
974 *
975 * Each of the reserved MAC addresses specified in Table 8-2
976 * (01-80-C2-00-00-[01,02,03,04,05,06,07,08,09,0A,0E]) shall be permanently
977 * configured in the FDB in S-VLAN components.
978 *
979 * Each of the reserved MAC addresses specified in Table 8-3
980 * (01-80-C2-00-00-[01,02,04,0E]) shall be permanently configured in the FDB in
981 * TPMR components.
982 *
983 * The FDB entries for reserved MAC addresses shall specify filtering for all
984 * Bridge Ports and all VIDs. Management shall not provide the capability to
985 * modify or remove entries for reserved MAC addresses.
986 *
987 * The addresses in Table 8-1, Table 8-2, and Table 8-3 determine the scope of
988 * propagation of PDUs within a Bridged Network, as follows:
989 *
990 * The Nearest Bridge group address (01-80-C2-00-00-0E) is an address that no
991 * conformant Two-Port MAC Relay (TPMR) component, Service VLAN (S-VLAN)
992 * component, Customer VLAN (C-VLAN) component, or MAC Bridge can forward.
993 * PDUs transmitted using this destination address, or any other addresses
994 * that appear in Table 8-1, Table 8-2, and Table 8-3
995 * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]), can
996 * therefore travel no further than those stations that can be reached via a
997 * single individual LAN from the originating station.
998 *
999 * The Nearest non-TPMR Bridge group address (01-80-C2-00-00-03), is an
1000 * address that no conformant S-VLAN component, C-VLAN component, or MAC
1001 * Bridge can forward; however, this address is relayed by a TPMR component.
1002 * PDUs using this destination address, or any of the other addresses that
1003 * appear in both Table 8-1 and Table 8-2 but not in Table 8-3
1004 * (01-80-C2-00-00-[00,03,05,06,07,08,09,0A,0B,0C,0D,0F]), will be relayed by
1005 * any TPMRs but will propagate no further than the nearest S-VLAN component,
1006 * C-VLAN component, or MAC Bridge.
1007 *
1008 * The Nearest Customer Bridge group address (01-80-C2-00-00-00) is an address
1009 * that no conformant C-VLAN component, MAC Bridge can forward; however, it is
1010 * relayed by TPMR components and S-VLAN components. PDUs using this
1011 * destination address, or any of the other addresses that appear in Table 8-1
1012 * but not in either Table 8-2 or Table 8-3 (01-80-C2-00-00-[00,0B,0C,0D,0F]),
1013 * will be relayed by TPMR components and S-VLAN components but will propagate
1014 * no further than the nearest C-VLAN component or MAC Bridge.
1015 *
1016 * Because the LLC Entity associated with each Bridge Port is provided via CPU
1017 * port, we must not filter these frames but forward them to CPU port.
1018 *
1019 * In a Bridge, the transmission Port is majorly decided by ingress and egress
1020 * rules, FDB, and spanning tree Port State functions of the Forwarding Process.
1021 * For link-local frames, only CPU port should be designated as destination port
1022 * in the FDB, and the other functions of the Forwarding Process must not
1023 * interfere with the decision of the transmission Port. We call this process
1024 * trapping frames to CPU port.
1025 *
1026 * Therefore, on the switch with CPU port architecture, link-local frames must
1027 * be trapped to CPU port, and certain link-local frames received by a Port of a
1028 * Bridge comprising a TPMR component or an S-VLAN component must be excluded
1029 * from it.
1030 *
1031 * A Bridge of the switch with CPU port architecture cannot comprise a Two-Port
1032 * MAC Relay (TPMR) component as a TPMR component supports only a subset of the
1033 * functionality of a MAC Bridge. A Bridge comprising two Ports (Management Port
1034 * doesn't count) of this architecture will either function as a standard MAC
1035 * Bridge or a standard VLAN Bridge.
1036 *
1037 * Therefore, a Bridge of this architecture can only comprise S-VLAN components,
1038 * C-VLAN components, or MAC Bridge components. Since there's no TPMR component,
1039 * we don't need to relay PDUs using the destination addresses specified on the
1040 * Nearest non-TPMR section, and the proportion of the Nearest Customer Bridge
1041 * section where they must be relayed by TPMR components.
1042 *
1043 * One option to trap link-local frames to CPU port is to add static FDB entries
1044 * with CPU port designated as destination port. However, because
1045 * Independent VLAN Learning (IVL) is being used on every VID, each entry only
1046 * applies to a single VLAN Identifier (VID). For a Bridge comprising a MAC
1047 * Bridge component or a C-VLAN component, there would have to be 16 times 4096
1048 * entries. This switch intellectual property can only hold a maximum of 2048
1049 * entries. Using this option, there also isn't a mechanism to prevent
1050 * link-local frames from being discarded when the spanning tree Port State of
1051 * the reception Port is discarding.
1052 *
1053 * The remaining option is to utilise the BPC, RGAC1, RGAC2, RGAC3, and RGAC4
1054 * registers. Whilst this applies to every VID, it doesn't contain all of the
1055 * reserved MAC addresses without affecting the remaining Standard Group MAC
1056 * Addresses. The REV_UN frame tag utilised using the RGAC4 register covers the
1057 * remaining 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F] destination
1058 * addresses. It also includes the 01-80-C2-00-00-22 to 01-80-C2-00-00-FF
1059 * destination addresses which may be relayed by MAC Bridges or VLAN Bridges.
1060 * The latter option provides better but not complete conformance.
1061 *
1062 * This switch intellectual property also does not provide a mechanism to trap
1063 * link-local frames with specific destination addresses to CPU port by Bridge,
1064 * to conform to the filtering rules for the distinct Bridge components.
1065 *
1066 * Therefore, regardless of the type of the Bridge component, link-local frames
1067 * with these destination addresses will be trapped to CPU port:
1068 *
1069 * 01-80-C2-00-00-[00,01,02,03,0E]
1070 *
1071 * In a Bridge comprising a MAC Bridge component or a C-VLAN component:
1072 *
1073 * Link-local frames with these destination addresses won't be trapped to CPU
1074 * port which won't conform to IEEE Std 802.1Q-2022:
1075 *
1076 * 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F]
1077 *
1078 * In a Bridge comprising an S-VLAN component:
1079 *
1080 * Link-local frames with these destination addresses will be trapped to CPU
1081 * port which won't conform to IEEE Std 802.1Q-2022:
1082 *
1083 * 01-80-C2-00-00-00
1084 *
1085 * Link-local frames with these destination addresses won't be trapped to CPU
1086 * port which won't conform to IEEE Std 802.1Q-2022:
1087 *
1088 * 01-80-C2-00-00-[04,05,06,07,08,09,0A]
1089 *
1090 * To trap link-local frames to CPU port as conformant as this switch
1091 * intellectual property can allow, link-local frames are made to be regarded as
1092 * Bridge Protocol Data Units (BPDUs). This is because this switch intellectual
1093 * property only lets the frames regarded as BPDUs bypass the spanning tree Port
1094 * State function of the Forwarding Process.
1095 *
1096 * The only remaining interference is the ingress rules. When the reception Port
1097 * has no PVID assigned on software, VLAN-untagged frames won't be allowed in.
1098 * There doesn't seem to be a mechanism on the switch intellectual property to
1099 * have link-local frames bypass this function of the Forwarding Process.
1100 */
1101 static void
1102 mt753x_trap_frames(struct mt7530_priv *priv)
1103 {
1104 /* Trap 802.1X PAE frames and BPDUs to the CPU port(s) and egress them
1105 * VLAN-untagged.
1106 */
1107 mt7530_rmw(priv, MT753X_BPC,
1108 PAE_BPDU_FR | PAE_EG_TAG_MASK | PAE_PORT_FW_MASK |
1109 BPDU_EG_TAG_MASK | BPDU_PORT_FW_MASK,
1110 PAE_BPDU_FR | PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
1111 PAE_PORT_FW(TO_CPU_FW_CPU_ONLY) |
1112 BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
1113 TO_CPU_FW_CPU_ONLY);
1114
1115 /* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress
1116 * them VLAN-untagged.
1117 */
1118 mt7530_rmw(priv, MT753X_RGAC1,
1119 R02_BPDU_FR | R02_EG_TAG_MASK | R02_PORT_FW_MASK |
1120 R01_BPDU_FR | R01_EG_TAG_MASK | R01_PORT_FW_MASK,
1121 R02_BPDU_FR | R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
1122 R02_PORT_FW(TO_CPU_FW_CPU_ONLY) | R01_BPDU_FR |
1123 R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
1124 TO_CPU_FW_CPU_ONLY);
1125
1126 /* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress
1127 * them VLAN-untagged.
1128 */
1129 mt7530_rmw(priv, MT753X_RGAC2,
1130 R0E_BPDU_FR | R0E_EG_TAG_MASK | R0E_PORT_FW_MASK |
1131 R03_BPDU_FR | R03_EG_TAG_MASK | R03_PORT_FW_MASK,
1132 R0E_BPDU_FR | R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
1133 R0E_PORT_FW(TO_CPU_FW_CPU_ONLY) | R03_BPDU_FR |
1134 R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
1135 TO_CPU_FW_CPU_ONLY);
1136 }
1137
1138 static void
1139 mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
1140 {
1141 struct mt7530_priv *priv = ds->priv;
1142
1143 /* Enable Mediatek header mode on the cpu port */
1144 mt7530_write(priv, MT7530_PVC_P(port),
1145 PORT_SPEC_TAG);
1146
1147 /* Enable flooding on the CPU port */
1148 mt7530_set(priv, MT753X_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
1149 UNU_FFP(BIT(port)));
1150
1151 /* Add the CPU port to the CPU port bitmap for MT7531 and the switch on
1152 * the MT7988 SoC. Trapped frames will be forwarded to the CPU port that
1153 * is affine to the inbound user port.
1154 */
1155 if (priv->id == ID_MT7531 || priv->id == ID_MT7988 ||
1156 priv->id == ID_EN7581)
1157 mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port)));
1158
1159 /* CPU port gets connected to all user ports of
1160 * the switch.
1161 */
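/* For example, with user ports 0-4 the matrix written below is 0x1f,
 * letting the CPU port reach every user port.
 */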
1162 mt7530_write(priv, MT7530_PCR_P(port),
1163 PCR_MATRIX(dsa_user_ports(priv->ds)));
1164
1165 /* Set to fallback mode for independent VLAN learning */
1166 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
1167 MT7530_PORT_FALLBACK_MODE);
1168 }
1169
1170 static int
1171 mt7530_port_enable(struct dsa_switch *ds, int port,
1172 struct phy_device *phy)
1173 {
1174 struct dsa_port *dp = dsa_to_port(ds, port);
1175 struct mt7530_priv *priv = ds->priv;
1176
1177 mutex_lock(&priv->reg_mutex);
1178
1179 /* Allow the user port to get connected to the CPU port and also
1180 * restore the port matrix if the port is a member of a certain
1181 * bridge.
1182 */
1183 if (dsa_port_is_user(dp)) {
1184 struct dsa_port *cpu_dp = dp->cpu_dp;
1185
1186 priv->ports[port].pm |= PCR_MATRIX(BIT(cpu_dp->index));
1187 }
1188 priv->ports[port].enable = true;
1189 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
1190 priv->ports[port].pm);
1191
1192 mutex_unlock(&priv->reg_mutex);
1193
1194 if (priv->id != ID_MT7530 && priv->id != ID_MT7621)
1195 return 0;
1196
1197 if (port == 5)
1198 mt7530_clear(priv, MT753X_MTRAP, MT7530_P5_DIS);
1199 else if (port == 6)
1200 mt7530_clear(priv, MT753X_MTRAP, MT7530_P6_DIS);
1201
1202 return 0;
1203 }
1204
1205 static void
1206 mt7530_port_disable(struct dsa_switch *ds, int port)
1207 {
1208 struct mt7530_priv *priv = ds->priv;
1209
1210 mutex_lock(&priv->reg_mutex);
1211
1212 /* Clear up the port matrix, which could be restored in the next
1213 * enablement of the port.
1214 */
1215 priv->ports[port].enable = false;
1216 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
1217 PCR_MATRIX_CLR);
1218
1219 mutex_unlock(&priv->reg_mutex);
1220
1221 if (priv->id != ID_MT7530 && priv->id != ID_MT7621)
1222 return;
1223
1224 /* Do not set MT7530_P5_DIS when port 5 is being used for PHY muxing. */
1225 if (port == 5 && priv->p5_mode == GMAC5)
1226 mt7530_set(priv, MT753X_MTRAP, MT7530_P5_DIS);
1227 else if (port == 6)
1228 mt7530_set(priv, MT753X_MTRAP, MT7530_P6_DIS);
1229 }
1230
1231 static int
1232 mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
1233 {
1234 struct mt7530_priv *priv = ds->priv;
1235 int length;
1236 u32 val;
1237
1238 /* When a new MTU is set, DSA always sets the CPU port's MTU to the
1239 * largest MTU of the user ports. Because the switch only has a global
1240 * RX length register, only allowing CPU port here is enough.
1241 */
1242 if (!dsa_is_cpu_port(ds, port))
1243 return 0;
1244
1245 mt7530_mutex_lock(priv);
1246
1247 val = mt7530_mii_read(priv, MT7530_GMACCR);
1248 val &= ~MAX_RX_PKT_LEN_MASK;
1249
1250 /* RX length also includes Ethernet header, MTK tag, and FCS length */
1251 length = new_mtu + ETH_HLEN + MTK_HDR_LEN + ETH_FCS_LEN;
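/* e.g. the default MTU of 1500 gives 1500 + 14 + 4 + 4 = 1522 bytes,
 * which selects MAX_RX_PKT_LEN_1522 below.
 */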
1252 if (length <= 1522) {
1253 val |= MAX_RX_PKT_LEN_1522;
1254 } else if (length <= 1536) {
1255 val |= MAX_RX_PKT_LEN_1536;
1256 } else if (length <= 1552) {
1257 val |= MAX_RX_PKT_LEN_1552;
1258 } else {
1259 val &= ~MAX_RX_JUMBO_MASK;
1260 val |= MAX_RX_JUMBO(DIV_ROUND_UP(length, 1024));
1261 val |= MAX_RX_PKT_LEN_JUMBO;
1262 }
1263
1264 mt7530_mii_write(priv, MT7530_GMACCR, val);
1265
1266 mt7530_mutex_unlock(priv);
1267
1268 return 0;
1269 }
1270
1271 static int
1272 mt7530_port_max_mtu(struct dsa_switch *ds, int port)
1273 {
1274 return MT7530_MAX_MTU;
1275 }
1276
1277 static void
1278 mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state)
1279 {
1280 struct mt7530_priv *priv = ds->priv;
1281 u32 stp_state;
1282
1283 switch (state) {
1284 case BR_STATE_DISABLED:
1285 stp_state = MT7530_STP_DISABLED;
1286 break;
1287 case BR_STATE_BLOCKING:
1288 stp_state = MT7530_STP_BLOCKING;
1289 break;
1290 case BR_STATE_LISTENING:
1291 stp_state = MT7530_STP_LISTENING;
1292 break;
1293 case BR_STATE_LEARNING:
1294 stp_state = MT7530_STP_LEARNING;
1295 break;
1296 case BR_STATE_FORWARDING:
1297 default:
1298 stp_state = MT7530_STP_FORWARDING;
1299 break;
1300 }
1301
1302 mt7530_rmw(priv, MT7530_SSP_P(port), FID_PST_MASK(FID_BRIDGED),
1303 FID_PST(FID_BRIDGED, stp_state));
1304 }
1305
1306 static void mt7530_update_port_member(struct mt7530_priv *priv, int port,
1307 const struct net_device *bridge_dev,
1308 bool join) __must_hold(&priv->reg_mutex)
1309 {
1310 struct dsa_port *dp = dsa_to_port(priv->ds, port), *other_dp;
1311 struct mt7530_port *p = &priv->ports[port], *other_p;
1312 struct dsa_port *cpu_dp = dp->cpu_dp;
1313 u32 port_bitmap = BIT(cpu_dp->index);
1314 int other_port;
1315 bool isolated;
1316
1317 dsa_switch_for_each_user_port(other_dp, priv->ds) {
1318 other_port = other_dp->index;
1319 other_p = &priv->ports[other_port];
1320
1321 if (dp == other_dp)
1322 continue;
1323
1324 /* Add/remove this port to/from the port matrix of the other
1325 * ports in the same bridge. If the port is disabled, port
1326 * matrix is kept and not being setup until the port becomes
1327 * enabled.
1328 */
1329 if (!dsa_port_offloads_bridge_dev(other_dp, bridge_dev))
1330 continue;
1331
1332 isolated = p->isolated && other_p->isolated;
1333
1334 if (join && !isolated) {
1335 other_p->pm |= PCR_MATRIX(BIT(port));
1336 port_bitmap |= BIT(other_port);
1337 } else {
1338 other_p->pm &= ~PCR_MATRIX(BIT(port));
1339 }
1340
1341 if (other_p->enable)
1342 mt7530_rmw(priv, MT7530_PCR_P(other_port),
1343 PCR_MATRIX_MASK, other_p->pm);
1344 }
1345
1346 /* Add/remove all the other ports to/from this port matrix. For !join
1347 * (leaving the bridge), only the CPU port will remain in the port matrix
1348 * of this port.
1349 */
1350 p->pm = PCR_MATRIX(port_bitmap);
1351 if (priv->ports[port].enable)
1352 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, p->pm);
1353 }
1354
1355 static int
1356 mt7530_port_pre_bridge_flags(struct dsa_switch *ds, int port,
1357 struct switchdev_brport_flags flags,
1358 struct netlink_ext_ack *extack)
1359 {
1360 if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
1361 BR_BCAST_FLOOD | BR_ISOLATED))
1362 return -EINVAL;
1363
1364 return 0;
1365 }
1366
1367 static int
1368 mt7530_port_bridge_flags(struct dsa_switch *ds, int port,
1369 struct switchdev_brport_flags flags,
1370 struct netlink_ext_ack *extack)
1371 {
1372 struct mt7530_priv *priv = ds->priv;
1373
1374 if (flags.mask & BR_LEARNING)
1375 mt7530_rmw(priv, MT7530_PSC_P(port), SA_DIS,
1376 flags.val & BR_LEARNING ? 0 : SA_DIS);
1377
1378 if (flags.mask & BR_FLOOD)
1379 mt7530_rmw(priv, MT753X_MFC, UNU_FFP(BIT(port)),
1380 flags.val & BR_FLOOD ? UNU_FFP(BIT(port)) : 0);
1381
1382 if (flags.mask & BR_MCAST_FLOOD)
1383 mt7530_rmw(priv, MT753X_MFC, UNM_FFP(BIT(port)),
1384 flags.val & BR_MCAST_FLOOD ? UNM_FFP(BIT(port)) : 0);
1385
1386 if (flags.mask & BR_BCAST_FLOOD)
1387 mt7530_rmw(priv, MT753X_MFC, BC_FFP(BIT(port)),
1388 flags.val & BR_BCAST_FLOOD ? BC_FFP(BIT(port)) : 0);
1389
1390 if (flags.mask & BR_ISOLATED) {
1391 struct dsa_port *dp = dsa_to_port(ds, port);
1392 struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp);
1393
1394 priv->ports[port].isolated = !!(flags.val & BR_ISOLATED);
1395
1396 mutex_lock(&priv->reg_mutex);
1397 mt7530_update_port_member(priv, port, bridge_dev, true);
1398 mutex_unlock(&priv->reg_mutex);
1399 }
1400
1401 return 0;
1402 }
1403
1404 static int
1405 mt7530_port_bridge_join(struct dsa_switch *ds, int port,
1406 struct dsa_bridge bridge, bool *tx_fwd_offload,
1407 struct netlink_ext_ack *extack)
1408 {
1409 struct mt7530_priv *priv = ds->priv;
1410
1411 mutex_lock(&priv->reg_mutex);
1412
1413 mt7530_update_port_member(priv, port, bridge.dev, true);
1414
1415 /* Set to fallback mode for independent VLAN learning */
1416 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
1417 MT7530_PORT_FALLBACK_MODE);
1418
1419 mutex_unlock(&priv->reg_mutex);
1420
1421 return 0;
1422 }
1423
1424 static void
1425 mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
1426 {
1427 struct mt7530_priv *priv = ds->priv;
1428 bool all_user_ports_removed = true;
1429 int i;
1430
1431 /* This is called after .port_bridge_leave when leaving a VLAN-aware
1432 * bridge. Don't set standalone ports to fallback mode.
1433 */
1434 if (dsa_port_bridge_dev_get(dsa_to_port(ds, port)))
1435 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
1436 MT7530_PORT_FALLBACK_MODE);
1437
1438 mt7530_rmw(priv, MT7530_PVC_P(port),
1439 VLAN_ATTR_MASK | PVC_EG_TAG_MASK | ACC_FRM_MASK,
1440 VLAN_ATTR(MT7530_VLAN_TRANSPARENT) |
1441 PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT) |
1442 MT7530_VLAN_ACC_ALL);
1443
1444 /* Set PVID to 0 */
1445 mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
1446 G0_PORT_VID_DEF);
1447
1448 for (i = 0; i < priv->ds->num_ports; i++) {
1449 if (dsa_is_user_port(ds, i) &&
1450 dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
1451 all_user_ports_removed = false;
1452 break;
1453 }
1454 }
1455
1456 /* CPU port also does the same thing until all user ports belonging to
1457 * the CPU port get out of VLAN filtering mode.
1458 */
1459 if (all_user_ports_removed) {
1460 struct dsa_port *dp = dsa_to_port(ds, port);
1461 struct dsa_port *cpu_dp = dp->cpu_dp;
1462
1463 mt7530_write(priv, MT7530_PCR_P(cpu_dp->index),
1464 PCR_MATRIX(dsa_user_ports(priv->ds)));
1465 mt7530_write(priv, MT7530_PVC_P(cpu_dp->index), PORT_SPEC_TAG
1466 | PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
1467 }
1468 }
1469
1470 static void
1471 mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
1472 {
1473 struct mt7530_priv *priv = ds->priv;
1474
1475 /* Putting the port into security mode allows packet forwarding through
1476 * VLAN table lookup.
1477 */
1478 if (dsa_is_user_port(ds, port)) {
1479 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
1480 MT7530_PORT_SECURITY_MODE);
1481 mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
1482 G0_PORT_VID(priv->ports[port].pvid));
1483
1484 /* Only accept tagged frames if PVID is not set */
1485 if (!priv->ports[port].pvid)
1486 mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
1487 MT7530_VLAN_ACC_TAGGED);
1488
1489 /* Set the port as a user port so that it is able to recognize the
1490 * VID of incoming packets before fetching the entry from the
1491 * VLAN table.
1492 */
1493 mt7530_rmw(priv, MT7530_PVC_P(port),
1494 VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
1495 VLAN_ATTR(MT7530_VLAN_USER) |
1496 PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
1497 } else {
1498 /* Also set CPU ports to the "user" VLAN port attribute, to
1499 * allow VLAN classification, but keep the EG_TAG attribute as
1500 * "consistent" (i.o.w. don't change its value) for packets
1501 * received by the switch from the CPU, so that tagged packets
1502 * are forwarded to user ports as tagged, and untagged as
1503 * untagged.
1504 */
1505 mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
1506 VLAN_ATTR(MT7530_VLAN_USER));
1507 }
1508 }
1509
1510 static void
1511 mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
1512 struct dsa_bridge bridge)
1513 {
1514 struct mt7530_priv *priv = ds->priv;
1515
1516 mutex_lock(&priv->reg_mutex);
1517
1518 mt7530_update_port_member(priv, port, bridge.dev, false);
1519
1520 /* When a port is removed from the bridge, the port is set back
1521 * to the default, as at initial boot, which is a VLAN-unaware
1522 * port.
1523 */
1524 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
1525 MT7530_PORT_MATRIX_MODE);
1526
1527 mutex_unlock(&priv->reg_mutex);
1528 }
1529
1530 static int
1531 mt7530_port_fdb_add(struct dsa_switch *ds, int port,
1532 const unsigned char *addr, u16 vid,
1533 struct dsa_db db)
1534 {
1535 struct mt7530_priv *priv = ds->priv;
1536 int ret;
1537 u8 port_mask = BIT(port);
1538
1539 mutex_lock(&priv->reg_mutex);
1540 mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT);
1541 ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
1542 mutex_unlock(&priv->reg_mutex);
1543
1544 return ret;
1545 }
1546
1547 static int
1548 mt7530_port_fdb_del(struct dsa_switch *ds, int port,
1549 const unsigned char *addr, u16 vid,
1550 struct dsa_db db)
1551 {
1552 struct mt7530_priv *priv = ds->priv;
1553 int ret;
1554 u8 port_mask = BIT(port);
1555
1556 mutex_lock(&priv->reg_mutex);
1557 mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_EMP);
1558 ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
1559 mutex_unlock(&priv->reg_mutex);
1560
1561 return ret;
1562 }
1563
1564 static int
1565 mt7530_port_fdb_dump(struct dsa_switch *ds, int port,
1566 dsa_fdb_dump_cb_t *cb, void *data)
1567 {
1568 struct mt7530_priv *priv = ds->priv;
1569 struct mt7530_fdb _fdb = { 0 };
1570 int cnt = MT7530_NUM_FDB_RECORDS;
1571 int ret = 0;
1572 u32 rsp = 0;
1573
1574 mutex_lock(&priv->reg_mutex);
1575
1576 ret = mt7530_fdb_cmd(priv, MT7530_FDB_START, &rsp);
1577 if (ret < 0)
1578 goto err;
1579
1580 do {
1581 if (rsp & ATC_SRCH_HIT) {
1582 mt7530_fdb_read(priv, &_fdb);
1583 if (_fdb.port_mask & BIT(port)) {
1584 ret = cb(_fdb.mac, _fdb.vid, _fdb.noarp,
1585 data);
1586 if (ret < 0)
1587 break;
1588 }
1589 }
1590 } while (--cnt &&
1591 !(rsp & ATC_SRCH_END) &&
1592 !mt7530_fdb_cmd(priv, MT7530_FDB_NEXT, &rsp));
1593 err:
1594 mutex_unlock(&priv->reg_mutex);
1595
1596 return 0;
1597 }
1598
1599 static int
1600 mt7530_port_mdb_add(struct dsa_switch *ds, int port,
1601 const struct switchdev_obj_port_mdb *mdb,
1602 struct dsa_db db)
1603 {
1604 struct mt7530_priv *priv = ds->priv;
1605 const u8 *addr = mdb->addr;
1606 u16 vid = mdb->vid;
1607 u8 port_mask = 0;
1608 int ret;
1609
1610 mutex_lock(&priv->reg_mutex);
1611
1612 mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP);
1613 if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL))
1614 port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP)
1615 & PORT_MAP_MASK;
1616
1617 port_mask |= BIT(port);
1618 mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT);
1619 ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
1620
1621 mutex_unlock(&priv->reg_mutex);
1622
1623 return ret;
1624 }
1625
1626 static int
1627 mt7530_port_mdb_del(struct dsa_switch *ds, int port,
1628 const struct switchdev_obj_port_mdb *mdb,
1629 struct dsa_db db)
1630 {
1631 struct mt7530_priv *priv = ds->priv;
1632 const u8 *addr = mdb->addr;
1633 u16 vid = mdb->vid;
1634 u8 port_mask = 0;
1635 int ret;
1636
1637 mutex_lock(&priv->reg_mutex);
1638
1639 mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP);
1640 if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL))
1641 port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP)
1642 & PORT_MAP_MASK;
1643
1644 port_mask &= ~BIT(port);
1645 mt7530_fdb_write(priv, vid, port_mask, addr, -1,
1646 port_mask ? STATIC_ENT : STATIC_EMP);
1647 ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
1648
1649 mutex_unlock(&priv->reg_mutex);
1650
1651 return ret;
1652 }
1653
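/* Issue a command to the VLAN table access engine and wait for it to
 * complete. VTCR_BUSY is polled until the hardware clears it; a result
 * flagged with VTCR_INVALID is reported as -EINVAL.
 */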
1654 static int
1655 mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid)
1656 {
1657 struct mt7530_dummy_poll p;
1658 u32 val;
1659 int ret;
1660
1661 val = VTCR_BUSY | VTCR_FUNC(cmd) | vid;
1662 mt7530_write(priv, MT7530_VTCR, val);
1663
1664 INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_VTCR);
1665 ret = readx_poll_timeout(_mt7530_read, &p, val,
1666 !(val & VTCR_BUSY), 20, 20000);
1667 if (ret < 0) {
1668 dev_err(priv->dev, "poll timeout\n");
1669 return ret;
1670 }
1671
1672 val = mt7530_read(priv, MT7530_VTCR);
1673 if (val & VTCR_INVALID) {
1674 dev_err(priv->dev, "read VTCR invalid\n");
1675 return -EINVAL;
1676 }
1677
1678 return 0;
1679 }
1680
1681 static int
1682 mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
1683 struct netlink_ext_ack *extack)
1684 {
1685 struct dsa_port *dp = dsa_to_port(ds, port);
1686 struct dsa_port *cpu_dp = dp->cpu_dp;
1687
1688 if (vlan_filtering) {
1689 /* The port stays VLAN-unaware as long as the bridge is set up
1690 * with vlan_filtering disabled. Otherwise, the port and the
1691 * corresponding CPU port must both be set up to become
1692 * VLAN-aware ports.
1693 */
1694 mt7530_port_set_vlan_aware(ds, port);
1695 mt7530_port_set_vlan_aware(ds, cpu_dp->index);
1696 } else {
1697 mt7530_port_set_vlan_unaware(ds, port);
1698 }
1699
1700 return 0;
1701 }
1702
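/* Add @entry->port to the VLAN entry currently latched in the VAWD
 * registers, and program the per-port egress tagging mode: "stack" for
 * the CPU port, "untag" or "tag" for user ports depending on the
 * requested untagged flag.
 */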
1703 static void
1704 mt7530_hw_vlan_add(struct mt7530_priv *priv,
1705 struct mt7530_hw_vlan_entry *entry)
1706 {
1707 struct dsa_port *dp = dsa_to_port(priv->ds, entry->port);
1708 u8 new_members;
1709 u32 val;
1710
1711 new_members = entry->old_members | BIT(entry->port);
1712
1713 /* Validate the entry with independent learning, create an egress tag
1714 * per VLAN, and join the port as one of the port members.
1715 */
1716 val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | FID(FID_BRIDGED) |
1717 VLAN_VALID;
1718 mt7530_write(priv, MT7530_VAWD1, val);
1719
1720 /* Decide whether to tag packets leaving the port as members of this
1721 * VLAN.
1722 * The CPU port is always treated as a tagged port, since it serves
1723 * more than one VLAN, and uses the "stack" egress mode so that the
1724 * VLAN tag is appended after the hardware special tag used as the
1725 * DSA tag.
1726 */
1727 if (dsa_port_is_cpu(dp))
1728 val = MT7530_VLAN_EGRESS_STACK;
1729 else if (entry->untagged)
1730 val = MT7530_VLAN_EGRESS_UNTAG;
1731 else
1732 val = MT7530_VLAN_EGRESS_TAG;
1733 mt7530_rmw(priv, MT7530_VAWD2,
1734 ETAG_CTRL_P_MASK(entry->port),
1735 ETAG_CTRL_P(entry->port, val));
1736 }
1737
1738 static void
1739 mt7530_hw_vlan_del(struct mt7530_priv *priv,
1740 struct mt7530_hw_vlan_entry *entry)
1741 {
1742 u8 new_members;
1743 u32 val;
1744
1745 new_members = entry->old_members & ~BIT(entry->port);
1746
1747 val = mt7530_read(priv, MT7530_VAWD1);
1748 if (!(val & VLAN_VALID)) {
1749 dev_err(priv->dev,
1750 "Cannot be deleted due to invalid entry\n");
1751 return;
1752 }
1753
1754 if (new_members) {
1755 val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) |
1756 VLAN_VALID;
1757 mt7530_write(priv, MT7530_VAWD1, val);
1758 } else {
1759 mt7530_write(priv, MT7530_VAWD1, 0);
1760 mt7530_write(priv, MT7530_VAWD2, 0);
1761 }
1762 }
1763
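/* Read-modify-write helper for a single VLAN entry: fetch the entry for
 * @vid into the VAWD registers, let @vlan_op adjust the membership, then
 * write the result back to the VLAN table.
 */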
1764 static void
1765 mt7530_hw_vlan_update(struct mt7530_priv *priv, u16 vid,
1766 struct mt7530_hw_vlan_entry *entry,
1767 mt7530_vlan_op vlan_op)
1768 {
1769 u32 val;
1770
1771 /* Fetch entry */
1772 mt7530_vlan_cmd(priv, MT7530_VTCR_RD_VID, vid);
1773
1774 val = mt7530_read(priv, MT7530_VAWD1);
1775
1776 entry->old_members = (val >> PORT_MEM_SHFT) & PORT_MEM_MASK;
1777
1778 /* Manipulate entry */
1779 vlan_op(priv, entry);
1780
1781 /* Flush result to hardware */
1782 mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, vid);
1783 }
1784
1785 static int
1786 mt7530_setup_vlan0(struct mt7530_priv *priv)
1787 {
1788 u32 val;
1789
1790 /* Validate the entry with independent learning, keep the original
1791 * ingress tag attribute.
1792 */
1793 val = IVL_MAC | EG_CON | PORT_MEM(MT7530_ALL_MEMBERS) | FID(FID_BRIDGED) |
1794 VLAN_VALID;
1795 mt7530_write(priv, MT7530_VAWD1, val);
1796
1797 return mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, 0);
1798 }
1799
1800 static int
1801 mt7530_port_vlan_add(struct dsa_switch *ds, int port,
1802 const struct switchdev_obj_port_vlan *vlan,
1803 struct netlink_ext_ack *extack)
1804 {
1805 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1806 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1807 struct mt7530_hw_vlan_entry new_entry;
1808 struct mt7530_priv *priv = ds->priv;
1809
1810 mutex_lock(&priv->reg_mutex);
1811
1812 mt7530_hw_vlan_entry_init(&new_entry, port, untagged);
1813 mt7530_hw_vlan_update(priv, vlan->vid, &new_entry, mt7530_hw_vlan_add);
1814
1815 if (pvid) {
1816 priv->ports[port].pvid = vlan->vid;
1817
1818 /* Accept all frames if PVID is set */
1819 mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
1820 MT7530_VLAN_ACC_ALL);
1821
1822 /* Only configure PVID if VLAN filtering is enabled */
1823 if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
1824 mt7530_rmw(priv, MT7530_PPBV1_P(port),
1825 G0_PORT_VID_MASK,
1826 G0_PORT_VID(vlan->vid));
1827 } else if (vlan->vid && priv->ports[port].pvid == vlan->vid) {
1828 /* This VLAN is overwritten without PVID, so unset it */
1829 priv->ports[port].pvid = G0_PORT_VID_DEF;
1830
1831 /* Only accept tagged frames if the port is VLAN-aware */
1832 if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
1833 mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
1834 MT7530_VLAN_ACC_TAGGED);
1835
1836 mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
1837 G0_PORT_VID_DEF);
1838 }
1839
1840 mutex_unlock(&priv->reg_mutex);
1841
1842 return 0;
1843 }
1844
1845 static int
1846 mt7530_port_vlan_del(struct dsa_switch *ds, int port,
1847 const struct switchdev_obj_port_vlan *vlan)
1848 {
1849 struct mt7530_hw_vlan_entry target_entry;
1850 struct mt7530_priv *priv = ds->priv;
1851
1852 mutex_lock(&priv->reg_mutex);
1853
1854 mt7530_hw_vlan_entry_init(&target_entry, port, 0);
1855 mt7530_hw_vlan_update(priv, vlan->vid, &target_entry,
1856 mt7530_hw_vlan_del);
1857
1858 /* The PVID is restored to the default whenever the PVID port is
1859 * removed from the VLAN.
1860 */
1861 if (priv->ports[port].pvid == vlan->vid) {
1862 priv->ports[port].pvid = G0_PORT_VID_DEF;
1863
1864 /* Only accept tagged frames if the port is VLAN-aware */
1865 if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
1866 mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
1867 MT7530_VLAN_ACC_TAGGED);
1868
1869 mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
1870 G0_PORT_VID_DEF);
1871 }
1872
1874 mutex_unlock(&priv->reg_mutex);
1875
1876 return 0;
1877 }
1878
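/* The hardware provides a single monitor (mirror-to) port, so adding a
 * mirror fails with -EEXIST if this port already mirrors in the requested
 * direction or if mirroring is already enabled towards a different
 * monitor port.
 */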
1879 static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
1880 struct dsa_mall_mirror_tc_entry *mirror,
1881 bool ingress, struct netlink_ext_ack *extack)
1882 {
1883 struct mt7530_priv *priv = ds->priv;
1884 int monitor_port;
1885 u32 val;
1886
1887 /* Check for an existing entry */
1888 if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
1889 return -EEXIST;
1890
1891 val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));
1892
1893 /* MT7530 only supports one monitor port */
1894 monitor_port = MT753X_MIRROR_PORT_GET(priv->id, val);
1895 if (val & MT753X_MIRROR_EN(priv->id) &&
1896 monitor_port != mirror->to_local_port)
1897 return -EEXIST;
1898
1899 val |= MT753X_MIRROR_EN(priv->id);
1900 val &= ~MT753X_MIRROR_PORT_MASK(priv->id);
1901 val |= MT753X_MIRROR_PORT_SET(priv->id, mirror->to_local_port);
1902 mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
1903
1904 val = mt7530_read(priv, MT7530_PCR_P(port));
1905 if (ingress) {
1906 val |= PORT_RX_MIR;
1907 priv->mirror_rx |= BIT(port);
1908 } else {
1909 val |= PORT_TX_MIR;
1910 priv->mirror_tx |= BIT(port);
1911 }
1912 mt7530_write(priv, MT7530_PCR_P(port), val);
1913
1914 return 0;
1915 }
1916
1917 static void mt753x_port_mirror_del(struct dsa_switch *ds, int port,
1918 struct dsa_mall_mirror_tc_entry *mirror)
1919 {
1920 struct mt7530_priv *priv = ds->priv;
1921 u32 val;
1922
1923 val = mt7530_read(priv, MT7530_PCR_P(port));
1924 if (mirror->ingress) {
1925 val &= ~PORT_RX_MIR;
1926 priv->mirror_rx &= ~BIT(port);
1927 } else {
1928 val &= ~PORT_TX_MIR;
1929 priv->mirror_tx &= ~BIT(port);
1930 }
1931 mt7530_write(priv, MT7530_PCR_P(port), val);
1932
1933 if (!priv->mirror_rx && !priv->mirror_tx) {
1934 val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));
1935 val &= ~MT753X_MIRROR_EN(priv->id);
1936 mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
1937 }
1938 }
1939
1940 static enum dsa_tag_protocol
1941 mtk_get_tag_protocol(struct dsa_switch *ds, int port,
1942 enum dsa_tag_protocol mp)
1943 {
1944 return DSA_TAG_PROTO_MTK;
1945 }
1946
1947 #ifdef CONFIG_GPIOLIB
1948 static inline u32
1949 mt7530_gpio_to_bit(unsigned int offset)
1950 {
1951 /* Map GPIO offset to register bit
1952 * [ 2: 0] port 0 LED 0..2 as GPIO 0..2
1953 * [ 6: 4] port 1 LED 0..2 as GPIO 3..5
1954 * [10: 8] port 2 LED 0..2 as GPIO 6..8
1955 * [14:12] port 3 LED 0..2 as GPIO 9..11
1956 * [18:16] port 4 LED 0..2 as GPIO 12..14
1957 */
1958 return BIT(offset + offset / 3);
1959 }
1960
1961 static int
1962 mt7530_gpio_get(struct gpio_chip *gc, unsigned int offset)
1963 {
1964 struct mt7530_priv *priv = gpiochip_get_data(gc);
1965 u32 bit = mt7530_gpio_to_bit(offset);
1966
1967 return !!(mt7530_read(priv, MT7530_LED_GPIO_DATA) & bit);
1968 }
1969
1970 static void
1971 mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
1972 {
1973 struct mt7530_priv *priv = gpiochip_get_data(gc);
1974 u32 bit = mt7530_gpio_to_bit(offset);
1975
1976 if (value)
1977 mt7530_set(priv, MT7530_LED_GPIO_DATA, bit);
1978 else
1979 mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit);
1980 }
1981
1982 static int
1983 mt7530_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
1984 {
1985 struct mt7530_priv *priv = gpiochip_get_data(gc);
1986 u32 bit = mt7530_gpio_to_bit(offset);
1987
1988 return (mt7530_read(priv, MT7530_LED_GPIO_DIR) & bit) ?
1989 GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
1990 }
1991
1992 static int
1993 mt7530_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
1994 {
1995 struct mt7530_priv *priv = gpiochip_get_data(gc);
1996 u32 bit = mt7530_gpio_to_bit(offset);
1997
1998 mt7530_clear(priv, MT7530_LED_GPIO_OE, bit);
1999 mt7530_clear(priv, MT7530_LED_GPIO_DIR, bit);
2000
2001 return 0;
2002 }
2003
2004 static int
2005 mt7530_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value)
2006 {
2007 struct mt7530_priv *priv = gpiochip_get_data(gc);
2008 u32 bit = mt7530_gpio_to_bit(offset);
2009
2010 mt7530_set(priv, MT7530_LED_GPIO_DIR, bit);
2011
2012 if (value)
2013 mt7530_set(priv, MT7530_LED_GPIO_DATA, bit);
2014 else
2015 mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit);
2016
2017 mt7530_set(priv, MT7530_LED_GPIO_OE, bit);
2018
2019 return 0;
2020 }
2021
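/* Register a gpiochip exposing the 15 LED pins as GPIOs. The output
 * enable, direction and IO mode registers are cleared up front, which
 * should leave all pins configured as inputs until a consumer claims
 * them.
 */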
2022 static int
2023 mt7530_setup_gpio(struct mt7530_priv *priv)
2024 {
2025 struct device *dev = priv->dev;
2026 struct gpio_chip *gc;
2027
2028 gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
2029 if (!gc)
2030 return -ENOMEM;
2031
2032 mt7530_write(priv, MT7530_LED_GPIO_OE, 0);
2033 mt7530_write(priv, MT7530_LED_GPIO_DIR, 0);
2034 mt7530_write(priv, MT7530_LED_IO_MODE, 0);
2035
2036 gc->label = "mt7530";
2037 gc->parent = dev;
2038 gc->owner = THIS_MODULE;
2039 gc->get_direction = mt7530_gpio_get_direction;
2040 gc->direction_input = mt7530_gpio_direction_input;
2041 gc->direction_output = mt7530_gpio_direction_output;
2042 gc->get = mt7530_gpio_get;
2043 gc->set = mt7530_gpio_set;
2044 gc->base = -1;
2045 gc->ngpio = 15;
2046 gc->can_sleep = true;
2047
2048 return devm_gpiochip_add_data(dev, gc, priv);
2049 }
2050 #endif /* CONFIG_GPIOLIB */
2051
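/* Threaded handler for the switch interrupt line: read and acknowledge
 * the summary status register, then dispatch a nested interrupt for
 * every PHY whose bit is set.
 */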
2052 static irqreturn_t
2053 mt7530_irq_thread_fn(int irq, void *dev_id)
2054 {
2055 struct mt7530_priv *priv = dev_id;
2056 bool handled = false;
2057 u32 val;
2058 int p;
2059
2060 mt7530_mutex_lock(priv);
2061 val = mt7530_mii_read(priv, MT7530_SYS_INT_STS);
2062 mt7530_mii_write(priv, MT7530_SYS_INT_STS, val);
2063 mt7530_mutex_unlock(priv);
2064
2065 for (p = 0; p < MT7530_NUM_PHYS; p++) {
2066 if (BIT(p) & val) {
2067 unsigned int irq;
2068
2069 irq = irq_find_mapping(priv->irq_domain, p);
2070 handle_nested_irq(irq);
2071 handled = true;
2072 }
2073 }
2074
2075 return IRQ_RETVAL(handled);
2076 }
2077
2078 static void
2079 mt7530_irq_mask(struct irq_data *d)
2080 {
2081 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
2082
2083 priv->irq_enable &= ~BIT(d->hwirq);
2084 }
2085
2086 static void
2087 mt7530_irq_unmask(struct irq_data *d)
2088 {
2089 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
2090
2091 priv->irq_enable |= BIT(d->hwirq);
2092 }
2093
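/* The interrupt enable register sits behind a slow (possibly MDIO-based)
 * bus, so mask/unmask only update the cached priv->irq_enable value. The
 * bus lock/sync_unlock pair below takes the bus, flushes the cached mask
 * to MT7530_SYS_INT_EN and releases the bus again.
 */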
2094 static void
2095 mt7530_irq_bus_lock(struct irq_data *d)
2096 {
2097 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
2098
2099 mt7530_mutex_lock(priv);
2100 }
2101
2102 static void
2103 mt7530_irq_bus_sync_unlock(struct irq_data *d)
2104 {
2105 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
2106
2107 mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
2108 mt7530_mutex_unlock(priv);
2109 }
2110
2111 static struct irq_chip mt7530_irq_chip = {
2112 .name = KBUILD_MODNAME,
2113 .irq_mask = mt7530_irq_mask,
2114 .irq_unmask = mt7530_irq_unmask,
2115 .irq_bus_lock = mt7530_irq_bus_lock,
2116 .irq_bus_sync_unlock = mt7530_irq_bus_sync_unlock,
2117 };
2118
2119 static int
2120 mt7530_irq_map(struct irq_domain *domain, unsigned int irq,
2121 irq_hw_number_t hwirq)
2122 {
2123 irq_set_chip_data(irq, domain->host_data);
2124 irq_set_chip_and_handler(irq, &mt7530_irq_chip, handle_simple_irq);
2125 irq_set_nested_thread(irq, true);
2126 irq_set_noprobe(irq);
2127
2128 return 0;
2129 }
2130
2131 static const struct irq_domain_ops mt7530_irq_domain_ops = {
2132 .map = mt7530_irq_map,
2133 .xlate = irq_domain_xlate_onecell,
2134 };
2135
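/* The MT7988 and EN7581 variants drive the switch through memory-mapped
 * registers rather than an MDIO bus, so their irq_chip writes
 * MT7530_SYS_INT_EN directly from the mask/unmask callbacks and does not
 * need the bus lock hooks used above.
 */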
2136 static void
2137 mt7988_irq_mask(struct irq_data *d)
2138 {
2139 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
2140
2141 priv->irq_enable &= ~BIT(d->hwirq);
2142 mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
2143 }
2144
2145 static void
2146 mt7988_irq_unmask(struct irq_data *d)
2147 {
2148 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
2149
2150 priv->irq_enable |= BIT(d->hwirq);
2151 mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
2152 }
2153
2154 static struct irq_chip mt7988_irq_chip = {
2155 .name = KBUILD_MODNAME,
2156 .irq_mask = mt7988_irq_mask,
2157 .irq_unmask = mt7988_irq_unmask,
2158 };
2159
2160 static int
2161 mt7988_irq_map(struct irq_domain *domain, unsigned int irq,
2162 irq_hw_number_t hwirq)
2163 {
2164 irq_set_chip_data(irq, domain->host_data);
2165 irq_set_chip_and_handler(irq, &mt7988_irq_chip, handle_simple_irq);
2166 irq_set_nested_thread(irq, true);
2167 irq_set_noprobe(irq);
2168
2169 return 0;
2170 }
2171
2172 static const struct irq_domain_ops mt7988_irq_domain_ops = {
2173 .map = mt7988_irq_map,
2174 .xlate = irq_domain_xlate_onecell,
2175 };
2176
2177 static void
2178 mt7530_setup_mdio_irq(struct mt7530_priv *priv)
2179 {
2180 struct dsa_switch *ds = priv->ds;
2181 int p;
2182
2183 for (p = 0; p < MT7530_NUM_PHYS; p++) {
2184 if (BIT(p) & ds->phys_mii_mask) {
2185 unsigned int irq;
2186
2187 irq = irq_create_mapping(priv->irq_domain, p);
2188 ds->user_mii_bus->irq[p] = irq;
2189 }
2190 }
2191 }
2192
2193 static int
2194 mt7530_setup_irq(struct mt7530_priv *priv)
2195 {
2196 struct device *dev = priv->dev;
2197 struct device_node *np = dev->of_node;
2198 int ret;
2199
2200 if (!of_property_read_bool(np, "interrupt-controller")) {
2201 dev_info(dev, "no interrupt support\n");
2202 return 0;
2203 }
2204
2205 priv->irq = of_irq_get(np, 0);
2206 if (priv->irq <= 0) {
2207 dev_err(dev, "failed to get parent IRQ: %d\n", priv->irq);
2208 return priv->irq ? : -EINVAL;
2209 }
2210
2211 if (priv->id == ID_MT7988 || priv->id == ID_EN7581)
2212 priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
2213 &mt7988_irq_domain_ops,
2214 priv);
2215 else
2216 priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
2217 &mt7530_irq_domain_ops,
2218 priv);
2219
2220 if (!priv->irq_domain) {
2221 dev_err(dev, "failed to create IRQ domain\n");
2222 return -ENOMEM;
2223 }
2224
2225 /* This register must be set for MT7530 to properly fire interrupts */
2226 if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
2227 mt7530_set(priv, MT7530_TOP_SIG_CTRL, TOP_SIG_CTRL_NORMAL);
2228
2229 ret = request_threaded_irq(priv->irq, NULL, mt7530_irq_thread_fn,
2230 IRQF_ONESHOT, KBUILD_MODNAME, priv);
2231 if (ret) {
2232 irq_domain_remove(priv->irq_domain);
2233 dev_err(dev, "failed to request IRQ: %d\n", ret);
2234 return ret;
2235 }
2236
2237 return 0;
2238 }
2239
2240 static void
2241 mt7530_free_mdio_irq(struct mt7530_priv *priv)
2242 {
2243 int p;
2244
2245 for (p = 0; p < MT7530_NUM_PHYS; p++) {
2246 if (BIT(p) & priv->ds->phys_mii_mask) {
2247 unsigned int irq;
2248
2249 irq = irq_find_mapping(priv->irq_domain, p);
2250 irq_dispose_mapping(irq);
2251 }
2252 }
2253 }
2254
2255 static void
2256 mt7530_free_irq_common(struct mt7530_priv *priv)
2257 {
2258 free_irq(priv->irq, priv);
2259 irq_domain_remove(priv->irq_domain);
2260 }
2261
2262 static void
2263 mt7530_free_irq(struct mt7530_priv *priv)
2264 {
2265 struct device_node *mnp, *np = priv->dev->of_node;
2266
2267 mnp = of_get_child_by_name(np, "mdio");
2268 if (!mnp)
2269 mt7530_free_mdio_irq(priv);
2270 of_node_put(mnp);
2271
2272 mt7530_free_irq_common(priv);
2273 }
2274
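/* Allocate and register the MDIO bus used to reach the internal switch
 * PHYs. When the device tree has no dedicated "mdio" child node, the bus
 * doubles as the DSA user MII bus and, if interrupts are available, the
 * PHY interrupt mappings are created as well.
 */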
2275 static int
2276 mt7530_setup_mdio(struct mt7530_priv *priv)
2277 {
2278 struct device_node *mnp, *np = priv->dev->of_node;
2279 struct dsa_switch *ds = priv->ds;
2280 struct device *dev = priv->dev;
2281 struct mii_bus *bus;
2282 static int idx;
2283 int ret = 0;
2284
2285 mnp = of_get_child_by_name(np, "mdio");
2286
2287 if (mnp && !of_device_is_available(mnp))
2288 goto out;
2289
2290 bus = devm_mdiobus_alloc(dev);
2291 if (!bus) {
2292 ret = -ENOMEM;
2293 goto out;
2294 }
2295
2296 if (!mnp)
2297 ds->user_mii_bus = bus;
2298
2299 bus->priv = priv;
2300 bus->name = KBUILD_MODNAME "-mii";
2301 snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++);
2302 bus->read = mt753x_phy_read_c22;
2303 bus->write = mt753x_phy_write_c22;
2304 bus->read_c45 = mt753x_phy_read_c45;
2305 bus->write_c45 = mt753x_phy_write_c45;
2306 bus->parent = dev;
2307 bus->phy_mask = ~ds->phys_mii_mask;
2308
2309 if (priv->irq && !mnp)
2310 mt7530_setup_mdio_irq(priv);
2311
2312 ret = devm_of_mdiobus_register(dev, bus, mnp);
2313 if (ret) {
2314 dev_err(dev, "failed to register MDIO bus: %d\n", ret);
2315 if (priv->irq && !mnp)
2316 mt7530_free_mdio_irq(priv);
2317 }
2318
2319 out:
2320 of_node_put(mnp);
2321 return ret;
2322 }
2323
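/* Bring up a standalone MT7530/MT7621 switch: enable the core and I/O
 * regulators where needed, hard-reset the chip, verify the chip ID and
 * crystal, run the internal software reset, then apply the TRGMII, trap,
 * MIB, per-port and VLAN 0 defaults before flushing the FDB.
 */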
2324 static int
2325 mt7530_setup(struct dsa_switch *ds)
2326 {
2327 struct mt7530_priv *priv = ds->priv;
2328 struct device_node *dn = NULL;
2329 struct device_node *phy_node;
2330 struct device_node *mac_np;
2331 struct mt7530_dummy_poll p;
2332 phy_interface_t interface;
2333 struct dsa_port *cpu_dp;
2334 u32 id, val;
2335 int ret, i;
2336
2337 /* The parent node of the conduit netdev, which holds the common system
2338 * controller, is also the container for the two GMAC nodes that are
2339 * represented as two netdev instances.
2340 */
2341 dsa_switch_for_each_cpu_port(cpu_dp, ds) {
2342 dn = cpu_dp->conduit->dev.of_node->parent;
2343 /* It doesn't matter which CPU port is found first;
2344 * their conduits should share the same parent OF node.
2345 */
2346 break;
2347 }
2348
2349 if (!dn) {
2350 dev_err(ds->dev, "parent OF node of DSA conduit not found");
2351 return -EINVAL;
2352 }
2353
2354 ds->assisted_learning_on_cpu_port = true;
2355 ds->mtu_enforcement_ingress = true;
2356
2357 if (priv->id == ID_MT7530) {
2358 regulator_set_voltage(priv->core_pwr, 1000000, 1000000);
2359 ret = regulator_enable(priv->core_pwr);
2360 if (ret < 0) {
2361 dev_err(priv->dev,
2362 "Failed to enable core power: %d\n", ret);
2363 return ret;
2364 }
2365
2366 regulator_set_voltage(priv->io_pwr, 3300000, 3300000);
2367 ret = regulator_enable(priv->io_pwr);
2368 if (ret < 0) {
2369 dev_err(priv->dev, "Failed to enable io pwr: %d\n",
2370 ret);
2371 return ret;
2372 }
2373 }
2374
2375 /* Reset the whole chip through the GPIO pin or memory-mapped registers,
2376 * depending on the type of hardware.
2377 */
2378 if (priv->mcm) {
2379 reset_control_assert(priv->rstc);
2380 usleep_range(5000, 5100);
2381 reset_control_deassert(priv->rstc);
2382 } else {
2383 gpiod_set_value_cansleep(priv->reset, 0);
2384 usleep_range(5000, 5100);
2385 gpiod_set_value_cansleep(priv->reset, 1);
2386 }
2387
2388 /* Wait for the MT7530 to become stable */
2389 INIT_MT7530_DUMMY_POLL(&p, priv, MT753X_TRAP);
2390 ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
2391 20, 1000000);
2392 if (ret < 0) {
2393 dev_err(priv->dev, "reset timeout\n");
2394 return ret;
2395 }
2396
2397 id = mt7530_read(priv, MT7530_CREV);
2398 id >>= CHIP_NAME_SHIFT;
2399 if (id != MT7530_ID) {
2400 dev_err(priv->dev, "chip %x can't be supported\n", id);
2401 return -ENODEV;
2402 }
2403
2404 if ((val & MT7530_XTAL_MASK) == MT7530_XTAL_20MHZ) {
2405 dev_err(priv->dev,
2406 "MT7530 with a 20MHz XTAL is not supported!\n");
2407 return -EINVAL;
2408 }
2409
2410 /* Reset the switch through internal reset */
2411 mt7530_write(priv, MT7530_SYS_CTRL,
2412 SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
2413 SYS_CTRL_REG_RST);
2414
2415 /* Lower Tx driving for TRGMII path */
2416 for (i = 0; i < NUM_TRGMII_CTRL; i++)
2417 mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
2418 TD_DM_DRVP(8) | TD_DM_DRVN(8));
2419
2420 for (i = 0; i < NUM_TRGMII_CTRL; i++)
2421 mt7530_rmw(priv, MT7530_TRGMII_RD(i),
2422 RD_TAP_MASK, RD_TAP(16));
2423
2424 /* Allow modifying the trap and allow direct access to the PHY registers
2425 * via the MDIO bus the switch is on.
2426 */
2427 mt7530_rmw(priv, MT753X_MTRAP, MT7530_CHG_TRAP |
2428 MT7530_PHY_INDIRECT_ACCESS, MT7530_CHG_TRAP);
2429
2430 if ((val & MT7530_XTAL_MASK) == MT7530_XTAL_40MHZ)
2431 mt7530_pll_setup(priv);
2432
2433 mt753x_trap_frames(priv);
2434
2435 /* Enable and reset MIB counters */
2436 mt7530_mib_reset(ds);
2437
2438 for (i = 0; i < priv->ds->num_ports; i++) {
2439 /* Clear link settings and enable force mode to force link down
2440 * on all ports until they're enabled later.
2441 */
2442 mt7530_rmw(priv, MT753X_PMCR_P(i),
2443 PMCR_LINK_SETTINGS_MASK |
2444 MT753X_FORCE_MODE(priv->id),
2445 MT753X_FORCE_MODE(priv->id));
2446
2447 /* Disable forwarding by default on all ports */
2448 mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
2449 PCR_MATRIX_CLR);
2450
2451 /* Disable learning by default on all ports */
2452 mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
2453
2454 if (dsa_is_cpu_port(ds, i)) {
2455 mt753x_cpu_port_enable(ds, i);
2456 } else {
2457 mt7530_port_disable(ds, i);
2458
2459 /* Set default PVID to 0 on all user ports */
2460 mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK,
2461 G0_PORT_VID_DEF);
2462 }
2463 /* Enable consistent egress tag */
2464 mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
2465 PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
2466 }
2467
2468 /* Allow mirroring frames received on the local port (monitor port). */
2469 mt7530_set(priv, MT753X_AGC, LOCAL_EN);
2470
2471 /* Setup VLAN ID 0 for VLAN-unaware bridges */
2472 ret = mt7530_setup_vlan0(priv);
2473 if (ret)
2474 return ret;
2475
2476 /* Check for PHY muxing on port 5 */
2477 if (dsa_is_unused_port(ds, 5)) {
2478 /* Scan the ethernet nodes. Look for GMAC1 and look up the PHY it
2479 * uses. Set priv->p5_mode to the appropriate value if PHY muxing is
2480 * detected.
2481 */
2482 for_each_child_of_node(dn, mac_np) {
2483 if (!of_device_is_compatible(mac_np,
2484 "mediatek,eth-mac"))
2485 continue;
2486
2487 ret = of_property_read_u32(mac_np, "reg", &id);
2488 if (ret < 0 || id != 1)
2489 continue;
2490
2491 phy_node = of_parse_phandle(mac_np, "phy-handle", 0);
2492 if (!phy_node)
2493 continue;
2494
2495 if (phy_node->parent == priv->dev->of_node->parent ||
2496 phy_node->parent->parent == priv->dev->of_node) {
2497 ret = of_get_phy_mode(mac_np, &interface);
2498 if (ret && ret != -ENODEV) {
2499 of_node_put(mac_np);
2500 of_node_put(phy_node);
2501 return ret;
2502 }
2503 id = of_mdio_parse_addr(ds->dev, phy_node);
2504 if (id == 0)
2505 priv->p5_mode = MUX_PHY_P0;
2506 if (id == 4)
2507 priv->p5_mode = MUX_PHY_P4;
2508 }
2509 of_node_put(mac_np);
2510 of_node_put(phy_node);
2511 break;
2512 }
2513
2514 if (priv->p5_mode == MUX_PHY_P0 ||
2515 priv->p5_mode == MUX_PHY_P4) {
2516 mt7530_clear(priv, MT753X_MTRAP, MT7530_P5_DIS);
2517 mt7530_setup_port5(ds, interface);
2518 }
2519 }
2520
2521 #ifdef CONFIG_GPIOLIB
2522 if (of_property_read_bool(priv->dev->of_node, "gpio-controller")) {
2523 ret = mt7530_setup_gpio(priv);
2524 if (ret)
2525 return ret;
2526 }
2527 #endif /* CONFIG_GPIOLIB */
2528
2529 /* Flush the FDB table */
2530 ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
2531 if (ret < 0)
2532 return ret;
2533
2534 return 0;
2535 }
2536
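/* Initialisation shared by MT7531, MT7988 and EN7581: trap management
 * frames, reset the MIB counters, disable flooding, force every port's
 * link down, apply the per-port defaults and flush the FDB.
 */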
2537 static int
2538 mt7531_setup_common(struct dsa_switch *ds)
2539 {
2540 struct mt7530_priv *priv = ds->priv;
2541 int ret, i;
2542
2543 mt753x_trap_frames(priv);
2544
2545 /* Enable and reset MIB counters */
2546 mt7530_mib_reset(ds);
2547
2548 /* Disable flooding on all ports */
2549 mt7530_clear(priv, MT753X_MFC, BC_FFP_MASK | UNM_FFP_MASK |
2550 UNU_FFP_MASK);
2551
2552 for (i = 0; i < priv->ds->num_ports; i++) {
2553 /* Clear link settings and enable force mode to force link down
2554 * on all ports until they're enabled later.
2555 */
2556 mt7530_rmw(priv, MT753X_PMCR_P(i),
2557 PMCR_LINK_SETTINGS_MASK |
2558 MT753X_FORCE_MODE(priv->id),
2559 MT753X_FORCE_MODE(priv->id));
2560
2561 /* Disable forwarding by default on all ports */
2562 mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
2563 PCR_MATRIX_CLR);
2564
2565 /* Disable learning by default on all ports */
2566 mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
2567
2568 mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
2569
2570 if (dsa_is_cpu_port(ds, i)) {
2571 mt753x_cpu_port_enable(ds, i);
2572 } else {
2573 mt7530_port_disable(ds, i);
2574
2575 /* Set default PVID to 0 on all user ports */
2576 mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK,
2577 G0_PORT_VID_DEF);
2578 }
2579
2580 /* Enable consistent egress tag */
2581 mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
2582 PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
2583 }
2584
2585 /* Allow mirroring frames received on the local port (monitor port). */
2586 mt7530_set(priv, MT753X_AGC, LOCAL_EN);
2587
2588 /* Flush the FDB table */
2589 ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
2590 if (ret < 0)
2591 return ret;
2592
2593 return 0;
2594 }
2595
2596 static int
2597 mt7531_setup(struct dsa_switch *ds)
2598 {
2599 struct mt7530_priv *priv = ds->priv;
2600 struct mt7530_dummy_poll p;
2601 u32 val, id;
2602 int ret, i;
2603
2604 /* Reset the whole chip through the GPIO pin or memory-mapped registers,
2605 * depending on the type of hardware.
2606 */
2607 if (priv->mcm) {
2608 reset_control_assert(priv->rstc);
2609 usleep_range(5000, 5100);
2610 reset_control_deassert(priv->rstc);
2611 } else {
2612 gpiod_set_value_cansleep(priv->reset, 0);
2613 usleep_range(5000, 5100);
2614 gpiod_set_value_cansleep(priv->reset, 1);
2615 }
2616
2617 /* Wait for the switch to become stable */
2618 INIT_MT7530_DUMMY_POLL(&p, priv, MT753X_TRAP);
2619 ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
2620 20, 1000000);
2621 if (ret < 0) {
2622 dev_err(priv->dev, "reset timeout\n");
2623 return ret;
2624 }
2625
2626 id = mt7530_read(priv, MT7531_CREV);
2627 id >>= CHIP_NAME_SHIFT;
2628
2629 if (id != MT7531_ID) {
2630 dev_err(priv->dev, "chip %x can't be supported\n", id);
2631 return -ENODEV;
2632 }
2633
2634 /* MT7531AE has two SGMII units, one for port 5 and one for port 6.
2635 * MT7531BE has only one SGMII unit, which is for port 6.
2636 */
2637 val = mt7530_read(priv, MT7531_TOP_SIG_SR);
2638 priv->p5_sgmii = !!(val & PAD_DUAL_SGMII_EN);
2639
2640 /* Force link down on all ports before internal reset */
2641 for (i = 0; i < priv->ds->num_ports; i++)
2642 mt7530_write(priv, MT753X_PMCR_P(i), MT7531_FORCE_MODE_LNK);
2643
2644 /* Reset the switch through internal reset */
2645 mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_SW_RST | SYS_CTRL_REG_RST);
2646
2647 if (!priv->p5_sgmii) {
2648 mt7531_pll_setup(priv);
2649 } else {
2650 /* Unlike on MT7531BE, GPIO pins 6-12 are not used for RGMII on
2651 * MT7531AE. Set GPIO pins 11-12 to function as MDC and MDIO to
2652 * expose the MDIO bus of the switch.
2653 */
2654 mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK,
2655 MT7531_EXT_P_MDC_11);
2656 mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK,
2657 MT7531_EXT_P_MDIO_12);
2658 }
2659
2660 mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK,
2661 MT7531_GPIO0_INTERRUPT);
2662
2663 /* Enable Energy-Efficient Ethernet (EEE) and the PHY core PLL. Since
2664 * no phy_device has been created yet, phy_[read,write]_mmd_indirect
2665 * cannot be used here, so access the registers through our own
2666 * mt7531_ind_c45_phy_[read,write] helpers instead.
2667 */
2668 val = mt7531_ind_c45_phy_read(priv,
2669 MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
2670 MDIO_MMD_VEND2, CORE_PLL_GROUP4);
2671 val |= MT7531_RG_SYSPLL_DMY2 | MT7531_PHY_PLL_BYPASS_MODE;
2672 val &= ~MT7531_PHY_PLL_OFF;
2673 mt7531_ind_c45_phy_write(priv,
2674 MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
2675 MDIO_MMD_VEND2, CORE_PLL_GROUP4, val);
2676
2677 /* Disable EEE advertisement on the switch PHYs. */
2678 for (i = MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr);
2679 i < MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr) + MT7530_NUM_PHYS;
2680 i++) {
2681 mt7531_ind_c45_phy_write(priv, i, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
2682 0);
2683 }
2684
2685 ret = mt7531_setup_common(ds);
2686 if (ret)
2687 return ret;
2688
2689 /* Setup VLAN ID 0 for VLAN-unaware bridges */
2690 ret = mt7530_setup_vlan0(priv);
2691 if (ret)
2692 return ret;
2693
2694 ds->assisted_learning_on_cpu_port = true;
2695 ds->mtu_enforcement_ingress = true;
2696
2697 return 0;
2698 }
2699
2700 static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
2701 struct phylink_config *config)
2702 {
2703 config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
2704
2705 switch (port) {
2706 /* Ports which are connected to switch PHYs. There is no MII pinout. */
2707 case 0 ... 4:
2708 __set_bit(PHY_INTERFACE_MODE_GMII,
2709 config->supported_interfaces);
2710 break;
2711
2712 /* Port 5 supports rgmii with delays, mii, and gmii. */
2713 case 5:
2714 phy_interface_set_rgmii(config->supported_interfaces);
2715 __set_bit(PHY_INTERFACE_MODE_MII,
2716 config->supported_interfaces);
2717 __set_bit(PHY_INTERFACE_MODE_GMII,
2718 config->supported_interfaces);
2719 break;
2720
2721 /* Port 6 supports rgmii and trgmii. */
2722 case 6:
2723 __set_bit(PHY_INTERFACE_MODE_RGMII,
2724 config->supported_interfaces);
2725 __set_bit(PHY_INTERFACE_MODE_TRGMII,
2726 config->supported_interfaces);
2727 break;
2728 }
2729 }
2730
2731 static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
2732 struct phylink_config *config)
2733 {
2734 struct mt7530_priv *priv = ds->priv;
2735
2736 config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
2737
2738 switch (port) {
2739 /* Ports which are connected to switch PHYs. There is no MII pinout. */
2740 case 0 ... 4:
2741 __set_bit(PHY_INTERFACE_MODE_GMII,
2742 config->supported_interfaces);
2743 break;
2744
2745 /* Port 5 supports rgmii with delays on MT7531BE, sgmii/802.3z on
2746 * MT7531AE.
2747 */
2748 case 5:
2749 if (!priv->p5_sgmii) {
2750 phy_interface_set_rgmii(config->supported_interfaces);
2751 break;
2752 }
2753 fallthrough;
2754
2755 /* Port 6 supports sgmii/802.3z. */
2756 case 6:
2757 __set_bit(PHY_INTERFACE_MODE_SGMII,
2758 config->supported_interfaces);
2759 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
2760 config->supported_interfaces);
2761 __set_bit(PHY_INTERFACE_MODE_2500BASEX,
2762 config->supported_interfaces);
2763
2764 config->mac_capabilities |= MAC_2500FD;
2765 break;
2766 }
2767 }
2768
2769 static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port,
2770 struct phylink_config *config)
2771 {
2772 switch (port) {
2773 /* Ports which are connected to switch PHYs. There is no MII pinout. */
2774 case 0 ... 3:
2775 __set_bit(PHY_INTERFACE_MODE_INTERNAL,
2776 config->supported_interfaces);
2777
2778 config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
2779 break;
2780
2781 /* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. */
2782 case 6:
2783 __set_bit(PHY_INTERFACE_MODE_INTERNAL,
2784 config->supported_interfaces);
2785
2786 config->mac_capabilities |= MAC_10000FD;
2787 break;
2788 }
2789 }
2790
2791 static void en7581_mac_port_get_caps(struct dsa_switch *ds, int port,
2792 struct phylink_config *config)
2793 {
2794 switch (port) {
2795 /* Ports which are connected to switch PHYs. There is no MII pinout. */
2796 case 0 ... 4:
2797 __set_bit(PHY_INTERFACE_MODE_INTERNAL,
2798 config->supported_interfaces);
2799
2800 config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
2801 break;
2802
2803 /* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. */
2804 case 6:
2805 __set_bit(PHY_INTERFACE_MODE_INTERNAL,
2806 config->supported_interfaces);
2807
2808 config->mac_capabilities |= MAC_10000FD;
2809 break;
2810 }
2811 }
2812
2813 static void
2814 mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
2815 phy_interface_t interface)
2816 {
2817 struct mt7530_priv *priv = ds->priv;
2818
2819 if (port == 5)
2820 mt7530_setup_port5(priv->ds, interface);
2821 else if (port == 6)
2822 mt7530_setup_port6(priv->ds, interface);
2823 }
2824
2825 static void mt7531_rgmii_setup(struct mt7530_priv *priv,
2826 phy_interface_t interface,
2827 struct phy_device *phydev)
2828 {
2829 u32 val;
2830
2831 val = mt7530_read(priv, MT7531_CLKGEN_CTRL);
2832 val |= GP_CLK_EN;
2833 val &= ~GP_MODE_MASK;
2834 val |= GP_MODE(MT7531_GP_MODE_RGMII);
2835 val &= ~CLK_SKEW_IN_MASK;
2836 val |= CLK_SKEW_IN(MT7531_CLK_SKEW_NO_CHG);
2837 val &= ~CLK_SKEW_OUT_MASK;
2838 val |= CLK_SKEW_OUT(MT7531_CLK_SKEW_NO_CHG);
2839 val |= TXCLK_NO_REVERSE | RXCLK_NO_DELAY;
2840
2841 /* Do not adjust the RGMII delay when a vendor PHY driver is present. */
2842 if (!phydev || phy_driver_is_genphy(phydev)) {
2843 val &= ~(TXCLK_NO_REVERSE | RXCLK_NO_DELAY);
2844 switch (interface) {
2845 case PHY_INTERFACE_MODE_RGMII:
2846 val |= TXCLK_NO_REVERSE;
2847 val |= RXCLK_NO_DELAY;
2848 break;
2849 case PHY_INTERFACE_MODE_RGMII_RXID:
2850 val |= TXCLK_NO_REVERSE;
2851 break;
2852 case PHY_INTERFACE_MODE_RGMII_TXID:
2853 val |= RXCLK_NO_DELAY;
2854 break;
2855 case PHY_INTERFACE_MODE_RGMII_ID:
2856 break;
2857 default:
2858 break;
2859 }
2860 }
2861
2862 mt7530_write(priv, MT7531_CLKGEN_CTRL, val);
2863 }
2864
2865 static void
2866 mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
2867 phy_interface_t interface)
2868 {
2869 struct mt7530_priv *priv = ds->priv;
2870 struct phy_device *phydev;
2871 struct dsa_port *dp;
2872
2873 if (phy_interface_mode_is_rgmii(interface)) {
2874 dp = dsa_to_port(ds, port);
2875 phydev = dp->user->phydev;
2876 mt7531_rgmii_setup(priv, interface, phydev);
2877 }
2878 }
2879
2880 static struct phylink_pcs *
2881 mt753x_phylink_mac_select_pcs(struct phylink_config *config,
2882 phy_interface_t interface)
2883 {
2884 struct dsa_port *dp = dsa_phylink_to_port(config);
2885 struct mt7530_priv *priv = dp->ds->priv;
2886
2887 switch (interface) {
2888 case PHY_INTERFACE_MODE_TRGMII:
2889 return &priv->pcs[dp->index].pcs;
2890 case PHY_INTERFACE_MODE_SGMII:
2891 case PHY_INTERFACE_MODE_1000BASEX:
2892 case PHY_INTERFACE_MODE_2500BASEX:
2893 return priv->ports[dp->index].sgmii_pcs;
2894 default:
2895 return NULL;
2896 }
2897 }
2898
2899 static void
2900 mt753x_phylink_mac_config(struct phylink_config *config, unsigned int mode,
2901 const struct phylink_link_state *state)
2902 {
2903 struct dsa_port *dp = dsa_phylink_to_port(config);
2904 struct dsa_switch *ds = dp->ds;
2905 struct mt7530_priv *priv;
2906 int port = dp->index;
2907
2908 priv = ds->priv;
2909
2910 if ((port == 5 || port == 6) && priv->info->mac_port_config)
2911 priv->info->mac_port_config(ds, port, mode, state->interface);
2912
2913 /* Are we connected to an external PHY? */
2914 if (port == 5 && dsa_is_user_port(ds, 5))
2915 mt7530_set(priv, MT753X_PMCR_P(port), PMCR_EXT_PHY);
2916 }
2917
2918 static void mt753x_phylink_mac_link_down(struct phylink_config *config,
2919 unsigned int mode,
2920 phy_interface_t interface)
2921 {
2922 struct dsa_port *dp = dsa_phylink_to_port(config);
2923 struct mt7530_priv *priv = dp->ds->priv;
2924
2925 mt7530_clear(priv, MT753X_PMCR_P(dp->index), PMCR_LINK_SETTINGS_MASK);
2926 }
2927
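/* Translate the negotiated link parameters into PMCR force bits: speed,
 * duplex, pause and, for PHY-driven links that support it, EEE. Note that
 * 2.5G and 10G links reuse the 1000M force-speed encoding here.
 */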
2928 static void mt753x_phylink_mac_link_up(struct phylink_config *config,
2929 struct phy_device *phydev,
2930 unsigned int mode,
2931 phy_interface_t interface,
2932 int speed, int duplex,
2933 bool tx_pause, bool rx_pause)
2934 {
2935 struct dsa_port *dp = dsa_phylink_to_port(config);
2936 struct mt7530_priv *priv = dp->ds->priv;
2937 u32 mcr;
2938
2939 mcr = PMCR_MAC_RX_EN | PMCR_MAC_TX_EN | PMCR_FORCE_LNK;
2940
2941 switch (speed) {
2942 case SPEED_1000:
2943 case SPEED_2500:
2944 case SPEED_10000:
2945 mcr |= PMCR_FORCE_SPEED_1000;
2946 break;
2947 case SPEED_100:
2948 mcr |= PMCR_FORCE_SPEED_100;
2949 break;
2950 }
2951 if (duplex == DUPLEX_FULL) {
2952 mcr |= PMCR_FORCE_FDX;
2953 if (tx_pause)
2954 mcr |= PMCR_FORCE_TX_FC_EN;
2955 if (rx_pause)
2956 mcr |= PMCR_FORCE_RX_FC_EN;
2957 }
2958
2959 if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) {
2960 switch (speed) {
2961 case SPEED_1000:
2962 case SPEED_2500:
2963 mcr |= PMCR_FORCE_EEE1G;
2964 break;
2965 case SPEED_100:
2966 mcr |= PMCR_FORCE_EEE100;
2967 break;
2968 }
2969 }
2970
2971 mt7530_set(priv, MT753X_PMCR_P(dp->index), mcr);
2972 }
2973
2974 static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
2975 struct phylink_config *config)
2976 {
2977 struct mt7530_priv *priv = ds->priv;
2978
2979 config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE;
2980
2981 priv->info->mac_port_get_caps(ds, port, config);
2982 }
2983
2984 static int mt753x_pcs_validate(struct phylink_pcs *pcs,
2985 unsigned long *supported,
2986 const struct phylink_link_state *state)
2987 {
2988 /* Autonegotiation is not supported in TRGMII nor 802.3z modes */
2989 if (state->interface == PHY_INTERFACE_MODE_TRGMII ||
2990 phy_interface_mode_is_8023z(state->interface))
2991 phylink_clear(supported, Autoneg);
2992
2993 return 0;
2994 }
2995
2996 static void mt7530_pcs_get_state(struct phylink_pcs *pcs,
2997 struct phylink_link_state *state)
2998 {
2999 struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
3000 int port = pcs_to_mt753x_pcs(pcs)->port;
3001 u32 pmsr;
3002
3003 pmsr = mt7530_read(priv, MT7530_PMSR_P(port));
3004
3005 state->link = (pmsr & PMSR_LINK);
3006 state->an_complete = state->link;
3007 state->duplex = !!(pmsr & PMSR_DPX);
3008
3009 switch (pmsr & PMSR_SPEED_MASK) {
3010 case PMSR_SPEED_10:
3011 state->speed = SPEED_10;
3012 break;
3013 case PMSR_SPEED_100:
3014 state->speed = SPEED_100;
3015 break;
3016 case PMSR_SPEED_1000:
3017 state->speed = SPEED_1000;
3018 break;
3019 default:
3020 state->speed = SPEED_UNKNOWN;
3021 break;
3022 }
3023
3024 state->pause &= ~(MLO_PAUSE_RX | MLO_PAUSE_TX);
3025 if (pmsr & PMSR_RX_FC)
3026 state->pause |= MLO_PAUSE_RX;
3027 if (pmsr & PMSR_TX_FC)
3028 state->pause |= MLO_PAUSE_TX;
3029 }
3030
3031 static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
3032 phy_interface_t interface,
3033 const unsigned long *advertising,
3034 bool permit_pause_to_mac)
3035 {
3036 return 0;
3037 }
3038
3039 static void mt7530_pcs_an_restart(struct phylink_pcs *pcs)
3040 {
3041 }
3042
3043 static const struct phylink_pcs_ops mt7530_pcs_ops = {
3044 .pcs_validate = mt753x_pcs_validate,
3045 .pcs_get_state = mt7530_pcs_get_state,
3046 .pcs_config = mt753x_pcs_config,
3047 .pcs_an_restart = mt7530_pcs_an_restart,
3048 };
3049
3050 static int
3051 mt753x_setup(struct dsa_switch *ds)
3052 {
3053 struct mt7530_priv *priv = ds->priv;
3054 int ret = priv->info->sw_setup(ds);
3055 int i;
3056
3057 if (ret)
3058 return ret;
3059
3060 ret = mt7530_setup_irq(priv);
3061 if (ret)
3062 return ret;
3063
3064 ret = mt7530_setup_mdio(priv);
3065 if (ret && priv->irq)
3066 mt7530_free_irq_common(priv);
3067 if (ret)
3068 return ret;
3069
3070 /* Initialise the PCS devices */
3071 for (i = 0; i < priv->ds->num_ports; i++) {
3072 priv->pcs[i].pcs.ops = priv->info->pcs_ops;
3073 priv->pcs[i].pcs.neg_mode = true;
3074 priv->pcs[i].priv = priv;
3075 priv->pcs[i].port = i;
3076 }
3077
3078 if (priv->create_sgmii) {
3079 ret = priv->create_sgmii(priv);
3080 if (ret && priv->irq)
3081 mt7530_free_irq(priv);
3082 }
3083
3084 return ret;
3085 }
3086
3087 static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
3088 struct ethtool_keee *e)
3089 {
3090 struct mt7530_priv *priv = ds->priv;
3091 u32 eeecr = mt7530_read(priv, MT753X_PMEEECR_P(port));
3092
3093 e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN);
3094 e->tx_lpi_timer = LPI_THRESH_GET(eeecr);
3095
3096 return 0;
3097 }
3098
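/* The LPI threshold field in PMEEECR is 12 bits wide, so tx_lpi_timer
 * values above 0xFFF are rejected. Clearing tx_lpi_enabled forces LPI
 * mode with no delay by setting LPI_MODE_EN alone.
 */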
3099 static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
3100 struct ethtool_keee *e)
3101 {
3102 struct mt7530_priv *priv = ds->priv;
3103 u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN;
3104
3105 if (e->tx_lpi_timer > 0xFFF)
3106 return -EINVAL;
3107
3108 set = LPI_THRESH_SET(e->tx_lpi_timer);
3109 if (!e->tx_lpi_enabled)
3110 /* Force LPI Mode without a delay */
3111 set |= LPI_MODE_EN;
3112 mt7530_rmw(priv, MT753X_PMEEECR_P(port), mask, set);
3113
3114 return 0;
3115 }
3116
3117 static void
3118 mt753x_conduit_state_change(struct dsa_switch *ds,
3119 const struct net_device *conduit,
3120 bool operational)
3121 {
3122 struct dsa_port *cpu_dp = conduit->dsa_ptr;
3123 struct mt7530_priv *priv = ds->priv;
3124 int val = 0;
3125 u8 mask;
3126
3127 /* Set the CPU port that frames are trapped to on MT7530. Trapped frames
3128 * will be forwarded to the numerically smallest CPU port whose conduit
3129 * interface is up.
3130 */
3131 if (priv->id != ID_MT7530 && priv->id != ID_MT7621)
3132 return;
3133
3134 mask = BIT(cpu_dp->index);
3135
3136 if (operational)
3137 priv->active_cpu_ports |= mask;
3138 else
3139 priv->active_cpu_ports &= ~mask;
3140
3141 if (priv->active_cpu_ports) {
3142 val = MT7530_CPU_EN |
3143 MT7530_CPU_PORT(__ffs(priv->active_cpu_ports));
3144 }
3145
3146 mt7530_rmw(priv, MT753X_MFC, MT7530_CPU_EN | MT7530_CPU_PORT_MASK, val);
3147 }
3148
3149 static int mt7988_setup(struct dsa_switch *ds)
3150 {
3151 struct mt7530_priv *priv = ds->priv;
3152
3153 /* Reset the switch */
3154 reset_control_assert(priv->rstc);
3155 usleep_range(20, 50);
3156 reset_control_deassert(priv->rstc);
3157 usleep_range(20, 50);
3158
3159 /* Reset the switch PHYs */
3160 mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_PHY_RST);
3161
3162 return mt7531_setup_common(ds);
3163 }
3164
3165 const struct dsa_switch_ops mt7530_switch_ops = {
3166 .get_tag_protocol = mtk_get_tag_protocol,
3167 .setup = mt753x_setup,
3168 .preferred_default_local_cpu_port = mt753x_preferred_default_local_cpu_port,
3169 .get_strings = mt7530_get_strings,
3170 .get_ethtool_stats = mt7530_get_ethtool_stats,
3171 .get_sset_count = mt7530_get_sset_count,
3172 .set_ageing_time = mt7530_set_ageing_time,
3173 .port_enable = mt7530_port_enable,
3174 .port_disable = mt7530_port_disable,
3175 .port_change_mtu = mt7530_port_change_mtu,
3176 .port_max_mtu = mt7530_port_max_mtu,
3177 .port_stp_state_set = mt7530_stp_state_set,
3178 .port_pre_bridge_flags = mt7530_port_pre_bridge_flags,
3179 .port_bridge_flags = mt7530_port_bridge_flags,
3180 .port_bridge_join = mt7530_port_bridge_join,
3181 .port_bridge_leave = mt7530_port_bridge_leave,
3182 .port_fdb_add = mt7530_port_fdb_add,
3183 .port_fdb_del = mt7530_port_fdb_del,
3184 .port_fdb_dump = mt7530_port_fdb_dump,
3185 .port_mdb_add = mt7530_port_mdb_add,
3186 .port_mdb_del = mt7530_port_mdb_del,
3187 .port_vlan_filtering = mt7530_port_vlan_filtering,
3188 .port_vlan_add = mt7530_port_vlan_add,
3189 .port_vlan_del = mt7530_port_vlan_del,
3190 .port_mirror_add = mt753x_port_mirror_add,
3191 .port_mirror_del = mt753x_port_mirror_del,
3192 .phylink_get_caps = mt753x_phylink_get_caps,
3193 .get_mac_eee = mt753x_get_mac_eee,
3194 .set_mac_eee = mt753x_set_mac_eee,
3195 .conduit_state_change = mt753x_conduit_state_change,
3196 };
3197 EXPORT_SYMBOL_GPL(mt7530_switch_ops);
3198
3199 static const struct phylink_mac_ops mt753x_phylink_mac_ops = {
3200 .mac_select_pcs = mt753x_phylink_mac_select_pcs,
3201 .mac_config = mt753x_phylink_mac_config,
3202 .mac_link_down = mt753x_phylink_mac_link_down,
3203 .mac_link_up = mt753x_phylink_mac_link_up,
3204 };
3205
3206 const struct mt753x_info mt753x_table[] = {
3207 [ID_MT7621] = {
3208 .id = ID_MT7621,
3209 .pcs_ops = &mt7530_pcs_ops,
3210 .sw_setup = mt7530_setup,
3211 .phy_read_c22 = mt7530_phy_read_c22,
3212 .phy_write_c22 = mt7530_phy_write_c22,
3213 .phy_read_c45 = mt7530_phy_read_c45,
3214 .phy_write_c45 = mt7530_phy_write_c45,
3215 .mac_port_get_caps = mt7530_mac_port_get_caps,
3216 .mac_port_config = mt7530_mac_config,
3217 },
3218 [ID_MT7530] = {
3219 .id = ID_MT7530,
3220 .pcs_ops = &mt7530_pcs_ops,
3221 .sw_setup = mt7530_setup,
3222 .phy_read_c22 = mt7530_phy_read_c22,
3223 .phy_write_c22 = mt7530_phy_write_c22,
3224 .phy_read_c45 = mt7530_phy_read_c45,
3225 .phy_write_c45 = mt7530_phy_write_c45,
3226 .mac_port_get_caps = mt7530_mac_port_get_caps,
3227 .mac_port_config = mt7530_mac_config,
3228 },
3229 [ID_MT7531] = {
3230 .id = ID_MT7531,
3231 .pcs_ops = &mt7530_pcs_ops,
3232 .sw_setup = mt7531_setup,
3233 .phy_read_c22 = mt7531_ind_c22_phy_read,
3234 .phy_write_c22 = mt7531_ind_c22_phy_write,
3235 .phy_read_c45 = mt7531_ind_c45_phy_read,
3236 .phy_write_c45 = mt7531_ind_c45_phy_write,
3237 .mac_port_get_caps = mt7531_mac_port_get_caps,
3238 .mac_port_config = mt7531_mac_config,
3239 },
3240 [ID_MT7988] = {
3241 .id = ID_MT7988,
3242 .pcs_ops = &mt7530_pcs_ops,
3243 .sw_setup = mt7988_setup,
3244 .phy_read_c22 = mt7531_ind_c22_phy_read,
3245 .phy_write_c22 = mt7531_ind_c22_phy_write,
3246 .phy_read_c45 = mt7531_ind_c45_phy_read,
3247 .phy_write_c45 = mt7531_ind_c45_phy_write,
3248 .mac_port_get_caps = mt7988_mac_port_get_caps,
3249 },
3250 [ID_EN7581] = {
3251 .id = ID_EN7581,
3252 .pcs_ops = &mt7530_pcs_ops,
3253 .sw_setup = mt7988_setup,
3254 .phy_read_c22 = mt7531_ind_c22_phy_read,
3255 .phy_write_c22 = mt7531_ind_c22_phy_write,
3256 .phy_read_c45 = mt7531_ind_c45_phy_read,
3257 .phy_write_c45 = mt7531_ind_c45_phy_write,
3258 .mac_port_get_caps = en7581_mac_port_get_caps,
3259 },
3260 };
3261 EXPORT_SYMBOL_GPL(mt753x_table);
3262
3263 int
3264 mt7530_probe_common(struct mt7530_priv *priv)
3265 {
3266 struct device *dev = priv->dev;
3267
3268 priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
3269 if (!priv->ds)
3270 return -ENOMEM;
3271
3272 priv->ds->dev = dev;
3273 priv->ds->num_ports = MT7530_NUM_PORTS;
3274
3275 /* Get the hardware identifier from the devicetree node.
3276 * We will need it for some of the clock and regulator setup.
3277 */
3278 priv->info = of_device_get_match_data(dev);
3279 if (!priv->info)
3280 return -EINVAL;
3281
3282 priv->id = priv->info->id;
3283 priv->dev = dev;
3284 priv->ds->priv = priv;
3285 priv->ds->ops = &mt7530_switch_ops;
3286 priv->ds->phylink_mac_ops = &mt753x_phylink_mac_ops;
3287 mutex_init(&priv->reg_mutex);
3288 dev_set_drvdata(dev, priv);
3289
3290 return 0;
3291 }
3292 EXPORT_SYMBOL_GPL(mt7530_probe_common);
3293
3294 void
3295 mt7530_remove_common(struct mt7530_priv *priv)
3296 {
3297 if (priv->irq)
3298 mt7530_free_irq(priv);
3299
3300 dsa_unregister_switch(priv->ds);
3301
3302 mutex_destroy(&priv->reg_mutex);
3303 }
3304 EXPORT_SYMBOL_GPL(mt7530_remove_common);
3305
3306 MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
3307 MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch");
3308 MODULE_LICENSE("GPL");
3309