1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Mediatek MT7530 DSA Switch driver
4 * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
5 */
6 #include <linux/etherdevice.h>
7 #include <linux/if_bridge.h>
8 #include <linux/iopoll.h>
9 #include <linux/mdio.h>
10 #include <linux/mfd/syscon.h>
11 #include <linux/module.h>
12 #include <linux/netdevice.h>
13 #include <linux/of_irq.h>
14 #include <linux/of_mdio.h>
15 #include <linux/of_net.h>
16 #include <linux/of_platform.h>
17 #include <linux/phylink.h>
18 #include <linux/regmap.h>
19 #include <linux/regulator/consumer.h>
20 #include <linux/reset.h>
21 #include <linux/gpio/consumer.h>
22 #include <linux/gpio/driver.h>
23 #include <net/dsa.h>
24 #include <net/pkt_cls.h>
25
26 #include "mt7530.h"
27
28 static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs)
29 {
30 return container_of(pcs, struct mt753x_pcs, pcs);
31 }
32
33 /* String, offset, and register size in bytes if different from 4 bytes */
34 static const struct mt7530_mib_desc mt7530_mib[] = {
35 MIB_DESC(1, 0x00, "TxDrop"),
36 MIB_DESC(1, 0x04, "TxCrcErr"),
37 MIB_DESC(1, 0x08, "TxUnicast"),
38 MIB_DESC(1, 0x0c, "TxMulticast"),
39 MIB_DESC(1, 0x10, "TxBroadcast"),
40 MIB_DESC(1, 0x14, "TxCollision"),
41 MIB_DESC(1, 0x18, "TxSingleCollision"),
42 MIB_DESC(1, 0x1c, "TxMultipleCollision"),
43 MIB_DESC(1, 0x20, "TxDeferred"),
44 MIB_DESC(1, 0x24, "TxLateCollision"),
45 MIB_DESC(1, 0x28, "TxExcessiveCollistion"),
46 MIB_DESC(1, 0x2c, "TxPause"),
47 MIB_DESC(1, 0x30, "TxPktSz64"),
48 MIB_DESC(1, 0x34, "TxPktSz65To127"),
49 MIB_DESC(1, 0x38, "TxPktSz128To255"),
50 MIB_DESC(1, 0x3c, "TxPktSz256To511"),
51 MIB_DESC(1, 0x40, "TxPktSz512To1023"),
52 MIB_DESC(1, 0x44, "Tx1024ToMax"),
53 MIB_DESC(2, 0x48, "TxBytes"),
54 MIB_DESC(1, 0x60, "RxDrop"),
55 MIB_DESC(1, 0x64, "RxFiltering"),
56 MIB_DESC(1, 0x68, "RxUnicast"),
57 MIB_DESC(1, 0x6c, "RxMulticast"),
58 MIB_DESC(1, 0x70, "RxBroadcast"),
59 MIB_DESC(1, 0x74, "RxAlignErr"),
60 MIB_DESC(1, 0x78, "RxCrcErr"),
61 MIB_DESC(1, 0x7c, "RxUnderSizeErr"),
62 MIB_DESC(1, 0x80, "RxFragErr"),
63 MIB_DESC(1, 0x84, "RxOverSzErr"),
64 MIB_DESC(1, 0x88, "RxJabberErr"),
65 MIB_DESC(1, 0x8c, "RxPause"),
66 MIB_DESC(1, 0x90, "RxPktSz64"),
67 MIB_DESC(1, 0x94, "RxPktSz65To127"),
68 MIB_DESC(1, 0x98, "RxPktSz128To255"),
69 MIB_DESC(1, 0x9c, "RxPktSz256To511"),
70 MIB_DESC(1, 0xa0, "RxPktSz512To1023"),
71 MIB_DESC(1, 0xa4, "RxPktSz1024ToMax"),
72 MIB_DESC(2, 0xa8, "RxBytes"),
73 MIB_DESC(1, 0xb0, "RxCtrlDrop"),
74 MIB_DESC(1, 0xb4, "RxIngressDrop"),
75 MIB_DESC(1, 0xb8, "RxArlDrop"),
76 };
77
78 static void
79 mt7530_mutex_lock(struct mt7530_priv *priv)
80 {
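/* Note: MDIO_MUTEX_NESTED is a lockdep nesting annotation; taking the
 * parent MDIO bus lock here may nest inside another mdio_lock held by
 * the MDIO layer, and the annotation avoids false-positive deadlock
 * reports (an assumption based on common nested-MDIO usage).
 */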
81 if (priv->bus)
82 mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
83 }
84
85 static void
86 mt7530_mutex_unlock(struct mt7530_priv *priv)
87 {
88 if (priv->bus)
89 mutex_unlock(&priv->bus->mdio_lock);
90 }
91
92 static void
93 core_write(struct mt7530_priv *priv, u32 reg, u32 val)
94 {
95 struct mii_bus *bus = priv->bus;
96 int ret;
97
98 mt7530_mutex_lock(priv);
99
100 /* Write the desired MMD Devad */
101 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
102 MII_MMD_CTRL, MDIO_MMD_VEND2);
103 if (ret < 0)
104 goto err;
105
106 /* Write the desired MMD register address */
107 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
108 MII_MMD_DATA, reg);
109 if (ret < 0)
110 goto err;
111
112 /* Select the Function : DATA with no post increment */
113 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
114 MII_MMD_CTRL, MDIO_MMD_VEND2 | MII_MMD_CTRL_NOINCR);
115 if (ret < 0)
116 goto err;
117
118 /* Write the data into MMD's selected register */
119 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
120 MII_MMD_DATA, val);
121 err:
122 if (ret < 0)
123 dev_err(&bus->dev, "failed to write mmd register\n");
124
125 mt7530_mutex_unlock(priv);
126 }
127
128 static void
129 core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set)
130 {
131 struct mii_bus *bus = priv->bus;
132 u32 val;
133 int ret;
134
135 mt7530_mutex_lock(priv);
136
137 /* Write the desired MMD Devad */
138 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
139 MII_MMD_CTRL, MDIO_MMD_VEND2);
140 if (ret < 0)
141 goto err;
142
143 /* Write the desired MMD register address */
144 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
145 MII_MMD_DATA, reg);
146 if (ret < 0)
147 goto err;
148
149 /* Select the Function : DATA with no post increment */
150 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
151 MII_MMD_CTRL, MDIO_MMD_VEND2 | MII_MMD_CTRL_NOINCR);
152 if (ret < 0)
153 goto err;
154
155 /* Read the content of the MMD's selected register */
156 val = bus->read(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
157 MII_MMD_DATA);
158 val &= ~mask;
159 val |= set;
160 /* Write the data into MMD's selected register */
161 ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
162 MII_MMD_DATA, val);
163 err:
164 if (ret < 0)
165 dev_err(&bus->dev, "failed to write mmd register\n");
166
167 mt7530_mutex_unlock(priv);
168 }
169
170 static void
171 core_set(struct mt7530_priv *priv, u32 reg, u32 val)
172 {
173 core_rmw(priv, reg, 0, val);
174 }
175
176 static void
177 core_clear(struct mt7530_priv *priv, u32 reg, u32 val)
178 {
179 core_rmw(priv, reg, val, 0);
180 }
181
182 static int
183 mt7530_mii_write(struct mt7530_priv *priv, u32 reg, u32 val)
184 {
185 int ret;
186
187 ret = regmap_write(priv->regmap, reg, val);
188
189 if (ret < 0)
190 dev_err(priv->dev,
191 "failed to write mt7530 register\n");
192
193 return ret;
194 }
195
196 static u32
197 mt7530_mii_read(struct mt7530_priv *priv, u32 reg)
198 {
199 int ret;
200 u32 val;
201
202 ret = regmap_read(priv->regmap, reg, &val);
203 if (ret) {
204 WARN_ON_ONCE(1);
205 dev_err(priv->dev,
206 "failed to read mt7530 register\n");
207 return 0;
208 }
209
210 return val;
211 }
212
213 static void
214 mt7530_write(struct mt7530_priv *priv, u32 reg, u32 val)
215 {
216 mt7530_mutex_lock(priv);
217
218 mt7530_mii_write(priv, reg, val);
219
220 mt7530_mutex_unlock(priv);
221 }
222
223 static u32
224 _mt7530_unlocked_read(struct mt7530_dummy_poll *p)
225 {
226 return mt7530_mii_read(p->priv, p->reg);
227 }
228
229 static u32
230 _mt7530_read(struct mt7530_dummy_poll *p)
231 {
232 u32 val;
233
234 mt7530_mutex_lock(p->priv);
235
236 val = mt7530_mii_read(p->priv, p->reg);
237
238 mt7530_mutex_unlock(p->priv);
239
240 return val;
241 }
242
243 static u32
244 mt7530_read(struct mt7530_priv *priv, u32 reg)
245 {
246 struct mt7530_dummy_poll p;
247
248 INIT_MT7530_DUMMY_POLL(&p, priv, reg);
249 return _mt7530_read(&p);
250 }
251
252 static void
253 mt7530_rmw(struct mt7530_priv *priv, u32 reg,
254 u32 mask, u32 set)
255 {
256 mt7530_mutex_lock(priv);
257
258 regmap_update_bits(priv->regmap, reg, mask, set);
259
260 mt7530_mutex_unlock(priv);
261 }
262
263 static void
264 mt7530_set(struct mt7530_priv *priv, u32 reg, u32 val)
265 {
266 mt7530_rmw(priv, reg, val, val);
267 }
268
269 static void
270 mt7530_clear(struct mt7530_priv *priv, u32 reg, u32 val)
271 {
272 mt7530_rmw(priv, reg, val, 0);
273 }
274
275 static int
276 mt7530_fdb_cmd(struct mt7530_priv *priv, enum mt7530_fdb_cmd cmd, u32 *rsp)
277 {
278 u32 val;
279 int ret;
280 struct mt7530_dummy_poll p;
281
282 /* Set the command operating upon the MAC address entries */
283 val = ATC_BUSY | ATC_MAT(0) | cmd;
284 mt7530_write(priv, MT7530_ATC, val);
285
286 INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_ATC);
287 ret = readx_poll_timeout(_mt7530_read, &p, val,
288 !(val & ATC_BUSY), 20, 20000);
289 if (ret < 0) {
290 dev_err(priv->dev, "reset timeout\n");
291 return ret;
292 }
293
294 /* Additional sanity check for the read command: fail if the
295 * specified entry is invalid
296 */
297 val = mt7530_read(priv, MT7530_ATC);
298 if ((cmd == MT7530_FDB_READ) && (val & ATC_INVALID))
299 return -EINVAL;
300
301 if (rsp)
302 *rsp = val;
303
304 return 0;
305 }
306
307 static void
308 mt7530_fdb_read(struct mt7530_priv *priv, struct mt7530_fdb *fdb)
309 {
310 u32 reg[3];
311 int i;
312
313 /* Read from ARL table into an array */
314 for (i = 0; i < 3; i++) {
315 reg[i] = mt7530_read(priv, MT7530_TSRA1 + (i * 4));
316
317 dev_dbg(priv->dev, "%s(%d) reg[%d]=0x%x\n",
318 __func__, __LINE__, i, reg[i]);
319 }
320
321 fdb->vid = (reg[1] >> CVID) & CVID_MASK;
322 fdb->aging = (reg[2] >> AGE_TIMER) & AGE_TIMER_MASK;
323 fdb->port_mask = (reg[2] >> PORT_MAP) & PORT_MAP_MASK;
324 fdb->mac[0] = (reg[0] >> MAC_BYTE_0) & MAC_BYTE_MASK;
325 fdb->mac[1] = (reg[0] >> MAC_BYTE_1) & MAC_BYTE_MASK;
326 fdb->mac[2] = (reg[0] >> MAC_BYTE_2) & MAC_BYTE_MASK;
327 fdb->mac[3] = (reg[0] >> MAC_BYTE_3) & MAC_BYTE_MASK;
328 fdb->mac[4] = (reg[1] >> MAC_BYTE_4) & MAC_BYTE_MASK;
329 fdb->mac[5] = (reg[1] >> MAC_BYTE_5) & MAC_BYTE_MASK;
330 fdb->noarp = ((reg[2] >> ENT_STATUS) & ENT_STATUS_MASK) == STATIC_ENT;
331 }
332
333 static void
334 mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
335 u8 port_mask, const u8 *mac,
336 u8 aging, u8 type)
337 {
338 u32 reg[3] = { 0 };
339 int i;
340
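/* The three words below are written to three consecutive registers
 * starting at MT7530_ATA1: reg[0] and reg[1] carry the MAC address
 * together with the CVID, FID and IVL fields, while reg[2] carries the
 * aging timer, port map and entry status.
 */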
341 reg[1] |= vid & CVID_MASK;
342 reg[1] |= ATA2_IVL;
343 reg[1] |= ATA2_FID(FID_BRIDGED);
344 reg[2] |= (aging & AGE_TIMER_MASK) << AGE_TIMER;
345 reg[2] |= (port_mask & PORT_MAP_MASK) << PORT_MAP;
346 /* STATIC_ENT indicates a static entry that won't be
347 * aged out, while STATIC_EMP marks the entry for
348 * erasure
349 */
350 reg[2] |= (type & ENT_STATUS_MASK) << ENT_STATUS;
351 reg[1] |= mac[5] << MAC_BYTE_5;
352 reg[1] |= mac[4] << MAC_BYTE_4;
353 reg[0] |= mac[3] << MAC_BYTE_3;
354 reg[0] |= mac[2] << MAC_BYTE_2;
355 reg[0] |= mac[1] << MAC_BYTE_1;
356 reg[0] |= mac[0] << MAC_BYTE_0;
357
358 /* Write array into the ARL table */
359 for (i = 0; i < 3; i++)
360 mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]);
361 }
362
363 /* Set up switch core clock for MT7530 */
364 static void mt7530_pll_setup(struct mt7530_priv *priv)
365 {
366 /* Disable core clock */
367 core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
368
369 /* Disable PLL */
370 core_write(priv, CORE_GSWPLL_GRP1, 0);
371
372 /* Set core clock to 500MHz */
373 core_write(priv, CORE_GSWPLL_GRP2,
374 RG_GSWPLL_POSDIV_500M(1) |
375 RG_GSWPLL_FBKDIV_500M(25));
376
377 /* Enable PLL */
378 core_write(priv, CORE_GSWPLL_GRP1,
379 RG_GSWPLL_EN_PRE |
380 RG_GSWPLL_POSDIV_200M(2) |
381 RG_GSWPLL_FBKDIV_200M(32));
382
383 udelay(20);
384
385 /* Enable core clock */
386 core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
387 }
388
389 /* If port 6 is available as a CPU port, always prefer that as the default,
390 * otherwise don't care.
391 */
392 static struct dsa_port *
393 mt753x_preferred_default_local_cpu_port(struct dsa_switch *ds)
394 {
395 struct dsa_port *cpu_dp = dsa_to_port(ds, 6);
396
397 if (dsa_port_is_cpu(cpu_dp))
398 return cpu_dp;
399
400 return NULL;
401 }
402
403 /* Setup port 6 interface mode and TRGMII TX circuit */
404 static void
405 mt7530_setup_port6(struct dsa_switch *ds, phy_interface_t interface)
406 {
407 struct mt7530_priv *priv = ds->priv;
408 u32 ncpo1, ssc_delta, xtal;
409
410 /* Disable the MT7530 TRGMII clocks */
411 core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
412
413 if (interface == PHY_INTERFACE_MODE_RGMII) {
414 mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
415 P6_INTF_MODE(0));
416 return;
417 }
418
419 mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK, P6_INTF_MODE(1));
420
421 xtal = mt7530_read(priv, MT753X_MTRAP) & MT7530_XTAL_MASK;
422
423 if (xtal == MT7530_XTAL_25MHZ)
424 ssc_delta = 0x57;
425 else
426 ssc_delta = 0x87;
427
428 if (priv->id == ID_MT7621) {
429 /* PLL frequency: 125MHz: 1.0GBit */
430 if (xtal == MT7530_XTAL_40MHZ)
431 ncpo1 = 0x0640;
432 if (xtal == MT7530_XTAL_25MHZ)
433 ncpo1 = 0x0a00;
434 } else { /* PLL frequency: 250MHz: 2.0Gbit */
435 if (xtal == MT7530_XTAL_40MHZ)
436 ncpo1 = 0x0c80;
437 if (xtal == MT7530_XTAL_25MHZ)
438 ncpo1 = 0x1400;
439 }
440
441 /* Setup the MT7530 TRGMII Tx Clock */
442 core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
443 core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
444 core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
445 core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
446 core_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
447 RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);
448 core_write(priv, CORE_PLL_GROUP2, RG_SYSPLL_EN_NORMAL |
449 RG_SYSPLL_VODEN | RG_SYSPLL_POSDIV(1));
450 core_write(priv, CORE_PLL_GROUP7, RG_LCDDS_PCW_NCPO_CHG |
451 RG_LCCDS_C(3) | RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
452
453 /* Enable the MT7530 TRGMII clocks */
454 core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
455 }
456
457 static void
458 mt7531_pll_setup(struct mt7530_priv *priv)
459 {
460 enum mt7531_xtal_fsel xtal;
461 u32 top_sig;
462 u32 hwstrap;
463 u32 val;
464
465 val = mt7530_read(priv, MT7531_CREV);
466 top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
467 hwstrap = mt7530_read(priv, MT753X_TRAP);
468 if ((val & CHIP_REV_M) > 0)
469 xtal = (top_sig & PAD_MCM_SMI_EN) ? MT7531_XTAL_FSEL_40MHZ :
470 MT7531_XTAL_FSEL_25MHZ;
471 else
472 xtal = (hwstrap & MT7531_XTAL25) ? MT7531_XTAL_FSEL_25MHZ :
473 MT7531_XTAL_FSEL_40MHZ;
474
475 /* Step 1 : Disable MT7531 COREPLL */
476 val = mt7530_read(priv, MT7531_PLLGP_EN);
477 val &= ~EN_COREPLL;
478 mt7530_write(priv, MT7531_PLLGP_EN, val);
479
480 /* Step 2: switch to XTAL output */
481 val = mt7530_read(priv, MT7531_PLLGP_EN);
482 val |= SW_CLKSW;
483 mt7530_write(priv, MT7531_PLLGP_EN, val);
484
485 val = mt7530_read(priv, MT7531_PLLGP_CR0);
486 val &= ~RG_COREPLL_EN;
487 mt7530_write(priv, MT7531_PLLGP_CR0, val);
488
489 /* Step 3: disable PLLGP and enable program PLLGP */
490 val = mt7530_read(priv, MT7531_PLLGP_EN);
491 val |= SW_PLLGP;
492 mt7530_write(priv, MT7531_PLLGP_EN, val);
493
494 /* Step 4: program COREPLL output frequency to 500MHz */
495 val = mt7530_read(priv, MT7531_PLLGP_CR0);
496 val &= ~RG_COREPLL_POSDIV_M;
497 val |= 2 << RG_COREPLL_POSDIV_S;
498 mt7530_write(priv, MT7531_PLLGP_CR0, val);
499 usleep_range(25, 35);
500
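/* The SDM PCW values below select the core PLL feedback ratio per
 * crystal frequency (0x140000 for a 25MHz XTAL, 0x190000 for a 40MHz
 * XTAL); presumably both target the 500MHz COREPLL output programmed
 * in step 4.
 */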
501 switch (xtal) {
502 case MT7531_XTAL_FSEL_25MHZ:
503 val = mt7530_read(priv, MT7531_PLLGP_CR0);
504 val &= ~RG_COREPLL_SDM_PCW_M;
505 val |= 0x140000 << RG_COREPLL_SDM_PCW_S;
506 mt7530_write(priv, MT7531_PLLGP_CR0, val);
507 break;
508 case MT7531_XTAL_FSEL_40MHZ:
509 val = mt7530_read(priv, MT7531_PLLGP_CR0);
510 val &= ~RG_COREPLL_SDM_PCW_M;
511 val |= 0x190000 << RG_COREPLL_SDM_PCW_S;
512 mt7530_write(priv, MT7531_PLLGP_CR0, val);
513 break;
514 }
515
516 /* Set feedback divide ratio update signal to high */
517 val = mt7530_read(priv, MT7531_PLLGP_CR0);
518 val |= RG_COREPLL_SDM_PCW_CHG;
519 mt7530_write(priv, MT7531_PLLGP_CR0, val);
520 /* Wait for at least 16 XTAL clocks */
521 usleep_range(10, 20);
522
523 /* Step 5: set feedback divide ratio update signal to low */
524 val = mt7530_read(priv, MT7531_PLLGP_CR0);
525 val &= ~RG_COREPLL_SDM_PCW_CHG;
526 mt7530_write(priv, MT7531_PLLGP_CR0, val);
527
528 /* Enable 325M clock for SGMII */
529 mt7530_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000);
530
531 /* Enable 250SSC clock for RGMII */
532 mt7530_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000);
533
534 /* Step 6: Enable MT7531 PLL */
535 val = mt7530_read(priv, MT7531_PLLGP_CR0);
536 val |= RG_COREPLL_EN;
537 mt7530_write(priv, MT7531_PLLGP_CR0, val);
538
539 val = mt7530_read(priv, MT7531_PLLGP_EN);
540 val |= EN_COREPLL;
541 mt7530_write(priv, MT7531_PLLGP_EN, val);
542 usleep_range(25, 35);
543 }
544
545 static void
546 mt7530_mib_reset(struct dsa_switch *ds)
547 {
548 struct mt7530_priv *priv = ds->priv;
549
550 mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_FLUSH);
551 mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE);
552 }
553
554 static int mt7530_phy_read_c22(struct mt7530_priv *priv, int port, int regnum)
555 {
556 return mdiobus_read_nested(priv->bus, port, regnum);
557 }
558
559 static int mt7530_phy_write_c22(struct mt7530_priv *priv, int port, int regnum,
560 u16 val)
561 {
562 return mdiobus_write_nested(priv->bus, port, regnum, val);
563 }
564
565 static int mt7530_phy_read_c45(struct mt7530_priv *priv, int port,
566 int devad, int regnum)
567 {
568 return mdiobus_c45_read_nested(priv->bus, port, devad, regnum);
569 }
570
571 static int mt7530_phy_write_c45(struct mt7530_priv *priv, int port, int devad,
572 int regnum, u16 val)
573 {
574 return mdiobus_c45_write_nested(priv->bus, port, devad, regnum, val);
575 }
576
577 static int
578 mt7531_ind_c45_phy_read(struct mt7530_priv *priv, int port, int devad,
579 int regnum)
580 {
581 struct mt7530_dummy_poll p;
582 u32 reg, val;
583 int ret;
584
585 INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
586
587 mt7530_mutex_lock(priv);
588
589 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
590 !(val & MT7531_PHY_ACS_ST), 20, 100000);
591 if (ret < 0) {
592 dev_err(priv->dev, "poll timeout\n");
593 goto out;
594 }
595
596 reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) |
597 MT7531_MDIO_DEV_ADDR(devad) | regnum;
598 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
599
600 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
601 !(val & MT7531_PHY_ACS_ST), 20, 100000);
602 if (ret < 0) {
603 dev_err(priv->dev, "poll timeout\n");
604 goto out;
605 }
606
607 reg = MT7531_MDIO_CL45_READ | MT7531_MDIO_PHY_ADDR(port) |
608 MT7531_MDIO_DEV_ADDR(devad);
609 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
610
611 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
612 !(val & MT7531_PHY_ACS_ST), 20, 100000);
613 if (ret < 0) {
614 dev_err(priv->dev, "poll timeout\n");
615 goto out;
616 }
617
618 ret = val & MT7531_MDIO_RW_DATA_MASK;
619 out:
620 mt7530_mutex_unlock(priv);
621
622 return ret;
623 }
624
625 static int
626 mt7531_ind_c45_phy_write(struct mt7530_priv *priv, int port, int devad,
627 int regnum, u16 data)
628 {
629 struct mt7530_dummy_poll p;
630 u32 val, reg;
631 int ret;
632
633 INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
634
635 mt7530_mutex_lock(priv);
636
637 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
638 !(val & MT7531_PHY_ACS_ST), 20, 100000);
639 if (ret < 0) {
640 dev_err(priv->dev, "poll timeout\n");
641 goto out;
642 }
643
644 reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) |
645 MT7531_MDIO_DEV_ADDR(devad) | regnum;
646 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
647
648 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
649 !(val & MT7531_PHY_ACS_ST), 20, 100000);
650 if (ret < 0) {
651 dev_err(priv->dev, "poll timeout\n");
652 goto out;
653 }
654
655 reg = MT7531_MDIO_CL45_WRITE | MT7531_MDIO_PHY_ADDR(port) |
656 MT7531_MDIO_DEV_ADDR(devad) | data;
657 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
658
659 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
660 !(val & MT7531_PHY_ACS_ST), 20, 100000);
661 if (ret < 0) {
662 dev_err(priv->dev, "poll timeout\n");
663 goto out;
664 }
665
666 out:
667 mt7530_mutex_unlock(priv);
668
669 return ret;
670 }
671
672 static int
673 mt7531_ind_c22_phy_read(struct mt7530_priv *priv, int port, int regnum)
674 {
675 struct mt7530_dummy_poll p;
676 int ret;
677 u32 val;
678
679 INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
680
681 mt7530_mutex_lock(priv);
682
683 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
684 !(val & MT7531_PHY_ACS_ST), 20, 100000);
685 if (ret < 0) {
686 dev_err(priv->dev, "poll timeout\n");
687 goto out;
688 }
689
690 val = MT7531_MDIO_CL22_READ | MT7531_MDIO_PHY_ADDR(port) |
691 MT7531_MDIO_REG_ADDR(regnum);
692
693 mt7530_mii_write(priv, MT7531_PHY_IAC, val | MT7531_PHY_ACS_ST);
694
695 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
696 !(val & MT7531_PHY_ACS_ST), 20, 100000);
697 if (ret < 0) {
698 dev_err(priv->dev, "poll timeout\n");
699 goto out;
700 }
701
702 ret = val & MT7531_MDIO_RW_DATA_MASK;
703 out:
704 mt7530_mutex_unlock(priv);
705
706 return ret;
707 }
708
709 static int
710 mt7531_ind_c22_phy_write(struct mt7530_priv *priv, int port, int regnum,
711 u16 data)
712 {
713 struct mt7530_dummy_poll p;
714 int ret;
715 u32 reg;
716
717 INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
718
719 mt7530_mutex_lock(priv);
720
721 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
722 !(reg & MT7531_PHY_ACS_ST), 20, 100000);
723 if (ret < 0) {
724 dev_err(priv->dev, "poll timeout\n");
725 goto out;
726 }
727
728 reg = MT7531_MDIO_CL22_WRITE | MT7531_MDIO_PHY_ADDR(port) |
729 MT7531_MDIO_REG_ADDR(regnum) | data;
730
731 mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
732
733 ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
734 !(reg & MT7531_PHY_ACS_ST), 20, 100000);
735 if (ret < 0) {
736 dev_err(priv->dev, "poll timeout\n");
737 goto out;
738 }
739
740 out:
741 mt7530_mutex_unlock(priv);
742
743 return ret;
744 }
745
746 static int
747 mt753x_phy_read_c22(struct mii_bus *bus, int port, int regnum)
748 {
749 struct mt7530_priv *priv = bus->priv;
750
751 return priv->info->phy_read_c22(priv, port, regnum);
752 }
753
754 static int
755 mt753x_phy_read_c45(struct mii_bus *bus, int port, int devad, int regnum)
756 {
757 struct mt7530_priv *priv = bus->priv;
758
759 return priv->info->phy_read_c45(priv, port, devad, regnum);
760 }
761
762 static int
763 mt753x_phy_write_c22(struct mii_bus *bus, int port, int regnum, u16 val)
764 {
765 struct mt7530_priv *priv = bus->priv;
766
767 return priv->info->phy_write_c22(priv, port, regnum, val);
768 }
769
770 static int
771 mt753x_phy_write_c45(struct mii_bus *bus, int port, int devad, int regnum,
772 u16 val)
773 {
774 struct mt7530_priv *priv = bus->priv;
775
776 return priv->info->phy_write_c45(priv, port, devad, regnum, val);
777 }
778
779 static void
780 mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset,
781 uint8_t *data)
782 {
783 int i;
784
785 if (stringset != ETH_SS_STATS)
786 return;
787
788 for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++)
789 ethtool_puts(&data, mt7530_mib[i].name);
790 }
791
792 static void
793 mt7530_get_ethtool_stats(struct dsa_switch *ds, int port,
794 uint64_t *data)
795 {
796 struct mt7530_priv *priv = ds->priv;
797 const struct mt7530_mib_desc *mib;
798 u32 reg, i;
799 u64 hi;
800
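/* Counters described with size 2 (e.g. TxBytes and RxBytes) span two
 * consecutive 32-bit registers: the low word sits at the descriptor
 * offset and the high word follows at offset + 4.
 */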
801 for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) {
802 mib = &mt7530_mib[i];
803 reg = MT7530_PORT_MIB_COUNTER(port) + mib->offset;
804
805 data[i] = mt7530_read(priv, reg);
806 if (mib->size == 2) {
807 hi = mt7530_read(priv, reg + 4);
808 data[i] |= hi << 32;
809 }
810 }
811 }
812
813 static int
814 mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset)
815 {
816 if (sset != ETH_SS_STATS)
817 return 0;
818
819 return ARRAY_SIZE(mt7530_mib);
820 }
821
822 static int
823 mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
824 {
825 struct mt7530_priv *priv = ds->priv;
826 unsigned int secs = msecs / 1000;
827 unsigned int tmp_age_count;
828 unsigned int error = -1;
829 unsigned int age_count;
830 unsigned int age_unit;
831
832 /* Applied timer is (AGE_CNT + 1) * (AGE_UNIT + 1) seconds */
833 if (secs < 1 || secs > (AGE_CNT_MAX + 1) * (AGE_UNIT_MAX + 1))
834 return -ERANGE;
835
836 /* iterate through all possible age_count to find the closest pair */
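/* For example, a requested ageing time of 300 seconds is matched
 * exactly by age_count = 0 and age_unit = 299, since
 * (0 + 1) * (299 + 1) = 300 (assuming 299 does not exceed
 * AGE_UNIT_MAX).
 */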
837 for (tmp_age_count = 0; tmp_age_count <= AGE_CNT_MAX; ++tmp_age_count) {
838 unsigned int tmp_age_unit = secs / (tmp_age_count + 1) - 1;
839
840 if (tmp_age_unit <= AGE_UNIT_MAX) {
841 unsigned int tmp_error = secs -
842 (tmp_age_count + 1) * (tmp_age_unit + 1);
843
844 /* found a closer pair */
845 if (error > tmp_error) {
846 error = tmp_error;
847 age_count = tmp_age_count;
848 age_unit = tmp_age_unit;
849 }
850
851 /* found the exact match, so break the loop */
852 if (!error)
853 break;
854 }
855 }
856
857 mt7530_write(priv, MT7530_AAC, AGE_CNT(age_count) | AGE_UNIT(age_unit));
858
859 return 0;
860 }
861
862 static const char *mt7530_p5_mode_str(unsigned int mode)
863 {
864 switch (mode) {
865 case MUX_PHY_P0:
866 return "MUX PHY P0";
867 case MUX_PHY_P4:
868 return "MUX PHY P4";
869 default:
870 return "GMAC5";
871 }
872 }
873
874 static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
875 {
876 struct mt7530_priv *priv = ds->priv;
877 u8 tx_delay = 0;
878 int val;
879
880 mutex_lock(&priv->reg_mutex);
881
882 val = mt7530_read(priv, MT753X_MTRAP);
883
884 val &= ~MT7530_P5_PHY0_SEL & ~MT7530_P5_MAC_SEL & ~MT7530_P5_RGMII_MODE;
885
886 switch (priv->p5_mode) {
887 /* MUX_PHY_P0: P0 -> P5 -> SoC MAC */
888 case MUX_PHY_P0:
889 val |= MT7530_P5_PHY0_SEL;
890 fallthrough;
891
892 /* MUX_PHY_P4: P4 -> P5 -> SoC MAC */
893 case MUX_PHY_P4:
894 /* Setup the MAC by default for the cpu port */
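/* 0x56300 is an opaque vendor default for the port 5 PMCR; it
 * presumably forces the link up at 1Gbps full duplex with flow
 * control enabled.
 */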
895 mt7530_write(priv, MT753X_PMCR_P(5), 0x56300);
896 break;
897
898 /* GMAC5: P5 -> SoC MAC or external PHY */
899 default:
900 val |= MT7530_P5_MAC_SEL;
901 break;
902 }
903
904 /* Setup RGMII settings */
905 if (phy_interface_mode_is_rgmii(interface)) {
906 val |= MT7530_P5_RGMII_MODE;
907
908 /* P5 RGMII RX Clock Control: delay setting for 1000M */
909 mt7530_write(priv, MT7530_P5RGMIIRXCR, CSR_RGMII_EDGE_ALIGN);
910
911 /* Don't set delay in DSA mode */
912 if (!dsa_is_dsa_port(priv->ds, 5) &&
913 (interface == PHY_INTERFACE_MODE_RGMII_TXID ||
914 interface == PHY_INTERFACE_MODE_RGMII_ID))
915 tx_delay = 4; /* n * 0.5 ns */
916
917 /* P5 RGMII TX Clock Control: delay x */
918 mt7530_write(priv, MT7530_P5RGMIITXCR,
919 CSR_RGMII_TXC_CFG(0x10 + tx_delay));
920
921 /* reduce P5 RGMII Tx driving, 8mA */
922 mt7530_write(priv, MT7530_IO_DRV_CR,
923 P5_IO_CLK_DRV(1) | P5_IO_DATA_DRV(1));
924 }
925
926 mt7530_write(priv, MT753X_MTRAP, val);
927
928 dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, mode=%s, phy-mode=%s\n", val,
929 mt7530_p5_mode_str(priv->p5_mode), phy_modes(interface));
930
931 mutex_unlock(&priv->reg_mutex);
932 }
933
934 /* In Clause 5 of IEEE Std 802-2014, two sublayers of the data link layer (DLL)
935 * of the Open Systems Interconnection basic reference model (OSI/RM) are
936 * described; the medium access control (MAC) and logical link control (LLC)
937 * sublayers. The MAC sublayer is the one facing the physical layer.
938 *
939 * In 8.2 of IEEE Std 802.1Q-2022, the Bridge architecture is described. A
940 * Bridge component comprises a MAC Relay Entity for interconnecting the Ports
941 * of the Bridge, at least two Ports, and higher layer entities with at least a
942 * Spanning Tree Protocol Entity included.
943 *
944 * Each Bridge Port also functions as an end station and shall provide the MAC
945 * Service to an LLC Entity. Each instance of the MAC Service is provided to a
946 * distinct LLC Entity that supports protocol identification, multiplexing, and
947 * demultiplexing, for protocol data unit (PDU) transmission and reception by
948 * one or more higher layer entities.
949 *
950 * It is described in 8.13.9 of IEEE Std 802.1Q-2022 that in a Bridge, the LLC
951 * Entity associated with each Bridge Port is modeled as being directly
952 * connected to the attached Local Area Network (LAN).
953 *
954 * On the switch with CPU port architecture, CPU port functions as Management
955 * Port, and the Management Port functionality is provided by software which
956 * functions as an end station. Software is connected to an IEEE 802 LAN that is
957 * wholly contained within the system that incorporates the Bridge. Software
958 * provides access to the LLC Entity associated with each Bridge Port by the
959 * value of the source port field on the special tag on the frame received by
960 * software.
961 *
962 * We call frames that carry control information to determine the active
963 * topology and current extent of each Virtual Local Area Network (VLAN), i.e.,
964 * spanning tree or Shortest Path Bridging (SPB) and Multiple VLAN Registration
965 * Protocol Data Units (MVRPDUs), and frames from other link constrained
966 * protocols, such as Extensible Authentication Protocol over LAN (EAPOL) and
967 * Link Layer Discovery Protocol (LLDP), link-local frames. They are not
968 * forwarded by a Bridge. Permanently configured entries in the filtering
969 * database (FDB) ensure that such frames are discarded by the Forwarding
970 * Process. In 8.6.3 of IEEE Std 802.1Q-2022, this is described in detail:
971 *
972 * Each of the reserved MAC addresses specified in Table 8-1
973 * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]) shall be
974 * permanently configured in the FDB in C-VLAN components and ERs.
975 *
976 * Each of the reserved MAC addresses specified in Table 8-2
977 * (01-80-C2-00-00-[01,02,03,04,05,06,07,08,09,0A,0E]) shall be permanently
978 * configured in the FDB in S-VLAN components.
979 *
980 * Each of the reserved MAC addresses specified in Table 8-3
981 * (01-80-C2-00-00-[01,02,04,0E]) shall be permanently configured in the FDB in
982 * TPMR components.
983 *
984 * The FDB entries for reserved MAC addresses shall specify filtering for all
985 * Bridge Ports and all VIDs. Management shall not provide the capability to
986 * modify or remove entries for reserved MAC addresses.
987 *
988 * The addresses in Table 8-1, Table 8-2, and Table 8-3 determine the scope of
989 * propagation of PDUs within a Bridged Network, as follows:
990 *
991 * The Nearest Bridge group address (01-80-C2-00-00-0E) is an address that no
992 * conformant Two-Port MAC Relay (TPMR) component, Service VLAN (S-VLAN)
993 * component, Customer VLAN (C-VLAN) component, or MAC Bridge can forward.
994 * PDUs transmitted using this destination address, or any other addresses
995 * that appear in Table 8-1, Table 8-2, and Table 8-3
996 * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]), can
997 * therefore travel no further than those stations that can be reached via a
998 * single individual LAN from the originating station.
999 *
1000 * The Nearest non-TPMR Bridge group address (01-80-C2-00-00-03), is an
1001 * address that no conformant S-VLAN component, C-VLAN component, or MAC
1002 * Bridge can forward; however, this address is relayed by a TPMR component.
1003 * PDUs using this destination address, or any of the other addresses that
1004 * appear in both Table 8-1 and Table 8-2 but not in Table 8-3
1005 * (01-80-C2-00-00-[00,03,05,06,07,08,09,0A,0B,0C,0D,0F]), will be relayed by
1006 * any TPMRs but will propagate no further than the nearest S-VLAN component,
1007 * C-VLAN component, or MAC Bridge.
1008 *
1009 * The Nearest Customer Bridge group address (01-80-C2-00-00-00) is an address
1010 * that no conformant C-VLAN component, MAC Bridge can forward; however, it is
1011 * relayed by TPMR components and S-VLAN components. PDUs using this
1012 * destination address, or any of the other addresses that appear in Table 8-1
1013 * but not in either Table 8-2 or Table 8-3 (01-80-C2-00-00-[00,0B,0C,0D,0F]),
1014 * will be relayed by TPMR components and S-VLAN components but will propagate
1015 * no further than the nearest C-VLAN component or MAC Bridge.
1016 *
1017 * Because the LLC Entity associated with each Bridge Port is provided via CPU
1018 * port, we must not filter these frames but forward them to CPU port.
1019 *
1020 * In a Bridge, the transmission Port is majorly decided by ingress and egress
1021 * rules, FDB, and spanning tree Port State functions of the Forwarding Process.
1022 * For link-local frames, only CPU port should be designated as destination port
1023 * in the FDB, and the other functions of the Forwarding Process must not
1024 * interfere with the decision of the transmission Port. We call this process
1025 * trapping frames to CPU port.
1026 *
1027 * Therefore, on the switch with CPU port architecture, link-local frames must
1028 * be trapped to CPU port, and certain link-local frames received by a Port of a
1029 * Bridge comprising a TPMR component or an S-VLAN component must be excluded
1030 * from it.
1031 *
1032 * A Bridge of the switch with CPU port architecture cannot comprise a Two-Port
1033 * MAC Relay (TPMR) component as a TPMR component supports only a subset of the
1034 * functionality of a MAC Bridge. A Bridge comprising two Ports (Management Port
1035 * doesn't count) of this architecture will either function as a standard MAC
1036 * Bridge or a standard VLAN Bridge.
1037 *
1038 * Therefore, a Bridge of this architecture can only comprise S-VLAN components,
1039 * C-VLAN components, or MAC Bridge components. Since there's no TPMR component,
1040 * we don't need to relay PDUs using the destination addresses specified on the
1041 * Nearest non-TPMR section, and the portion of the Nearest Customer Bridge
1042 * section where they must be relayed by TPMR components.
1043 *
1044 * One option to trap link-local frames to CPU port is to add static FDB entries
1045 * with CPU port designated as destination port. However, because
1046 * Independent VLAN Learning (IVL) is being used on every VID, each entry only
1047 * applies to a single VLAN Identifier (VID). For a Bridge comprising a MAC
1048 * Bridge component or a C-VLAN component, there would have to be 16 times 4096
1049 * entries. This switch intellectual property can only hold a maximum of 2048
1050 * entries. Using this option, there also isn't a mechanism to prevent
1051 * link-local frames from being discarded when the spanning tree Port State of
1052 * the reception Port is discarding.
1053 *
1054 * The remaining option is to utilise the BPC, RGAC1, RGAC2, RGAC3, and RGAC4
1055 * registers. Whilst this applies to every VID, it doesn't contain all of the
1056 * reserved MAC addresses without affecting the remaining Standard Group MAC
1057 * Addresses. The REV_UN frame tag utilised using the RGAC4 register covers the
1058 * remaining 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F] destination
1059 * addresses. It also includes the 01-80-C2-00-00-22 to 01-80-C2-00-00-FF
1060 * destination addresses which may be relayed by MAC Bridges or VLAN Bridges.
1061 * The latter option provides better but not complete conformance.
1062 *
1063 * This switch intellectual property also does not provide a mechanism to trap
1064 * link-local frames with specific destination addresses to CPU port by Bridge,
1065 * to conform to the filtering rules for the distinct Bridge components.
1066 *
1067 * Therefore, regardless of the type of the Bridge component, link-local frames
1068 * with these destination addresses will be trapped to CPU port:
1069 *
1070 * 01-80-C2-00-00-[00,01,02,03,0E]
1071 *
1072 * In a Bridge comprising a MAC Bridge component or a C-VLAN component:
1073 *
1074 * Link-local frames with these destination addresses won't be trapped to CPU
1075 * port which won't conform to IEEE Std 802.1Q-2022:
1076 *
1077 * 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F]
1078 *
1079 * In a Bridge comprising an S-VLAN component:
1080 *
1081 * Link-local frames with these destination addresses will be trapped to CPU
1082 * port which won't conform to IEEE Std 802.1Q-2022:
1083 *
1084 * 01-80-C2-00-00-00
1085 *
1086 * Link-local frames with these destination addresses won't be trapped to CPU
1087 * port which won't conform to IEEE Std 802.1Q-2022:
1088 *
1089 * 01-80-C2-00-00-[04,05,06,07,08,09,0A]
1090 *
1091 * To trap link-local frames to CPU port as conformant as this switch
1092 * intellectual property can allow, link-local frames are made to be regarded as
1093 * Bridge Protocol Data Units (BPDUs). This is because this switch intellectual
1094 * property only lets the frames regarded as BPDUs bypass the spanning tree Port
1095 * State function of the Forwarding Process.
1096 *
1097 * The only remaining interference is the ingress rules. When the reception Port
1098 * has no PVID assigned on software, VLAN-untagged frames won't be allowed in.
1099 * There doesn't seem to be a mechanism on the switch intellectual property to
1100 * have link-local frames bypass this function of the Forwarding Process.
1101 */
1102 static void
1103 mt753x_trap_frames(struct mt7530_priv *priv)
1104 {
1105 /* Trap 802.1X PAE frames and BPDUs to the CPU port(s) and egress them
1106 * VLAN-untagged.
1107 */
1108 mt7530_rmw(priv, MT753X_BPC,
1109 PAE_BPDU_FR | PAE_EG_TAG_MASK | PAE_PORT_FW_MASK |
1110 BPDU_EG_TAG_MASK | BPDU_PORT_FW_MASK,
1111 PAE_BPDU_FR | PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
1112 PAE_PORT_FW(TO_CPU_FW_CPU_ONLY) |
1113 BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
1114 TO_CPU_FW_CPU_ONLY);
1115
1116 /* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress
1117 * them VLAN-untagged.
1118 */
1119 mt7530_rmw(priv, MT753X_RGAC1,
1120 R02_BPDU_FR | R02_EG_TAG_MASK | R02_PORT_FW_MASK |
1121 R01_BPDU_FR | R01_EG_TAG_MASK | R01_PORT_FW_MASK,
1122 R02_BPDU_FR | R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
1123 R02_PORT_FW(TO_CPU_FW_CPU_ONLY) | R01_BPDU_FR |
1124 R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
1125 TO_CPU_FW_CPU_ONLY);
1126
1127 /* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress
1128 * them VLAN-untagged.
1129 */
1130 mt7530_rmw(priv, MT753X_RGAC2,
1131 R0E_BPDU_FR | R0E_EG_TAG_MASK | R0E_PORT_FW_MASK |
1132 R03_BPDU_FR | R03_EG_TAG_MASK | R03_PORT_FW_MASK,
1133 R0E_BPDU_FR | R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
1134 R0E_PORT_FW(TO_CPU_FW_CPU_ONLY) | R03_BPDU_FR |
1135 R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
1136 TO_CPU_FW_CPU_ONLY);
1137 }
1138
1139 static void
1140 mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
1141 {
1142 struct mt7530_priv *priv = ds->priv;
1143
1144 /* Enable Mediatek header mode on the cpu port */
1145 mt7530_write(priv, MT7530_PVC_P(port),
1146 PORT_SPEC_TAG);
1147
1148 /* Enable flooding on the CPU port */
1149 mt7530_set(priv, MT753X_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
1150 UNU_FFP(BIT(port)));
1151
1152 /* Add the CPU port to the CPU port bitmap for MT7531 and the switch on
1153 * the MT7988 SoC. Trapped frames will be forwarded to the CPU port that
1154 * is affine to the inbound user port.
1155 */
1156 if (priv->id == ID_MT7531 || priv->id == ID_MT7988 ||
1157 priv->id == ID_EN7581)
1158 mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port)));
1159
1160 /* CPU port gets connected to all user ports of
1161 * the switch.
1162 */
1163 mt7530_write(priv, MT7530_PCR_P(port),
1164 PCR_MATRIX(dsa_user_ports(priv->ds)));
1165
1166 /* Set to fallback mode for independent VLAN learning */
1167 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
1168 MT7530_PORT_FALLBACK_MODE);
1169 }
1170
1171 static int
1172 mt7530_port_enable(struct dsa_switch *ds, int port,
1173 struct phy_device *phy)
1174 {
1175 struct dsa_port *dp = dsa_to_port(ds, port);
1176 struct mt7530_priv *priv = ds->priv;
1177
1178 mutex_lock(&priv->reg_mutex);
1179
1180 /* Allow the user port to get connected to the cpu port and also
1181 * restore the port matrix if the port is a member of a certain
1182 * bridge.
1183 */
1184 if (dsa_port_is_user(dp)) {
1185 struct dsa_port *cpu_dp = dp->cpu_dp;
1186
1187 priv->ports[port].pm |= PCR_MATRIX(BIT(cpu_dp->index));
1188 }
1189 priv->ports[port].enable = true;
1190 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
1191 priv->ports[port].pm);
1192
1193 mutex_unlock(&priv->reg_mutex);
1194
1195 if (priv->id != ID_MT7530 && priv->id != ID_MT7621)
1196 return 0;
1197
1198 if (port == 5)
1199 mt7530_clear(priv, MT753X_MTRAP, MT7530_P5_DIS);
1200 else if (port == 6)
1201 mt7530_clear(priv, MT753X_MTRAP, MT7530_P6_DIS);
1202
1203 return 0;
1204 }
1205
1206 static void
1207 mt7530_port_disable(struct dsa_switch *ds, int port)
1208 {
1209 struct mt7530_priv *priv = ds->priv;
1210
1211 mutex_lock(&priv->reg_mutex);
1212
1213 /* Clear the port matrix; it can be restored on the next
1214 * enablement of the port.
1215 */
1216 priv->ports[port].enable = false;
1217 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
1218 PCR_MATRIX_CLR);
1219
1220 mutex_unlock(&priv->reg_mutex);
1221
1222 if (priv->id != ID_MT7530 && priv->id != ID_MT7621)
1223 return;
1224
1225 /* Do not set MT7530_P5_DIS when port 5 is being used for PHY muxing. */
1226 if (port == 5 && priv->p5_mode == GMAC5)
1227 mt7530_set(priv, MT753X_MTRAP, MT7530_P5_DIS);
1228 else if (port == 6)
1229 mt7530_set(priv, MT753X_MTRAP, MT7530_P6_DIS);
1230 }
1231
1232 static int
1233 mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
1234 {
1235 struct mt7530_priv *priv = ds->priv;
1236 int length;
1237 u32 val;
1238
1239 /* When a new MTU is set, DSA always sets the CPU port's MTU to the
1240 * largest MTU of the user ports. Because the switch only has a global
1241 * RX length register, only allowing the CPU port here is enough.
1242 */
1243 if (!dsa_is_cpu_port(ds, port))
1244 return 0;
1245
1246 mt7530_mutex_lock(priv);
1247
1248 val = mt7530_mii_read(priv, MT7530_GMACCR);
1249 val &= ~MAX_RX_PKT_LEN_MASK;
1250
1251 /* RX length also includes Ethernet header, MTK tag, and FCS length */
1252 length = new_mtu + ETH_HLEN + MTK_HDR_LEN + ETH_FCS_LEN;
1253 if (length <= 1522) {
1254 val |= MAX_RX_PKT_LEN_1522;
1255 } else if (length <= 1536) {
1256 val |= MAX_RX_PKT_LEN_1536;
1257 } else if (length <= 1552) {
1258 val |= MAX_RX_PKT_LEN_1552;
1259 } else {
1260 val &= ~MAX_RX_JUMBO_MASK;
1261 val |= MAX_RX_JUMBO(DIV_ROUND_UP(length, 1024));
1262 val |= MAX_RX_PKT_LEN_JUMBO;
1263 }
1264
1265 mt7530_mii_write(priv, MT7530_GMACCR, val);
1266
1267 mt7530_mutex_unlock(priv);
1268
1269 return 0;
1270 }
1271
1272 static int
1273 mt7530_port_max_mtu(struct dsa_switch *ds, int port)
1274 {
1275 return MT7530_MAX_MTU;
1276 }
1277
1278 static void
1279 mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state)
1280 {
1281 struct mt7530_priv *priv = ds->priv;
1282 u32 stp_state;
1283
1284 switch (state) {
1285 case BR_STATE_DISABLED:
1286 stp_state = MT7530_STP_DISABLED;
1287 break;
1288 case BR_STATE_BLOCKING:
1289 stp_state = MT7530_STP_BLOCKING;
1290 break;
1291 case BR_STATE_LISTENING:
1292 stp_state = MT7530_STP_LISTENING;
1293 break;
1294 case BR_STATE_LEARNING:
1295 stp_state = MT7530_STP_LEARNING;
1296 break;
1297 case BR_STATE_FORWARDING:
1298 default:
1299 stp_state = MT7530_STP_FORWARDING;
1300 break;
1301 }
1302
1303 mt7530_rmw(priv, MT7530_SSP_P(port), FID_PST_MASK(FID_BRIDGED),
1304 FID_PST(FID_BRIDGED, stp_state));
1305 }
1306
1307 static void mt7530_update_port_member(struct mt7530_priv *priv, int port,
1308 const struct net_device *bridge_dev,
1309 bool join) __must_hold(&priv->reg_mutex)
1310 {
1311 struct dsa_port *dp = dsa_to_port(priv->ds, port), *other_dp;
1312 struct mt7530_port *p = &priv->ports[port], *other_p;
1313 struct dsa_port *cpu_dp = dp->cpu_dp;
1314 u32 port_bitmap = BIT(cpu_dp->index);
1315 int other_port;
1316 bool isolated;
1317
1318 dsa_switch_for_each_user_port(other_dp, priv->ds) {
1319 other_port = other_dp->index;
1320 other_p = &priv->ports[other_port];
1321
1322 if (dp == other_dp)
1323 continue;
1324
1325 /* Add/remove this port to/from the port matrix of the other
1326 * ports in the same bridge. If the port is disabled, the port
1327 * matrix is kept and not applied until the port becomes
1328 * enabled.
1329 */
1330 if (!dsa_port_offloads_bridge_dev(other_dp, bridge_dev))
1331 continue;
1332
1333 isolated = p->isolated && other_p->isolated;
1334
1335 if (join && !isolated) {
1336 other_p->pm |= PCR_MATRIX(BIT(port));
1337 port_bitmap |= BIT(other_port);
1338 } else {
1339 other_p->pm &= ~PCR_MATRIX(BIT(port));
1340 }
1341
1342 if (other_p->enable)
1343 mt7530_rmw(priv, MT7530_PCR_P(other_port),
1344 PCR_MATRIX_MASK, other_p->pm);
1345 }
1346
1347 /* Add/remove all the other ports to/from this port's matrix. For
1348 * !join (leaving the bridge), only the CPU port will remain in the
1349 * port matrix of this port.
1350 */
1351 p->pm = PCR_MATRIX(port_bitmap);
1352 if (priv->ports[port].enable)
1353 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, p->pm);
1354 }
1355
1356 static int
1357 mt7530_port_pre_bridge_flags(struct dsa_switch *ds, int port,
1358 struct switchdev_brport_flags flags,
1359 struct netlink_ext_ack *extack)
1360 {
1361 if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
1362 BR_BCAST_FLOOD | BR_ISOLATED))
1363 return -EINVAL;
1364
1365 return 0;
1366 }
1367
1368 static int
1369 mt7530_port_bridge_flags(struct dsa_switch *ds, int port,
1370 struct switchdev_brport_flags flags,
1371 struct netlink_ext_ack *extack)
1372 {
1373 struct mt7530_priv *priv = ds->priv;
1374
1375 if (flags.mask & BR_LEARNING)
1376 mt7530_rmw(priv, MT7530_PSC_P(port), SA_DIS,
1377 flags.val & BR_LEARNING ? 0 : SA_DIS);
1378
1379 if (flags.mask & BR_FLOOD)
1380 mt7530_rmw(priv, MT753X_MFC, UNU_FFP(BIT(port)),
1381 flags.val & BR_FLOOD ? UNU_FFP(BIT(port)) : 0);
1382
1383 if (flags.mask & BR_MCAST_FLOOD)
1384 mt7530_rmw(priv, MT753X_MFC, UNM_FFP(BIT(port)),
1385 flags.val & BR_MCAST_FLOOD ? UNM_FFP(BIT(port)) : 0);
1386
1387 if (flags.mask & BR_BCAST_FLOOD)
1388 mt7530_rmw(priv, MT753X_MFC, BC_FFP(BIT(port)),
1389 flags.val & BR_BCAST_FLOOD ? BC_FFP(BIT(port)) : 0);
1390
1391 if (flags.mask & BR_ISOLATED) {
1392 struct dsa_port *dp = dsa_to_port(ds, port);
1393 struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp);
1394
1395 priv->ports[port].isolated = !!(flags.val & BR_ISOLATED);
1396
1397 mutex_lock(&priv->reg_mutex);
1398 mt7530_update_port_member(priv, port, bridge_dev, true);
1399 mutex_unlock(&priv->reg_mutex);
1400 }
1401
1402 return 0;
1403 }
1404
1405 static int
1406 mt7530_port_bridge_join(struct dsa_switch *ds, int port,
1407 struct dsa_bridge bridge, bool *tx_fwd_offload,
1408 struct netlink_ext_ack *extack)
1409 {
1410 struct mt7530_priv *priv = ds->priv;
1411
1412 mutex_lock(&priv->reg_mutex);
1413
1414 mt7530_update_port_member(priv, port, bridge.dev, true);
1415
1416 /* Set to fallback mode for independent VLAN learning */
1417 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
1418 MT7530_PORT_FALLBACK_MODE);
1419
1420 mutex_unlock(&priv->reg_mutex);
1421
1422 return 0;
1423 }
1424
1425 static void
1426 mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
1427 {
1428 struct mt7530_priv *priv = ds->priv;
1429 bool all_user_ports_removed = true;
1430 int i;
1431
1432 /* This is called after .port_bridge_leave when leaving a VLAN-aware
1433 * bridge. Don't set standalone ports to fallback mode.
1434 */
1435 if (dsa_port_bridge_dev_get(dsa_to_port(ds, port)))
1436 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
1437 MT7530_PORT_FALLBACK_MODE);
1438
1439 mt7530_rmw(priv, MT7530_PVC_P(port),
1440 VLAN_ATTR_MASK | PVC_EG_TAG_MASK | ACC_FRM_MASK,
1441 VLAN_ATTR(MT7530_VLAN_TRANSPARENT) |
1442 PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT) |
1443 MT7530_VLAN_ACC_ALL);
1444
1445 /* Set PVID to 0 */
1446 mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
1447 G0_PORT_VID_DEF);
1448
1449 for (i = 0; i < priv->ds->num_ports; i++) {
1450 if (dsa_is_user_port(ds, i) &&
1451 dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
1452 all_user_ports_removed = false;
1453 break;
1454 }
1455 }
1456
1457 /* CPU port also does the same thing until all user ports belonging to
1458 * the CPU port get out of VLAN filtering mode.
1459 */
1460 if (all_user_ports_removed) {
1461 struct dsa_port *dp = dsa_to_port(ds, port);
1462 struct dsa_port *cpu_dp = dp->cpu_dp;
1463
1464 mt7530_write(priv, MT7530_PCR_P(cpu_dp->index),
1465 PCR_MATRIX(dsa_user_ports(priv->ds)));
1466 mt7530_write(priv, MT7530_PVC_P(cpu_dp->index), PORT_SPEC_TAG
1467 | PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
1468 }
1469 }
1470
1471 static void
1472 mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
1473 {
1474 struct mt7530_priv *priv = ds->priv;
1475
1476 /* Putting the port into security mode allows packet forwarding
1477 * through VLAN table lookup.
1478 */
1479 if (dsa_is_user_port(ds, port)) {
1480 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
1481 MT7530_PORT_SECURITY_MODE);
1482 mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
1483 G0_PORT_VID(priv->ports[port].pvid));
1484
1485 /* Only accept tagged frames if PVID is not set */
1486 if (!priv->ports[port].pvid)
1487 mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
1488 MT7530_VLAN_ACC_TAGGED);
1489
1490 /* Set the port as a user port which is able to recognize the
1491 * VID from incoming packets before fetching the entry within
1492 * the VLAN table.
1493 */
1494 mt7530_rmw(priv, MT7530_PVC_P(port),
1495 VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
1496 VLAN_ATTR(MT7530_VLAN_USER) |
1497 PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
1498 } else {
1499 /* Also set CPU ports to the "user" VLAN port attribute, to
1500 * allow VLAN classification, but keep the EG_TAG attribute as
1501 * "consistent" (i.o.w. don't change its value) for packets
1502 * received by the switch from the CPU, so that tagged packets
1503 * are forwarded to user ports as tagged, and untagged as
1504 * untagged.
1505 */
1506 mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
1507 VLAN_ATTR(MT7530_VLAN_USER));
1508 }
1509 }
1510
1511 static void
1512 mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
1513 struct dsa_bridge bridge)
1514 {
1515 struct mt7530_priv *priv = ds->priv;
1516
1517 mutex_lock(&priv->reg_mutex);
1518
1519 mt7530_update_port_member(priv, port, bridge.dev, false);
1520
1521 /* When a port is removed from the bridge, the port is set back
1522 * to the default as at initial boot, which is a VLAN-unaware
1523 * port.
1524 */
1525 mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
1526 MT7530_PORT_MATRIX_MODE);
1527
1528 mutex_unlock(&priv->reg_mutex);
1529 }
1530
1531 static int
1532 mt7530_port_fdb_add(struct dsa_switch *ds, int port,
1533 const unsigned char *addr, u16 vid,
1534 struct dsa_db db)
1535 {
1536 struct mt7530_priv *priv = ds->priv;
1537 int ret;
1538 u8 port_mask = BIT(port);
1539
1540 mutex_lock(&priv->reg_mutex);
1541 mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT);
1542 ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
1543 mutex_unlock(&priv->reg_mutex);
1544
1545 return ret;
1546 }
1547
1548 static int
1549 mt7530_port_fdb_del(struct dsa_switch *ds, int port,
1550 const unsigned char *addr, u16 vid,
1551 struct dsa_db db)
1552 {
1553 struct mt7530_priv *priv = ds->priv;
1554 int ret;
1555 u8 port_mask = BIT(port);
1556
1557 mutex_lock(&priv->reg_mutex);
1558 mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_EMP);
1559 ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
1560 mutex_unlock(&priv->reg_mutex);
1561
1562 return ret;
1563 }
1564
1565 static int
1566 mt7530_port_fdb_dump(struct dsa_switch *ds, int port,
1567 dsa_fdb_dump_cb_t *cb, void *data)
1568 {
1569 struct mt7530_priv *priv = ds->priv;
1570 struct mt7530_fdb _fdb = { 0 };
1571 int cnt = MT7530_NUM_FDB_RECORDS;
1572 int ret = 0;
1573 u32 rsp = 0;
1574
1575 mutex_lock(&priv->reg_mutex);
1576
1577 ret = mt7530_fdb_cmd(priv, MT7530_FDB_START, &rsp);
1578 if (ret < 0)
1579 goto err;
1580
1581 do {
1582 if (rsp & ATC_SRCH_HIT) {
1583 mt7530_fdb_read(priv, &_fdb);
1584 if (_fdb.port_mask & BIT(port)) {
1585 ret = cb(_fdb.mac, _fdb.vid, _fdb.noarp,
1586 data);
1587 if (ret < 0)
1588 break;
1589 }
1590 }
1591 } while (--cnt &&
1592 !(rsp & ATC_SRCH_END) &&
1593 !mt7530_fdb_cmd(priv, MT7530_FDB_NEXT, &rsp));
1594 err:
1595 mutex_unlock(&priv->reg_mutex);
1596
1597 return 0;
1598 }
1599
1600 static int
1601 mt7530_port_mdb_add(struct dsa_switch *ds, int port,
1602 const struct switchdev_obj_port_mdb *mdb,
1603 struct dsa_db db)
1604 {
1605 struct mt7530_priv *priv = ds->priv;
1606 const u8 *addr = mdb->addr;
1607 u16 vid = mdb->vid;
1608 u8 port_mask = 0;
1609 int ret;
1610
1611 mutex_lock(&priv->reg_mutex);
1612
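/* Look up any existing entry for this group address first so that the
 * new port is OR'd into the current port map instead of replacing it.
 */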
1613 mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP);
1614 if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL))
1615 port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP)
1616 & PORT_MAP_MASK;
1617
1618 port_mask |= BIT(port);
1619 mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT);
1620 ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
1621
1622 mutex_unlock(&priv->reg_mutex);
1623
1624 return ret;
1625 }
1626
1627 static int
1628 mt7530_port_mdb_del(struct dsa_switch *ds, int port,
1629 const struct switchdev_obj_port_mdb *mdb,
1630 struct dsa_db db)
1631 {
1632 struct mt7530_priv *priv = ds->priv;
1633 const u8 *addr = mdb->addr;
1634 u16 vid = mdb->vid;
1635 u8 port_mask = 0;
1636 int ret;
1637
1638 mutex_lock(&priv->reg_mutex);
1639
1640 mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP);
1641 if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL))
1642 port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP)
1643 & PORT_MAP_MASK;
1644
1645 port_mask &= ~BIT(port);
1646 mt7530_fdb_write(priv, vid, port_mask, addr, -1,
1647 port_mask ? STATIC_ENT : STATIC_EMP);
1648 ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
1649
1650 mutex_unlock(&priv->reg_mutex);
1651
1652 return ret;
1653 }
1654
1655 static int
1656 mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid)
1657 {
1658 struct mt7530_dummy_poll p;
1659 u32 val;
1660 int ret;
1661
1662 val = VTCR_BUSY | VTCR_FUNC(cmd) | vid;
1663 mt7530_write(priv, MT7530_VTCR, val);
1664
1665 INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_VTCR);
1666 ret = readx_poll_timeout(_mt7530_read, &p, val,
1667 !(val & VTCR_BUSY), 20, 20000);
1668 if (ret < 0) {
1669 dev_err(priv->dev, "poll timeout\n");
1670 return ret;
1671 }
1672
1673 val = mt7530_read(priv, MT7530_VTCR);
1674 if (val & VTCR_INVALID) {
1675 dev_err(priv->dev, "read VTCR invalid\n");
1676 return -EINVAL;
1677 }
1678
1679 return 0;
1680 }
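
/* Editorial note: a minimal sketch of how mt7530_vlan_cmd() is combined with
 * the VAWD1/VAWD2 data registers, mirroring what mt7530_hw_vlan_update()
 * below does:
 *
 *	mt7530_vlan_cmd(priv, MT7530_VTCR_RD_VID, vid);	// latch the entry
 *	val = mt7530_read(priv, MT7530_VAWD1);			// inspect/modify
 *	mt7530_write(priv, MT7530_VAWD1, val | VLAN_VALID);
 *	mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, vid);	// commit it back
 *
 * Callers in this file typically hold priv->reg_mutex around the whole
 * sequence.
 */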
1681
1682 static int
1683 mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
1684 struct netlink_ext_ack *extack)
1685 {
1686 struct dsa_port *dp = dsa_to_port(ds, port);
1687 struct dsa_port *cpu_dp = dp->cpu_dp;
1688
1689 if (vlan_filtering) {
1690 /* The port is kept VLAN-unaware when the bridge is set up
1691 * without vlan_filtering enabled. Otherwise, both the port
1692 * and the corresponding CPU port need to be set up to become
1693 * VLAN-aware ports.
1694 */
1695 mt7530_port_set_vlan_aware(ds, port);
1696 mt7530_port_set_vlan_aware(ds, cpu_dp->index);
1697 } else {
1698 mt7530_port_set_vlan_unaware(ds, port);
1699 }
1700
1701 return 0;
1702 }
1703
1704 static void
1705 mt7530_hw_vlan_add(struct mt7530_priv *priv,
1706 struct mt7530_hw_vlan_entry *entry)
1707 {
1708 struct dsa_port *dp = dsa_to_port(priv->ds, entry->port);
1709 u8 new_members;
1710 u32 val;
1711
1712 new_members = entry->old_members | BIT(entry->port);
1713
1714 /* Validate the entry with independent learning, create an egress tag
1715 * per VLAN, and join the port as one of the VLAN members.
1716 */
1717 val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | FID(FID_BRIDGED) |
1718 VLAN_VALID;
1719 mt7530_write(priv, MT7530_VAWD1, val);
1720
1721 /* Decide whether to tag outgoing packets leaving the port within
1722 * this VLAN.
1723 * The CPU port is always treated as a tagged port since it serves
1724 * more than one VLAN, and it is set to the stack egress mode so
1725 * that VLAN tags are appended after the hardware special tag used
1726 * as the DSA tag.
1727 */
1728 if (dsa_port_is_cpu(dp))
1729 val = MT7530_VLAN_EGRESS_STACK;
1730 else if (entry->untagged)
1731 val = MT7530_VLAN_EGRESS_UNTAG;
1732 else
1733 val = MT7530_VLAN_EGRESS_TAG;
1734 mt7530_rmw(priv, MT7530_VAWD2,
1735 ETAG_CTRL_P_MASK(entry->port),
1736 ETAG_CTRL_P(entry->port, val));
1737 }
1738
1739 static void
1740 mt7530_hw_vlan_del(struct mt7530_priv *priv,
1741 struct mt7530_hw_vlan_entry *entry)
1742 {
1743 u8 new_members;
1744 u32 val;
1745
1746 new_members = entry->old_members & ~BIT(entry->port);
1747
1748 val = mt7530_read(priv, MT7530_VAWD1);
1749 if (!(val & VLAN_VALID)) {
1750 dev_err(priv->dev,
1751 "Cannot be deleted due to invalid entry\n");
1752 return;
1753 }
1754
1755 if (new_members) {
1756 val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) |
1757 VLAN_VALID;
1758 mt7530_write(priv, MT7530_VAWD1, val);
1759 } else {
1760 mt7530_write(priv, MT7530_VAWD1, 0);
1761 mt7530_write(priv, MT7530_VAWD2, 0);
1762 }
1763 }
1764
1765 static void
1766 mt7530_hw_vlan_update(struct mt7530_priv *priv, u16 vid,
1767 struct mt7530_hw_vlan_entry *entry,
1768 mt7530_vlan_op vlan_op)
1769 {
1770 u32 val;
1771
1772 /* Fetch entry */
1773 mt7530_vlan_cmd(priv, MT7530_VTCR_RD_VID, vid);
1774
1775 val = mt7530_read(priv, MT7530_VAWD1);
1776
1777 entry->old_members = (val >> PORT_MEM_SHFT) & PORT_MEM_MASK;
1778
1779 /* Manipulate entry */
1780 vlan_op(priv, entry);
1781
1782 /* Flush result to hardware */
1783 mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, vid);
1784 }
1785
1786 static int
1787 mt7530_setup_vlan0(struct mt7530_priv *priv)
1788 {
1789 u32 val;
1790
1791 /* Validate the entry with independent learning, keep the original
1792 * ingress tag attribute.
1793 */
1794 val = IVL_MAC | EG_CON | PORT_MEM(MT7530_ALL_MEMBERS) | FID(FID_BRIDGED) |
1795 VLAN_VALID;
1796 mt7530_write(priv, MT7530_VAWD1, val);
1797
1798 return mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, 0);
1799 }
1800
1801 static int
1802 mt7530_port_vlan_add(struct dsa_switch *ds, int port,
1803 const struct switchdev_obj_port_vlan *vlan,
1804 struct netlink_ext_ack *extack)
1805 {
1806 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1807 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1808 struct mt7530_hw_vlan_entry new_entry;
1809 struct mt7530_priv *priv = ds->priv;
1810
1811 mutex_lock(&priv->reg_mutex);
1812
1813 mt7530_hw_vlan_entry_init(&new_entry, port, untagged);
1814 mt7530_hw_vlan_update(priv, vlan->vid, &new_entry, mt7530_hw_vlan_add);
1815
1816 if (pvid) {
1817 priv->ports[port].pvid = vlan->vid;
1818
1819 /* Accept all frames if PVID is set */
1820 mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
1821 MT7530_VLAN_ACC_ALL);
1822
1823 /* Only configure PVID if VLAN filtering is enabled */
1824 if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
1825 mt7530_rmw(priv, MT7530_PPBV1_P(port),
1826 G0_PORT_VID_MASK,
1827 G0_PORT_VID(vlan->vid));
1828 } else if (vlan->vid && priv->ports[port].pvid == vlan->vid) {
1829 /* This VLAN is overwritten without PVID, so unset it */
1830 priv->ports[port].pvid = G0_PORT_VID_DEF;
1831
1832 /* Only accept tagged frames if the port is VLAN-aware */
1833 if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
1834 mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
1835 MT7530_VLAN_ACC_TAGGED);
1836
1837 mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
1838 G0_PORT_VID_DEF);
1839 }
1840
1841 mutex_unlock(&priv->reg_mutex);
1842
1843 return 0;
1844 }
1845
1846 static int
1847 mt7530_port_vlan_del(struct dsa_switch *ds, int port,
1848 const struct switchdev_obj_port_vlan *vlan)
1849 {
1850 struct mt7530_hw_vlan_entry target_entry;
1851 struct mt7530_priv *priv = ds->priv;
1852
1853 mutex_lock(&priv->reg_mutex);
1854
1855 mt7530_hw_vlan_entry_init(&target_entry, port, 0);
1856 mt7530_hw_vlan_update(priv, vlan->vid, &target_entry,
1857 mt7530_hw_vlan_del);
1858
1859 /* The PVID is restored to the default whenever the PVID port
1860 * is removed from the VLAN.
1861 */
1862 if (priv->ports[port].pvid == vlan->vid) {
1863 priv->ports[port].pvid = G0_PORT_VID_DEF;
1864
1865 /* Only accept tagged frames if the port is VLAN-aware */
1866 if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
1867 mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
1868 MT7530_VLAN_ACC_TAGGED);
1869
1870 mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
1871 G0_PORT_VID_DEF);
1872 }
1873
1874
1875 mutex_unlock(&priv->reg_mutex);
1876
1877 return 0;
1878 }
1879
1880 static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
1881 struct dsa_mall_mirror_tc_entry *mirror,
1882 bool ingress, struct netlink_ext_ack *extack)
1883 {
1884 struct mt7530_priv *priv = ds->priv;
1885 int monitor_port;
1886 u32 val;
1887
1888 /* Check for an existing entry */
1889 if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
1890 return -EEXIST;
1891
1892 val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));
1893
1894 /* MT7530 only supports one monitor port */
1895 monitor_port = MT753X_MIRROR_PORT_GET(priv->id, val);
1896 if (val & MT753X_MIRROR_EN(priv->id) &&
1897 monitor_port != mirror->to_local_port)
1898 return -EEXIST;
1899
1900 val |= MT753X_MIRROR_EN(priv->id);
1901 val &= ~MT753X_MIRROR_PORT_MASK(priv->id);
1902 val |= MT753X_MIRROR_PORT_SET(priv->id, mirror->to_local_port);
1903 mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
1904
1905 val = mt7530_read(priv, MT7530_PCR_P(port));
1906 if (ingress) {
1907 val |= PORT_RX_MIR;
1908 priv->mirror_rx |= BIT(port);
1909 } else {
1910 val |= PORT_TX_MIR;
1911 priv->mirror_tx |= BIT(port);
1912 }
1913 mt7530_write(priv, MT7530_PCR_P(port), val);
1914
1915 return 0;
1916 }
1917
1918 static void mt753x_port_mirror_del(struct dsa_switch *ds, int port,
1919 struct dsa_mall_mirror_tc_entry *mirror)
1920 {
1921 struct mt7530_priv *priv = ds->priv;
1922 u32 val;
1923
1924 val = mt7530_read(priv, MT7530_PCR_P(port));
1925 if (mirror->ingress) {
1926 val &= ~PORT_RX_MIR;
1927 priv->mirror_rx &= ~BIT(port);
1928 } else {
1929 val &= ~PORT_TX_MIR;
1930 priv->mirror_tx &= ~BIT(port);
1931 }
1932 mt7530_write(priv, MT7530_PCR_P(port), val);
1933
1934 if (!priv->mirror_rx && !priv->mirror_tx) {
1935 val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));
1936 val &= ~MT753X_MIRROR_EN(priv->id);
1937 mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
1938 }
1939 }
1940
1941 static enum dsa_tag_protocol
1942 mtk_get_tag_protocol(struct dsa_switch *ds, int port,
1943 enum dsa_tag_protocol mp)
1944 {
1945 return DSA_TAG_PROTO_MTK;
1946 }
1947
1948 #ifdef CONFIG_GPIOLIB
1949 static inline u32
1950 mt7530_gpio_to_bit(unsigned int offset)
1951 {
1952 /* Map GPIO offset to register bit
1953 * [ 2: 0] port 0 LED 0..2 as GPIO 0..2
1954 * [ 6: 4] port 1 LED 0..2 as GPIO 3..5
1955 * [10: 8] port 2 LED 0..2 as GPIO 6..8
1956 * [14:12] port 3 LED 0..2 as GPIO 9..11
1957 * [18:16] port 4 LED 0..2 as GPIO 12..14
1958 */
1959 return BIT(offset + offset / 3);
1960 }
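
/* Editorial note: a worked example of the mapping above. GPIO offset 7
 * (port 2, LED 1) gives BIT(7 + 7 / 3) = BIT(9), matching the register
 * layout in the comment, while offset 2 (port 0, LED 2) stays at BIT(2).
 */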
1961
1962 static int
1963 mt7530_gpio_get(struct gpio_chip *gc, unsigned int offset)
1964 {
1965 struct mt7530_priv *priv = gpiochip_get_data(gc);
1966 u32 bit = mt7530_gpio_to_bit(offset);
1967
1968 return !!(mt7530_read(priv, MT7530_LED_GPIO_DATA) & bit);
1969 }
1970
1971 static void
1972 mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
1973 {
1974 struct mt7530_priv *priv = gpiochip_get_data(gc);
1975 u32 bit = mt7530_gpio_to_bit(offset);
1976
1977 if (value)
1978 mt7530_set(priv, MT7530_LED_GPIO_DATA, bit);
1979 else
1980 mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit);
1981 }
1982
1983 static int
1984 mt7530_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
1985 {
1986 struct mt7530_priv *priv = gpiochip_get_data(gc);
1987 u32 bit = mt7530_gpio_to_bit(offset);
1988
1989 return (mt7530_read(priv, MT7530_LED_GPIO_DIR) & bit) ?
1990 GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
1991 }
1992
1993 static int
1994 mt7530_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
1995 {
1996 struct mt7530_priv *priv = gpiochip_get_data(gc);
1997 u32 bit = mt7530_gpio_to_bit(offset);
1998
1999 mt7530_clear(priv, MT7530_LED_GPIO_OE, bit);
2000 mt7530_clear(priv, MT7530_LED_GPIO_DIR, bit);
2001
2002 return 0;
2003 }
2004
2005 static int
2006 mt7530_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value)
2007 {
2008 struct mt7530_priv *priv = gpiochip_get_data(gc);
2009 u32 bit = mt7530_gpio_to_bit(offset);
2010
2011 mt7530_set(priv, MT7530_LED_GPIO_DIR, bit);
2012
2013 if (value)
2014 mt7530_set(priv, MT7530_LED_GPIO_DATA, bit);
2015 else
2016 mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit);
2017
2018 mt7530_set(priv, MT7530_LED_GPIO_OE, bit);
2019
2020 return 0;
2021 }
2022
2023 static int
2024 mt7530_setup_gpio(struct mt7530_priv *priv)
2025 {
2026 struct device *dev = priv->dev;
2027 struct gpio_chip *gc;
2028
2029 gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
2030 if (!gc)
2031 return -ENOMEM;
2032
2033 mt7530_write(priv, MT7530_LED_GPIO_OE, 0);
2034 mt7530_write(priv, MT7530_LED_GPIO_DIR, 0);
2035 mt7530_write(priv, MT7530_LED_IO_MODE, 0);
2036
2037 gc->label = "mt7530";
2038 gc->parent = dev;
2039 gc->owner = THIS_MODULE;
2040 gc->get_direction = mt7530_gpio_get_direction;
2041 gc->direction_input = mt7530_gpio_direction_input;
2042 gc->direction_output = mt7530_gpio_direction_output;
2043 gc->get = mt7530_gpio_get;
2044 gc->set = mt7530_gpio_set;
2045 gc->base = -1;
2046 gc->ngpio = 15;
2047 gc->can_sleep = true;
2048
2049 return devm_gpiochip_add_data(dev, gc, priv);
2050 }
2051 #endif /* CONFIG_GPIOLIB */
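
/* Editorial note: mt7530_setup_gpio() is only invoked when the switch node
 * carries the "gpio-controller" property (see mt7530_setup() below). A
 * devicetree fragment would look roughly like this; the MDIO address and
 * node layout are illustrative assumptions for a typical board:
 *
 *	switch@1f {
 *		compatible = "mediatek,mt7530";
 *		reg = <0x1f>;
 *		gpio-controller;
 *		#gpio-cells = <2>;
 *	};
 */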
2052
2053 static irqreturn_t
2054 mt7530_irq_thread_fn(int irq, void *dev_id)
2055 {
2056 struct mt7530_priv *priv = dev_id;
2057 bool handled = false;
2058 u32 val;
2059 int p;
2060
2061 mt7530_mutex_lock(priv);
2062 val = mt7530_mii_read(priv, MT7530_SYS_INT_STS);
2063 mt7530_mii_write(priv, MT7530_SYS_INT_STS, val);
2064 mt7530_mutex_unlock(priv);
2065
2066 for (p = 0; p < MT7530_NUM_PHYS; p++) {
2067 if (BIT(p) & val) {
2068 unsigned int irq;
2069
2070 irq = irq_find_mapping(priv->irq_domain, p);
2071 handle_nested_irq(irq);
2072 handled = true;
2073 }
2074 }
2075
2076 return IRQ_RETVAL(handled);
2077 }
2078
2079 static void
2080 mt7530_irq_mask(struct irq_data *d)
2081 {
2082 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
2083
2084 priv->irq_enable &= ~BIT(d->hwirq);
2085 }
2086
2087 static void
2088 mt7530_irq_unmask(struct irq_data *d)
2089 {
2090 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
2091
2092 priv->irq_enable |= BIT(d->hwirq);
2093 }
2094
2095 static void
2096 mt7530_irq_bus_lock(struct irq_data *d)
2097 {
2098 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
2099
2100 mt7530_mutex_lock(priv);
2101 }
2102
2103 static void
2104 mt7530_irq_bus_sync_unlock(struct irq_data *d)
2105 {
2106 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
2107
2108 mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
2109 mt7530_mutex_unlock(priv);
2110 }
2111
2112 static struct irq_chip mt7530_irq_chip = {
2113 .name = KBUILD_MODNAME,
2114 .irq_mask = mt7530_irq_mask,
2115 .irq_unmask = mt7530_irq_unmask,
2116 .irq_bus_lock = mt7530_irq_bus_lock,
2117 .irq_bus_sync_unlock = mt7530_irq_bus_sync_unlock,
2118 };
2119
2120 static int
2121 mt7530_irq_map(struct irq_domain *domain, unsigned int irq,
2122 irq_hw_number_t hwirq)
2123 {
2124 irq_set_chip_data(irq, domain->host_data);
2125 irq_set_chip_and_handler(irq, &mt7530_irq_chip, handle_simple_irq);
2126 irq_set_nested_thread(irq, true);
2127 irq_set_noprobe(irq);
2128
2129 return 0;
2130 }
2131
2132 static const struct irq_domain_ops mt7530_irq_domain_ops = {
2133 .map = mt7530_irq_map,
2134 .xlate = irq_domain_xlate_onecell,
2135 };
2136
2137 static void
2138 mt7988_irq_mask(struct irq_data *d)
2139 {
2140 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
2141
2142 priv->irq_enable &= ~BIT(d->hwirq);
2143 mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
2144 }
2145
2146 static void
2147 mt7988_irq_unmask(struct irq_data *d)
2148 {
2149 struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
2150
2151 priv->irq_enable |= BIT(d->hwirq);
2152 mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
2153 }
2154
2155 static struct irq_chip mt7988_irq_chip = {
2156 .name = KBUILD_MODNAME,
2157 .irq_mask = mt7988_irq_mask,
2158 .irq_unmask = mt7988_irq_unmask,
2159 };
2160
2161 static int
2162 mt7988_irq_map(struct irq_domain *domain, unsigned int irq,
2163 irq_hw_number_t hwirq)
2164 {
2165 irq_set_chip_data(irq, domain->host_data);
2166 irq_set_chip_and_handler(irq, &mt7988_irq_chip, handle_simple_irq);
2167 irq_set_nested_thread(irq, true);
2168 irq_set_noprobe(irq);
2169
2170 return 0;
2171 }
2172
2173 static const struct irq_domain_ops mt7988_irq_domain_ops = {
2174 .map = mt7988_irq_map,
2175 .xlate = irq_domain_xlate_onecell,
2176 };
2177
2178 static void
2179 mt7530_setup_mdio_irq(struct mt7530_priv *priv)
2180 {
2181 struct dsa_switch *ds = priv->ds;
2182 int p;
2183
2184 for (p = 0; p < MT7530_NUM_PHYS; p++) {
2185 if (BIT(p) & ds->phys_mii_mask) {
2186 unsigned int irq;
2187
2188 irq = irq_create_mapping(priv->irq_domain, p);
2189 ds->user_mii_bus->irq[p] = irq;
2190 }
2191 }
2192 }
2193
2194 static int
2195 mt7530_setup_irq(struct mt7530_priv *priv)
2196 {
2197 struct device *dev = priv->dev;
2198 struct device_node *np = dev->of_node;
2199 int ret;
2200
2201 if (!of_property_read_bool(np, "interrupt-controller")) {
2202 dev_info(dev, "no interrupt support\n");
2203 return 0;
2204 }
2205
2206 priv->irq = of_irq_get(np, 0);
2207 if (priv->irq <= 0) {
2208 dev_err(dev, "failed to get parent IRQ: %d\n", priv->irq);
2209 return priv->irq ? : -EINVAL;
2210 }
2211
2212 if (priv->id == ID_MT7988 || priv->id == ID_EN7581)
2213 priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
2214 &mt7988_irq_domain_ops,
2215 priv);
2216 else
2217 priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
2218 &mt7530_irq_domain_ops,
2219 priv);
2220
2221 if (!priv->irq_domain) {
2222 dev_err(dev, "failed to create IRQ domain\n");
2223 return -ENOMEM;
2224 }
2225
2226 /* This register must be set for MT7530 to properly fire interrupts */
2227 if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
2228 mt7530_set(priv, MT7530_TOP_SIG_CTRL, TOP_SIG_CTRL_NORMAL);
2229
2230 ret = request_threaded_irq(priv->irq, NULL, mt7530_irq_thread_fn,
2231 IRQF_ONESHOT, KBUILD_MODNAME, priv);
2232 if (ret) {
2233 irq_domain_remove(priv->irq_domain);
2234 dev_err(dev, "failed to request IRQ: %d\n", ret);
2235 return ret;
2236 }
2237
2238 return 0;
2239 }
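
/* Editorial note: mt7530_setup_irq() bails out early unless the switch node
 * has an "interrupt-controller" property and a parent interrupt. A rough
 * sketch of the relevant devicetree properties, with the parent interrupt
 * line purely illustrative:
 *
 *	interrupt-controller;
 *	#interrupt-cells = <1>;
 *	interrupts-extended = <&pio 53 IRQ_TYPE_LEVEL_HIGH>;
 */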
2240
2241 static void
2242 mt7530_free_mdio_irq(struct mt7530_priv *priv)
2243 {
2244 int p;
2245
2246 for (p = 0; p < MT7530_NUM_PHYS; p++) {
2247 if (BIT(p) & priv->ds->phys_mii_mask) {
2248 unsigned int irq;
2249
2250 irq = irq_find_mapping(priv->irq_domain, p);
2251 irq_dispose_mapping(irq);
2252 }
2253 }
2254 }
2255
2256 static void
2257 mt7530_free_irq_common(struct mt7530_priv *priv)
2258 {
2259 free_irq(priv->irq, priv);
2260 irq_domain_remove(priv->irq_domain);
2261 }
2262
2263 static void
2264 mt7530_free_irq(struct mt7530_priv *priv)
2265 {
2266 struct device_node *mnp, *np = priv->dev->of_node;
2267
2268 mnp = of_get_child_by_name(np, "mdio");
2269 if (!mnp)
2270 mt7530_free_mdio_irq(priv);
2271 of_node_put(mnp);
2272
2273 mt7530_free_irq_common(priv);
2274 }
2275
2276 static int
2277 mt7530_setup_mdio(struct mt7530_priv *priv)
2278 {
2279 struct device_node *mnp, *np = priv->dev->of_node;
2280 struct dsa_switch *ds = priv->ds;
2281 struct device *dev = priv->dev;
2282 struct mii_bus *bus;
2283 static int idx;
2284 int ret = 0;
2285
2286 mnp = of_get_child_by_name(np, "mdio");
2287
2288 if (mnp && !of_device_is_available(mnp))
2289 goto out;
2290
2291 bus = devm_mdiobus_alloc(dev);
2292 if (!bus) {
2293 ret = -ENOMEM;
2294 goto out;
2295 }
2296
2297 if (!mnp)
2298 ds->user_mii_bus = bus;
2299
2300 bus->priv = priv;
2301 bus->name = KBUILD_MODNAME "-mii";
2302 snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++);
2303 bus->read = mt753x_phy_read_c22;
2304 bus->write = mt753x_phy_write_c22;
2305 bus->read_c45 = mt753x_phy_read_c45;
2306 bus->write_c45 = mt753x_phy_write_c45;
2307 bus->parent = dev;
2308 bus->phy_mask = ~ds->phys_mii_mask;
2309
2310 if (priv->irq && !mnp)
2311 mt7530_setup_mdio_irq(priv);
2312
2313 ret = devm_of_mdiobus_register(dev, bus, mnp);
2314 if (ret) {
2315 dev_err(dev, "failed to register MDIO bus: %d\n", ret);
2316 if (priv->irq && !mnp)
2317 mt7530_free_mdio_irq(priv);
2318 }
2319
2320 out:
2321 of_node_put(mnp);
2322 return ret;
2323 }
2324
2325 static int
2326 mt7530_setup(struct dsa_switch *ds)
2327 {
2328 struct mt7530_priv *priv = ds->priv;
2329 struct device_node *dn = NULL;
2330 struct device_node *phy_node;
2331 struct device_node *mac_np;
2332 struct mt7530_dummy_poll p;
2333 phy_interface_t interface;
2334 struct dsa_port *cpu_dp;
2335 u32 id, val;
2336 int ret, i;
2337
2338 /* The parent node of the conduit netdev, which holds the common
2339 * system controller, is also the container for the two GMAC nodes
2340 * represented as two netdev instances.
2341 */
2342 dsa_switch_for_each_cpu_port(cpu_dp, ds) {
2343 dn = cpu_dp->conduit->dev.of_node->parent;
2344 /* It doesn't matter which CPU port is found first;
2345 * their conduits should share the same parent OF node.
2346 */
2347 break;
2348 }
2349
2350 if (!dn) {
2351 dev_err(ds->dev, "parent OF node of DSA conduit not found");
2352 return -EINVAL;
2353 }
2354
2355 ds->assisted_learning_on_cpu_port = true;
2356 ds->mtu_enforcement_ingress = true;
2357
2358 if (priv->id == ID_MT7530) {
2359 regulator_set_voltage(priv->core_pwr, 1000000, 1000000);
2360 ret = regulator_enable(priv->core_pwr);
2361 if (ret < 0) {
2362 dev_err(priv->dev,
2363 "Failed to enable core power: %d\n", ret);
2364 return ret;
2365 }
2366
2367 regulator_set_voltage(priv->io_pwr, 3300000, 3300000);
2368 ret = regulator_enable(priv->io_pwr);
2369 if (ret < 0) {
2370 dev_err(priv->dev, "Failed to enable io pwr: %d\n",
2371 ret);
2372 return ret;
2373 }
2374 }
2375
2376 /* Reset the whole chip through the GPIO pin or memory-mapped
2377 * registers, depending on the type of hardware.
2378 */
2379 if (priv->mcm) {
2380 reset_control_assert(priv->rstc);
2381 usleep_range(5000, 5100);
2382 reset_control_deassert(priv->rstc);
2383 } else {
2384 gpiod_set_value_cansleep(priv->reset, 0);
2385 usleep_range(5000, 5100);
2386 gpiod_set_value_cansleep(priv->reset, 1);
2387 }
2388
2389 /* Wait for the MT7530 to become stable */
2390 INIT_MT7530_DUMMY_POLL(&p, priv, MT753X_TRAP);
2391 ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
2392 20, 1000000);
2393 if (ret < 0) {
2394 dev_err(priv->dev, "reset timeout\n");
2395 return ret;
2396 }
2397
2398 id = mt7530_read(priv, MT7530_CREV);
2399 id >>= CHIP_NAME_SHIFT;
2400 if (id != MT7530_ID) {
2401 dev_err(priv->dev, "chip %x can't be supported\n", id);
2402 return -ENODEV;
2403 }
2404
2405 if ((val & MT7530_XTAL_MASK) == MT7530_XTAL_20MHZ) {
2406 dev_err(priv->dev,
2407 "MT7530 with a 20MHz XTAL is not supported!\n");
2408 return -EINVAL;
2409 }
2410
2411 /* Reset the switch through internal reset */
2412 mt7530_write(priv, MT7530_SYS_CTRL,
2413 SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
2414 SYS_CTRL_REG_RST);
2415
2416 /* Lower Tx driving for TRGMII path */
2417 for (i = 0; i < NUM_TRGMII_CTRL; i++)
2418 mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
2419 TD_DM_DRVP(8) | TD_DM_DRVN(8));
2420
2421 for (i = 0; i < NUM_TRGMII_CTRL; i++)
2422 mt7530_rmw(priv, MT7530_TRGMII_RD(i),
2423 RD_TAP_MASK, RD_TAP(16));
2424
2425 /* Allow modifying the trap and direct access to the PHY registers
2426 * via the MDIO bus the switch is on.
2427 */
2428 mt7530_rmw(priv, MT753X_MTRAP, MT7530_CHG_TRAP |
2429 MT7530_PHY_INDIRECT_ACCESS, MT7530_CHG_TRAP);
2430
2431 if ((val & MT7530_XTAL_MASK) == MT7530_XTAL_40MHZ)
2432 mt7530_pll_setup(priv);
2433
2434 mt753x_trap_frames(priv);
2435
2436 /* Enable and reset MIB counters */
2437 mt7530_mib_reset(ds);
2438
2439 for (i = 0; i < priv->ds->num_ports; i++) {
2440 /* Clear link settings and enable force mode to force link down
2441 * on all ports until they're enabled later.
2442 */
2443 mt7530_rmw(priv, MT753X_PMCR_P(i),
2444 PMCR_LINK_SETTINGS_MASK |
2445 MT753X_FORCE_MODE(priv->id),
2446 MT753X_FORCE_MODE(priv->id));
2447
2448 /* Disable forwarding by default on all ports */
2449 mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
2450 PCR_MATRIX_CLR);
2451
2452 /* Disable learning by default on all ports */
2453 mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
2454
2455 if (dsa_is_cpu_port(ds, i)) {
2456 mt753x_cpu_port_enable(ds, i);
2457 } else {
2458 mt7530_port_disable(ds, i);
2459
2460 /* Set default PVID to 0 on all user ports */
2461 mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK,
2462 G0_PORT_VID_DEF);
2463 }
2464 /* Enable consistent egress tag */
2465 mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
2466 PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
2467 }
2468
2469 /* Allow mirroring frames received on the local port (monitor port). */
2470 mt7530_set(priv, MT753X_AGC, LOCAL_EN);
2471
2472 /* Setup VLAN ID 0 for VLAN-unaware bridges */
2473 ret = mt7530_setup_vlan0(priv);
2474 if (ret)
2475 return ret;
2476
2477 /* Check for PHY muxing on port 5 */
2478 if (dsa_is_unused_port(ds, 5)) {
2479 /* Scan the ethernet nodes. Look for GMAC1, lookup the used PHY.
2480 * Set priv->p5_mode to the appropriate value if PHY muxing is
2481 * detected.
2482 */
2483 for_each_child_of_node(dn, mac_np) {
2484 if (!of_device_is_compatible(mac_np,
2485 "mediatek,eth-mac"))
2486 continue;
2487
2488 ret = of_property_read_u32(mac_np, "reg", &id);
2489 if (ret < 0 || id != 1)
2490 continue;
2491
2492 phy_node = of_parse_phandle(mac_np, "phy-handle", 0);
2493 if (!phy_node)
2494 continue;
2495
2496 if (phy_node->parent == priv->dev->of_node->parent ||
2497 phy_node->parent->parent == priv->dev->of_node) {
2498 ret = of_get_phy_mode(mac_np, &interface);
2499 if (ret && ret != -ENODEV) {
2500 of_node_put(mac_np);
2501 of_node_put(phy_node);
2502 return ret;
2503 }
2504 id = of_mdio_parse_addr(ds->dev, phy_node);
2505 if (id == 0)
2506 priv->p5_mode = MUX_PHY_P0;
2507 if (id == 4)
2508 priv->p5_mode = MUX_PHY_P4;
2509 }
2510 of_node_put(mac_np);
2511 of_node_put(phy_node);
2512 break;
2513 }
2514
2515 if (priv->p5_mode == MUX_PHY_P0 ||
2516 priv->p5_mode == MUX_PHY_P4) {
2517 mt7530_clear(priv, MT753X_MTRAP, MT7530_P5_DIS);
2518 mt7530_setup_port5(ds, interface);
2519 }
2520 }
2521
2522 #ifdef CONFIG_GPIOLIB
2523 if (of_property_read_bool(priv->dev->of_node, "gpio-controller")) {
2524 ret = mt7530_setup_gpio(priv);
2525 if (ret)
2526 return ret;
2527 }
2528 #endif /* CONFIG_GPIOLIB */
2529
2530 /* Flush the FDB table */
2531 ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
2532 if (ret < 0)
2533 return ret;
2534
2535 return 0;
2536 }
2537
2538 static int
2539 mt7531_setup_common(struct dsa_switch *ds)
2540 {
2541 struct mt7530_priv *priv = ds->priv;
2542 int ret, i;
2543
2544 mt753x_trap_frames(priv);
2545
2546 /* Enable and reset MIB counters */
2547 mt7530_mib_reset(ds);
2548
2549 /* Disable flooding on all ports */
2550 mt7530_clear(priv, MT753X_MFC, BC_FFP_MASK | UNM_FFP_MASK |
2551 UNU_FFP_MASK);
2552
2553 for (i = 0; i < priv->ds->num_ports; i++) {
2554 /* Clear link settings and enable force mode to force link down
2555 * on all ports until they're enabled later.
2556 */
2557 mt7530_rmw(priv, MT753X_PMCR_P(i),
2558 PMCR_LINK_SETTINGS_MASK |
2559 MT753X_FORCE_MODE(priv->id),
2560 MT753X_FORCE_MODE(priv->id));
2561
2562 /* Disable forwarding by default on all ports */
2563 mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
2564 PCR_MATRIX_CLR);
2565
2566 /* Disable learning by default on all ports */
2567 mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
2568
2569 mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
2570
2571 if (dsa_is_cpu_port(ds, i)) {
2572 mt753x_cpu_port_enable(ds, i);
2573 } else {
2574 mt7530_port_disable(ds, i);
2575
2576 /* Set default PVID to 0 on all user ports */
2577 mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK,
2578 G0_PORT_VID_DEF);
2579 }
2580
2581 /* Enable consistent egress tag */
2582 mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
2583 PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
2584 }
2585
2586 /* Allow mirroring frames received on the local port (monitor port). */
2587 mt7530_set(priv, MT753X_AGC, LOCAL_EN);
2588
2589 /* Flush the FDB table */
2590 ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
2591 if (ret < 0)
2592 return ret;
2593
2594 return 0;
2595 }
2596
2597 static int
2598 mt7531_setup(struct dsa_switch *ds)
2599 {
2600 struct mt7530_priv *priv = ds->priv;
2601 struct mt7530_dummy_poll p;
2602 u32 val, id;
2603 int ret, i;
2604
2605 /* Reset the whole chip through the GPIO pin or memory-mapped
2606 * registers, depending on the type of hardware.
2607 */
2608 if (priv->mcm) {
2609 reset_control_assert(priv->rstc);
2610 usleep_range(5000, 5100);
2611 reset_control_deassert(priv->rstc);
2612 } else {
2613 gpiod_set_value_cansleep(priv->reset, 0);
2614 usleep_range(5000, 5100);
2615 gpiod_set_value_cansleep(priv->reset, 1);
2616 }
2617
2618 /* Wait for the switch to become stable */
2619 INIT_MT7530_DUMMY_POLL(&p, priv, MT753X_TRAP);
2620 ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
2621 20, 1000000);
2622 if (ret < 0) {
2623 dev_err(priv->dev, "reset timeout\n");
2624 return ret;
2625 }
2626
2627 id = mt7530_read(priv, MT7531_CREV);
2628 id >>= CHIP_NAME_SHIFT;
2629
2630 if (id != MT7531_ID) {
2631 dev_err(priv->dev, "chip %x can't be supported\n", id);
2632 return -ENODEV;
2633 }
2634
2635 /* MT7531AE has got two SGMII units. One for port 5, one for port 6.
2636 * MT7531BE has got only one SGMII unit which is for port 6.
2637 */
2638 val = mt7530_read(priv, MT7531_TOP_SIG_SR);
2639 priv->p5_sgmii = !!(val & PAD_DUAL_SGMII_EN);
2640
2641 /* Force link down on all ports before internal reset */
2642 for (i = 0; i < priv->ds->num_ports; i++)
2643 mt7530_write(priv, MT753X_PMCR_P(i), MT7531_FORCE_MODE_LNK);
2644
2645 /* Reset the switch through internal reset */
2646 mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_SW_RST | SYS_CTRL_REG_RST);
2647
2648 if (!priv->p5_sgmii) {
2649 mt7531_pll_setup(priv);
2650 } else {
2651 /* Unlike MT7531BE, the GPIO 6-12 pins are not used for RGMII on
2652 * MT7531AE. Set the GPIO 11-12 pins to function as MDC and MDIO
2653 * to expose the MDIO bus of the switch.
2654 */
2655 mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK,
2656 MT7531_EXT_P_MDC_11);
2657 mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK,
2658 MT7531_EXT_P_MDIO_12);
2659 }
2660
2661 mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK,
2662 MT7531_GPIO0_INTERRUPT);
2663
2664 /* Enable Energy-Efficient Ethernet (EEE) and the PHY core PLL.
2665 * Since no phy_device has been created yet for
2666 * phy_[read,write]_mmd_indirect to operate on, we use our own
2667 * mt7531_ind_c45_phy_[read,write] helpers to complete this step.
2668 */
2669 val = mt7531_ind_c45_phy_read(priv,
2670 MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
2671 MDIO_MMD_VEND2, CORE_PLL_GROUP4);
2672 val |= MT7531_RG_SYSPLL_DMY2 | MT7531_PHY_PLL_BYPASS_MODE;
2673 val &= ~MT7531_PHY_PLL_OFF;
2674 mt7531_ind_c45_phy_write(priv,
2675 MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
2676 MDIO_MMD_VEND2, CORE_PLL_GROUP4, val);
2677
2678 /* Disable EEE advertisement on the switch PHYs. */
2679 for (i = MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr);
2680 i < MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr) + MT7530_NUM_PHYS;
2681 i++) {
2682 mt7531_ind_c45_phy_write(priv, i, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
2683 0);
2684 }
2685
2686 ret = mt7531_setup_common(ds);
2687 if (ret)
2688 return ret;
2689
2690 /* Setup VLAN ID 0 for VLAN-unaware bridges */
2691 ret = mt7530_setup_vlan0(priv);
2692 if (ret)
2693 return ret;
2694
2695 ds->assisted_learning_on_cpu_port = true;
2696 ds->mtu_enforcement_ingress = true;
2697
2698 return 0;
2699 }
2700
2701 static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
2702 struct phylink_config *config)
2703 {
2704 config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
2705
2706 switch (port) {
2707 /* Ports which are connected to switch PHYs. There is no MII pinout. */
2708 case 0 ... 4:
2709 __set_bit(PHY_INTERFACE_MODE_GMII,
2710 config->supported_interfaces);
2711 break;
2712
2713 /* Port 5 supports rgmii with delays, mii, and gmii. */
2714 case 5:
2715 phy_interface_set_rgmii(config->supported_interfaces);
2716 __set_bit(PHY_INTERFACE_MODE_MII,
2717 config->supported_interfaces);
2718 __set_bit(PHY_INTERFACE_MODE_GMII,
2719 config->supported_interfaces);
2720 break;
2721
2722 /* Port 6 supports rgmii and trgmii. */
2723 case 6:
2724 __set_bit(PHY_INTERFACE_MODE_RGMII,
2725 config->supported_interfaces);
2726 __set_bit(PHY_INTERFACE_MODE_TRGMII,
2727 config->supported_interfaces);
2728 break;
2729 }
2730 }
2731
2732 static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
2733 struct phylink_config *config)
2734 {
2735 struct mt7530_priv *priv = ds->priv;
2736
2737 config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
2738
2739 switch (port) {
2740 /* Ports which are connected to switch PHYs. There is no MII pinout. */
2741 case 0 ... 4:
2742 __set_bit(PHY_INTERFACE_MODE_GMII,
2743 config->supported_interfaces);
2744 break;
2745
2746 /* Port 5 supports rgmii with delays on MT7531BE, sgmii/802.3z on
2747 * MT7531AE.
2748 */
2749 case 5:
2750 if (!priv->p5_sgmii) {
2751 phy_interface_set_rgmii(config->supported_interfaces);
2752 break;
2753 }
2754 fallthrough;
2755
2756 /* Port 6 supports sgmii/802.3z. */
2757 case 6:
2758 __set_bit(PHY_INTERFACE_MODE_SGMII,
2759 config->supported_interfaces);
2760 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
2761 config->supported_interfaces);
2762 __set_bit(PHY_INTERFACE_MODE_2500BASEX,
2763 config->supported_interfaces);
2764
2765 config->mac_capabilities |= MAC_2500FD;
2766 break;
2767 }
2768 }
2769
2770 static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port,
2771 struct phylink_config *config)
2772 {
2773 switch (port) {
2774 /* Ports which are connected to switch PHYs. There is no MII pinout. */
2775 case 0 ... 3:
2776 __set_bit(PHY_INTERFACE_MODE_INTERNAL,
2777 config->supported_interfaces);
2778
2779 config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
2780 break;
2781
2782 /* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. */
2783 case 6:
2784 __set_bit(PHY_INTERFACE_MODE_INTERNAL,
2785 config->supported_interfaces);
2786
2787 config->mac_capabilities |= MAC_10000FD;
2788 break;
2789 }
2790 }
2791
2792 static void en7581_mac_port_get_caps(struct dsa_switch *ds, int port,
2793 struct phylink_config *config)
2794 {
2795 switch (port) {
2796 /* Ports which are connected to switch PHYs. There is no MII pinout. */
2797 case 0 ... 4:
2798 __set_bit(PHY_INTERFACE_MODE_INTERNAL,
2799 config->supported_interfaces);
2800
2801 config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
2802 break;
2803
2804 /* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. */
2805 case 6:
2806 __set_bit(PHY_INTERFACE_MODE_INTERNAL,
2807 config->supported_interfaces);
2808
2809 config->mac_capabilities |= MAC_10000FD;
2810 break;
2811 }
2812 }
2813
2814 static void
2815 mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
2816 phy_interface_t interface)
2817 {
2818 struct mt7530_priv *priv = ds->priv;
2819
2820 if (port == 5)
2821 mt7530_setup_port5(priv->ds, interface);
2822 else if (port == 6)
2823 mt7530_setup_port6(priv->ds, interface);
2824 }
2825
2826 static void mt7531_rgmii_setup(struct mt7530_priv *priv,
2827 phy_interface_t interface,
2828 struct phy_device *phydev)
2829 {
2830 u32 val;
2831
2832 val = mt7530_read(priv, MT7531_CLKGEN_CTRL);
2833 val |= GP_CLK_EN;
2834 val &= ~GP_MODE_MASK;
2835 val |= GP_MODE(MT7531_GP_MODE_RGMII);
2836 val &= ~CLK_SKEW_IN_MASK;
2837 val |= CLK_SKEW_IN(MT7531_CLK_SKEW_NO_CHG);
2838 val &= ~CLK_SKEW_OUT_MASK;
2839 val |= CLK_SKEW_OUT(MT7531_CLK_SKEW_NO_CHG);
2840 val |= TXCLK_NO_REVERSE | RXCLK_NO_DELAY;
2841
2842 /* Do not adjust the RGMII delay when a vendor PHY driver is present. */
2843 if (!phydev || phy_driver_is_genphy(phydev)) {
2844 val &= ~(TXCLK_NO_REVERSE | RXCLK_NO_DELAY);
2845 switch (interface) {
2846 case PHY_INTERFACE_MODE_RGMII:
2847 val |= TXCLK_NO_REVERSE;
2848 val |= RXCLK_NO_DELAY;
2849 break;
2850 case PHY_INTERFACE_MODE_RGMII_RXID:
2851 val |= TXCLK_NO_REVERSE;
2852 break;
2853 case PHY_INTERFACE_MODE_RGMII_TXID:
2854 val |= RXCLK_NO_DELAY;
2855 break;
2856 case PHY_INTERFACE_MODE_RGMII_ID:
2857 break;
2858 default:
2859 break;
2860 }
2861 }
2862
2863 mt7530_write(priv, MT7531_CLKGEN_CTRL, val);
2864 }
2865
2866 static void
2867 mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
2868 phy_interface_t interface)
2869 {
2870 struct mt7530_priv *priv = ds->priv;
2871 struct phy_device *phydev;
2872 struct dsa_port *dp;
2873
2874 if (phy_interface_mode_is_rgmii(interface)) {
2875 dp = dsa_to_port(ds, port);
2876 phydev = dp->user->phydev;
2877 mt7531_rgmii_setup(priv, interface, phydev);
2878 }
2879 }
2880
2881 static struct phylink_pcs *
2882 mt753x_phylink_mac_select_pcs(struct phylink_config *config,
2883 phy_interface_t interface)
2884 {
2885 struct dsa_port *dp = dsa_phylink_to_port(config);
2886 struct mt7530_priv *priv = dp->ds->priv;
2887
2888 switch (interface) {
2889 case PHY_INTERFACE_MODE_TRGMII:
2890 return &priv->pcs[dp->index].pcs;
2891 case PHY_INTERFACE_MODE_SGMII:
2892 case PHY_INTERFACE_MODE_1000BASEX:
2893 case PHY_INTERFACE_MODE_2500BASEX:
2894 return priv->ports[dp->index].sgmii_pcs;
2895 default:
2896 return NULL;
2897 }
2898 }
2899
2900 static void
2901 mt753x_phylink_mac_config(struct phylink_config *config, unsigned int mode,
2902 const struct phylink_link_state *state)
2903 {
2904 struct dsa_port *dp = dsa_phylink_to_port(config);
2905 struct dsa_switch *ds = dp->ds;
2906 struct mt7530_priv *priv;
2907 int port = dp->index;
2908
2909 priv = ds->priv;
2910
2911 if ((port == 5 || port == 6) && priv->info->mac_port_config)
2912 priv->info->mac_port_config(ds, port, mode, state->interface);
2913
2914 /* Are we connected to an external PHY? */
2915 if (port == 5 && dsa_is_user_port(ds, 5))
2916 mt7530_set(priv, MT753X_PMCR_P(port), PMCR_EXT_PHY);
2917 }
2918
2919 static void mt753x_phylink_mac_link_down(struct phylink_config *config,
2920 unsigned int mode,
2921 phy_interface_t interface)
2922 {
2923 struct dsa_port *dp = dsa_phylink_to_port(config);
2924 struct mt7530_priv *priv = dp->ds->priv;
2925
2926 mt7530_clear(priv, MT753X_PMCR_P(dp->index), PMCR_LINK_SETTINGS_MASK);
2927 }
2928
2929 static void mt753x_phylink_mac_link_up(struct phylink_config *config,
2930 struct phy_device *phydev,
2931 unsigned int mode,
2932 phy_interface_t interface,
2933 int speed, int duplex,
2934 bool tx_pause, bool rx_pause)
2935 {
2936 struct dsa_port *dp = dsa_phylink_to_port(config);
2937 struct mt7530_priv *priv = dp->ds->priv;
2938 u32 mcr;
2939
2940 mcr = PMCR_MAC_RX_EN | PMCR_MAC_TX_EN | PMCR_FORCE_LNK;
2941
2942 switch (speed) {
2943 case SPEED_1000:
2944 case SPEED_2500:
2945 case SPEED_10000:
2946 mcr |= PMCR_FORCE_SPEED_1000;
2947 break;
2948 case SPEED_100:
2949 mcr |= PMCR_FORCE_SPEED_100;
2950 break;
2951 }
2952 if (duplex == DUPLEX_FULL) {
2953 mcr |= PMCR_FORCE_FDX;
2954 if (tx_pause)
2955 mcr |= PMCR_FORCE_TX_FC_EN;
2956 if (rx_pause)
2957 mcr |= PMCR_FORCE_RX_FC_EN;
2958 }
2959
2960 if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) {
2961 switch (speed) {
2962 case SPEED_1000:
2963 case SPEED_2500:
2964 mcr |= PMCR_FORCE_EEE1G;
2965 break;
2966 case SPEED_100:
2967 mcr |= PMCR_FORCE_EEE100;
2968 break;
2969 }
2970 }
2971
2972 mt7530_set(priv, MT753X_PMCR_P(dp->index), mcr);
2973 }
2974
2975 static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
2976 struct phylink_config *config)
2977 {
2978 struct mt7530_priv *priv = ds->priv;
2979
2980 config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE;
2981
2982 priv->info->mac_port_get_caps(ds, port, config);
2983 }
2984
2985 static int mt753x_pcs_validate(struct phylink_pcs *pcs,
2986 unsigned long *supported,
2987 const struct phylink_link_state *state)
2988 {
2989 /* Autonegotiation is not supported in TRGMII nor 802.3z modes */
2990 if (state->interface == PHY_INTERFACE_MODE_TRGMII ||
2991 phy_interface_mode_is_8023z(state->interface))
2992 phylink_clear(supported, Autoneg);
2993
2994 return 0;
2995 }
2996
2997 static void mt7530_pcs_get_state(struct phylink_pcs *pcs,
2998 struct phylink_link_state *state)
2999 {
3000 struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
3001 int port = pcs_to_mt753x_pcs(pcs)->port;
3002 u32 pmsr;
3003
3004 pmsr = mt7530_read(priv, MT7530_PMSR_P(port));
3005
3006 state->link = (pmsr & PMSR_LINK);
3007 state->an_complete = state->link;
3008 state->duplex = !!(pmsr & PMSR_DPX);
3009
3010 switch (pmsr & PMSR_SPEED_MASK) {
3011 case PMSR_SPEED_10:
3012 state->speed = SPEED_10;
3013 break;
3014 case PMSR_SPEED_100:
3015 state->speed = SPEED_100;
3016 break;
3017 case PMSR_SPEED_1000:
3018 state->speed = SPEED_1000;
3019 break;
3020 default:
3021 state->speed = SPEED_UNKNOWN;
3022 break;
3023 }
3024
3025 state->pause &= ~(MLO_PAUSE_RX | MLO_PAUSE_TX);
3026 if (pmsr & PMSR_RX_FC)
3027 state->pause |= MLO_PAUSE_RX;
3028 if (pmsr & PMSR_TX_FC)
3029 state->pause |= MLO_PAUSE_TX;
3030 }
3031
3032 static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
3033 phy_interface_t interface,
3034 const unsigned long *advertising,
3035 bool permit_pause_to_mac)
3036 {
3037 return 0;
3038 }
3039
3040 static void mt7530_pcs_an_restart(struct phylink_pcs *pcs)
3041 {
3042 }
3043
3044 static const struct phylink_pcs_ops mt7530_pcs_ops = {
3045 .pcs_validate = mt753x_pcs_validate,
3046 .pcs_get_state = mt7530_pcs_get_state,
3047 .pcs_config = mt753x_pcs_config,
3048 .pcs_an_restart = mt7530_pcs_an_restart,
3049 };
3050
3051 static int
3052 mt753x_setup(struct dsa_switch *ds)
3053 {
3054 struct mt7530_priv *priv = ds->priv;
3055 int ret = priv->info->sw_setup(ds);
3056 int i;
3057
3058 if (ret)
3059 return ret;
3060
3061 ret = mt7530_setup_irq(priv);
3062 if (ret)
3063 return ret;
3064
3065 ret = mt7530_setup_mdio(priv);
3066 if (ret && priv->irq)
3067 mt7530_free_irq_common(priv);
3068 if (ret)
3069 return ret;
3070
3071 /* Initialise the PCS devices */
3072 for (i = 0; i < priv->ds->num_ports; i++) {
3073 priv->pcs[i].pcs.ops = priv->info->pcs_ops;
3074 priv->pcs[i].pcs.neg_mode = true;
3075 priv->pcs[i].priv = priv;
3076 priv->pcs[i].port = i;
3077 }
3078
3079 if (priv->create_sgmii) {
3080 ret = priv->create_sgmii(priv);
3081 if (ret && priv->irq)
3082 mt7530_free_irq(priv);
3083 }
3084
3085 return ret;
3086 }
3087
3088 static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
3089 struct ethtool_keee *e)
3090 {
3091 struct mt7530_priv *priv = ds->priv;
3092 u32 eeecr = mt7530_read(priv, MT753X_PMEEECR_P(port));
3093
3094 e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN);
3095 e->tx_lpi_timer = LPI_THRESH_GET(eeecr);
3096
3097 return 0;
3098 }
3099
3100 static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
3101 struct ethtool_keee *e)
3102 {
3103 struct mt7530_priv *priv = ds->priv;
3104 u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN;
3105
3106 if (e->tx_lpi_timer > 0xFFF)
3107 return -EINVAL;
3108
3109 set = LPI_THRESH_SET(e->tx_lpi_timer);
3110 if (!e->tx_lpi_enabled)
3111 /* Force LPI Mode without a delay */
3112 set |= LPI_MODE_EN;
3113 mt7530_rmw(priv, MT753X_PMEEECR_P(port), mask, set);
3114
3115 return 0;
3116 }
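
/* Editorial note: the two EEE callbacks above back the ethtool EEE
 * interface; a typical invocation from userspace might be:
 *
 *	ethtool --set-eee lan0 eee on tx-lpi on tx-timer 66
 *
 * with "lan0" an assumed port name and the tx-timer value bounded by the
 * 0xFFF check in mt753x_set_mac_eee().
 */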
3117
3118 static void
3119 mt753x_conduit_state_change(struct dsa_switch *ds,
3120 const struct net_device *conduit,
3121 bool operational)
3122 {
3123 struct dsa_port *cpu_dp = conduit->dsa_ptr;
3124 struct mt7530_priv *priv = ds->priv;
3125 int val = 0;
3126 u8 mask;
3127
3128 /* Set the CPU port to trap frames to for MT7530. Trapped frames will be
3129 * forwarded to the numerically smallest CPU port whose conduit
3130 * interface is up.
3131 */
3132 if (priv->id != ID_MT7530 && priv->id != ID_MT7621)
3133 return;
3134
3135 mask = BIT(cpu_dp->index);
3136
3137 if (operational)
3138 priv->active_cpu_ports |= mask;
3139 else
3140 priv->active_cpu_ports &= ~mask;
3141
3142 if (priv->active_cpu_ports) {
3143 val = MT7530_CPU_EN |
3144 MT7530_CPU_PORT(__ffs(priv->active_cpu_ports));
3145 }
3146
3147 mt7530_rmw(priv, MT753X_MFC, MT7530_CPU_EN | MT7530_CPU_PORT_MASK, val);
3148 }
3149
3150 static int mt753x_tc_setup_qdisc_tbf(struct dsa_switch *ds, int port,
3151 struct tc_tbf_qopt_offload *qopt)
3152 {
3153 struct tc_tbf_qopt_offload_replace_params *p = &qopt->replace_params;
3154 struct mt7530_priv *priv = ds->priv;
3155 u32 rate = 0;
3156
3157 switch (qopt->command) {
3158 case TC_TBF_REPLACE:
3159 rate = div_u64(p->rate.rate_bytes_ps, 1000) << 3; /* kbps */
3160 fallthrough;
3161 case TC_TBF_DESTROY: {
3162 u32 val, tick;
3163
3164 mt7530_rmw(priv, MT753X_GERLCR, EGR_BC_MASK,
3165 EGR_BC_CRC_IPG_PREAMBLE);
3166
3167 /* If the rate is greater than 10 Mbit/s the tick is 1/32 ms,
3168 * otherwise it is 1 ms.
3169 */
3170 tick = rate > 10000 ? 2 : 7;
3171 val = FIELD_PREP(ERLCR_CIR_MASK, (rate >> 5)) |
3172 FIELD_PREP(ERLCR_EN_MASK, !!rate) |
3173 FIELD_PREP(ERLCR_EXP_MASK, tick) |
3174 ERLCR_TBF_MODE_MASK |
3175 FIELD_PREP(ERLCR_MANT_MASK, 0xf);
3176 mt7530_write(priv, MT753X_ERLCR_P(port), val);
3177 break;
3178 }
3179 default:
3180 return -EOPNOTSUPP;
3181 }
3182
3183 return 0;
3184 }
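
/* Editorial note: a worked example for the TBF math above. For a requested
 * rate of 100 Mbit/s, rate_bytes_ps is 12500000, so rate becomes
 * (12500000 / 1000) << 3 = 100000 kbps; the tick exponent is 2 (1/32 ms)
 * since the rate exceeds 10 Mbit/s, and the CIR field is programmed with
 * rate >> 5 = 3125.
 */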
3185
3186 static int mt753x_setup_tc(struct dsa_switch *ds, int port,
3187 enum tc_setup_type type, void *type_data)
3188 {
3189 switch (type) {
3190 case TC_SETUP_QDISC_TBF:
3191 return mt753x_tc_setup_qdisc_tbf(ds, port, type_data);
3192 default:
3193 return -EOPNOTSUPP;
3194 }
3195 }
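
/* Editorial note: the TC_SETUP_QDISC_TBF offload handled above would
 * typically be exercised from userspace with something like:
 *
 *	tc qdisc replace dev lan0 root tbf rate 100mbit burst 16kb latency 1ms
 *
 * where "lan0" is an assumed user port name; whether the qdisc is actually
 * offloaded depends on the kernel and iproute2 versions in use.
 */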
3196
3197 static int mt7988_setup(struct dsa_switch *ds)
3198 {
3199 struct mt7530_priv *priv = ds->priv;
3200
3201 /* Reset the switch */
3202 reset_control_assert(priv->rstc);
3203 usleep_range(20, 50);
3204 reset_control_deassert(priv->rstc);
3205 usleep_range(20, 50);
3206
3207 /* Reset the switch PHYs */
3208 mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_PHY_RST);
3209
3210 return mt7531_setup_common(ds);
3211 }
3212
3213 const struct dsa_switch_ops mt7530_switch_ops = {
3214 .get_tag_protocol = mtk_get_tag_protocol,
3215 .setup = mt753x_setup,
3216 .preferred_default_local_cpu_port = mt753x_preferred_default_local_cpu_port,
3217 .get_strings = mt7530_get_strings,
3218 .get_ethtool_stats = mt7530_get_ethtool_stats,
3219 .get_sset_count = mt7530_get_sset_count,
3220 .set_ageing_time = mt7530_set_ageing_time,
3221 .port_enable = mt7530_port_enable,
3222 .port_disable = mt7530_port_disable,
3223 .port_change_mtu = mt7530_port_change_mtu,
3224 .port_max_mtu = mt7530_port_max_mtu,
3225 .port_stp_state_set = mt7530_stp_state_set,
3226 .port_pre_bridge_flags = mt7530_port_pre_bridge_flags,
3227 .port_bridge_flags = mt7530_port_bridge_flags,
3228 .port_bridge_join = mt7530_port_bridge_join,
3229 .port_bridge_leave = mt7530_port_bridge_leave,
3230 .port_fdb_add = mt7530_port_fdb_add,
3231 .port_fdb_del = mt7530_port_fdb_del,
3232 .port_fdb_dump = mt7530_port_fdb_dump,
3233 .port_mdb_add = mt7530_port_mdb_add,
3234 .port_mdb_del = mt7530_port_mdb_del,
3235 .port_vlan_filtering = mt7530_port_vlan_filtering,
3236 .port_vlan_add = mt7530_port_vlan_add,
3237 .port_vlan_del = mt7530_port_vlan_del,
3238 .port_mirror_add = mt753x_port_mirror_add,
3239 .port_mirror_del = mt753x_port_mirror_del,
3240 .phylink_get_caps = mt753x_phylink_get_caps,
3241 .get_mac_eee = mt753x_get_mac_eee,
3242 .set_mac_eee = mt753x_set_mac_eee,
3243 .conduit_state_change = mt753x_conduit_state_change,
3244 .port_setup_tc = mt753x_setup_tc,
3245 };
3246 EXPORT_SYMBOL_GPL(mt7530_switch_ops);
3247
3248 static const struct phylink_mac_ops mt753x_phylink_mac_ops = {
3249 .mac_select_pcs = mt753x_phylink_mac_select_pcs,
3250 .mac_config = mt753x_phylink_mac_config,
3251 .mac_link_down = mt753x_phylink_mac_link_down,
3252 .mac_link_up = mt753x_phylink_mac_link_up,
3253 };
3254
3255 const struct mt753x_info mt753x_table[] = {
3256 [ID_MT7621] = {
3257 .id = ID_MT7621,
3258 .pcs_ops = &mt7530_pcs_ops,
3259 .sw_setup = mt7530_setup,
3260 .phy_read_c22 = mt7530_phy_read_c22,
3261 .phy_write_c22 = mt7530_phy_write_c22,
3262 .phy_read_c45 = mt7530_phy_read_c45,
3263 .phy_write_c45 = mt7530_phy_write_c45,
3264 .mac_port_get_caps = mt7530_mac_port_get_caps,
3265 .mac_port_config = mt7530_mac_config,
3266 },
3267 [ID_MT7530] = {
3268 .id = ID_MT7530,
3269 .pcs_ops = &mt7530_pcs_ops,
3270 .sw_setup = mt7530_setup,
3271 .phy_read_c22 = mt7530_phy_read_c22,
3272 .phy_write_c22 = mt7530_phy_write_c22,
3273 .phy_read_c45 = mt7530_phy_read_c45,
3274 .phy_write_c45 = mt7530_phy_write_c45,
3275 .mac_port_get_caps = mt7530_mac_port_get_caps,
3276 .mac_port_config = mt7530_mac_config,
3277 },
3278 [ID_MT7531] = {
3279 .id = ID_MT7531,
3280 .pcs_ops = &mt7530_pcs_ops,
3281 .sw_setup = mt7531_setup,
3282 .phy_read_c22 = mt7531_ind_c22_phy_read,
3283 .phy_write_c22 = mt7531_ind_c22_phy_write,
3284 .phy_read_c45 = mt7531_ind_c45_phy_read,
3285 .phy_write_c45 = mt7531_ind_c45_phy_write,
3286 .mac_port_get_caps = mt7531_mac_port_get_caps,
3287 .mac_port_config = mt7531_mac_config,
3288 },
3289 [ID_MT7988] = {
3290 .id = ID_MT7988,
3291 .pcs_ops = &mt7530_pcs_ops,
3292 .sw_setup = mt7988_setup,
3293 .phy_read_c22 = mt7531_ind_c22_phy_read,
3294 .phy_write_c22 = mt7531_ind_c22_phy_write,
3295 .phy_read_c45 = mt7531_ind_c45_phy_read,
3296 .phy_write_c45 = mt7531_ind_c45_phy_write,
3297 .mac_port_get_caps = mt7988_mac_port_get_caps,
3298 },
3299 [ID_EN7581] = {
3300 .id = ID_EN7581,
3301 .pcs_ops = &mt7530_pcs_ops,
3302 .sw_setup = mt7988_setup,
3303 .phy_read_c22 = mt7531_ind_c22_phy_read,
3304 .phy_write_c22 = mt7531_ind_c22_phy_write,
3305 .phy_read_c45 = mt7531_ind_c45_phy_read,
3306 .phy_write_c45 = mt7531_ind_c45_phy_write,
3307 .mac_port_get_caps = en7581_mac_port_get_caps,
3308 },
3309 };
3310 EXPORT_SYMBOL_GPL(mt753x_table);
3311
3312 int
3313 mt7530_probe_common(struct mt7530_priv *priv)
3314 {
3315 struct device *dev = priv->dev;
3316
3317 priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
3318 if (!priv->ds)
3319 return -ENOMEM;
3320
3321 priv->ds->dev = dev;
3322 priv->ds->num_ports = MT7530_NUM_PORTS;
3323
3324 /* Get the hardware identifier from the devicetree node.
3325 * We will need it for some of the clock and regulator setup.
3326 */
3327 priv->info = of_device_get_match_data(dev);
3328 if (!priv->info)
3329 return -EINVAL;
3330
3331 priv->id = priv->info->id;
3332 priv->dev = dev;
3333 priv->ds->priv = priv;
3334 priv->ds->ops = &mt7530_switch_ops;
3335 priv->ds->phylink_mac_ops = &mt753x_phylink_mac_ops;
3336 mutex_init(&priv->reg_mutex);
3337 dev_set_drvdata(dev, priv);
3338
3339 return 0;
3340 }
3341 EXPORT_SYMBOL_GPL(mt7530_probe_common);
3342
3343 void
3344 mt7530_remove_common(struct mt7530_priv *priv)
3345 {
3346 if (priv->irq)
3347 mt7530_free_irq(priv);
3348
3349 dsa_unregister_switch(priv->ds);
3350
3351 mutex_destroy(&priv->reg_mutex);
3352 }
3353 EXPORT_SYMBOL_GPL(mt7530_remove_common);
3354
3355 MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
3356 MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch");
3357 MODULE_LICENSE("GPL");
3358