xref: /linux/drivers/net/dsa/bcm_sf2.c (revision 0883c2c06fb5bcf5b9e008270827e63c09a88c1e)
1 /*
2  * Broadcom Starfighter 2 DSA switch driver
3  *
4  * Copyright (C) 2014, Broadcom Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  */
11 
12 #include <linux/list.h>
13 #include <linux/module.h>
14 #include <linux/netdevice.h>
15 #include <linux/interrupt.h>
16 #include <linux/platform_device.h>
17 #include <linux/of.h>
18 #include <linux/phy.h>
19 #include <linux/phy_fixed.h>
20 #include <linux/mii.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_address.h>
24 #include <linux/of_net.h>
25 #include <net/dsa.h>
26 #include <linux/ethtool.h>
27 #include <linux/if_bridge.h>
28 #include <linux/brcmphy.h>
29 #include <linux/etherdevice.h>
30 #include <net/switchdev.h>
31 
32 #include "bcm_sf2.h"
33 #include "bcm_sf2_regs.h"
34 
35 /* String, offset, and register size in bytes if different from 4 bytes */
36 static const struct bcm_sf2_hw_stats bcm_sf2_mib[] = {
37 	{ "TxOctets",		0x000, 8	},
38 	{ "TxDropPkts",		0x020		},
39 	{ "TxQPKTQ0",		0x030		},
40 	{ "TxBroadcastPkts",	0x040		},
41 	{ "TxMulticastPkts",	0x050		},
42 	{ "TxUnicastPKts",	0x060		},
43 	{ "TxCollisions",	0x070		},
44 	{ "TxSingleCollision",	0x080		},
45 	{ "TxMultipleCollision", 0x090		},
46 	{ "TxDeferredCollision", 0x0a0		},
47 	{ "TxLateCollision",	0x0b0		},
48 	{ "TxExcessiveCollision", 0x0c0		},
49 	{ "TxFrameInDisc",	0x0d0		},
50 	{ "TxPausePkts",	0x0e0		},
51 	{ "TxQPKTQ1",		0x0f0		},
52 	{ "TxQPKTQ2",		0x100		},
53 	{ "TxQPKTQ3",		0x110		},
54 	{ "TxQPKTQ4",		0x120		},
55 	{ "TxQPKTQ5",		0x130		},
56 	{ "RxOctets",		0x140, 8	},
57 	{ "RxUndersizePkts",	0x160		},
58 	{ "RxPausePkts",	0x170		},
59 	{ "RxPkts64Octets",	0x180		},
60 	{ "RxPkts65to127Octets", 0x190		},
61 	{ "RxPkts128to255Octets", 0x1a0		},
62 	{ "RxPkts256to511Octets", 0x1b0		},
63 	{ "RxPkts512to1023Octets", 0x1c0	},
64 	{ "RxPkts1024toMaxPktsOctets", 0x1d0	},
65 	{ "RxOversizePkts",	0x1e0		},
66 	{ "RxJabbers",		0x1f0		},
67 	{ "RxAlignmentErrors",	0x200		},
68 	{ "RxFCSErrors",	0x210		},
69 	{ "RxGoodOctets",	0x220, 8	},
70 	{ "RxDropPkts",		0x240		},
71 	{ "RxUnicastPkts",	0x250		},
72 	{ "RxMulticastPkts",	0x260		},
73 	{ "RxBroadcastPkts",	0x270		},
74 	{ "RxSAChanges",	0x280		},
75 	{ "RxFragments",	0x290		},
76 	{ "RxJumboPkt",		0x2a0		},
77 	{ "RxSymblErr",		0x2b0		},
78 	{ "InRangeErrCount",	0x2c0		},
79 	{ "OutRangeErrCount",	0x2d0		},
80 	{ "EEELpiEvent",	0x2e0		},
81 	{ "EEELpiDuration",	0x2f0		},
82 	{ "RxDiscard",		0x300, 8	},
83 	{ "TxQPKTQ6",		0x320		},
84 	{ "TxQPKTQ7",		0x330		},
85 	{ "TxPkts64Octets",	0x340		},
86 	{ "TxPkts65to127Octets", 0x350		},
87 	{ "TxPkts128to255Octets", 0x360		},
88 	{ "TxPkts256to511Ocets", 0x370		},
89 	{ "TxPkts512to1023Ocets", 0x380		},
90 	{ "TxPkts1024toMaxPktOcets", 0x390	},
91 };
92 
93 #define BCM_SF2_STATS_SIZE	ARRAY_SIZE(bcm_sf2_mib)
94 
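/* Copy the MIB counter names, one ETH_GSTRING_LEN slot per counter, for
 * reporting through ethtool.
 */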
95 static void bcm_sf2_sw_get_strings(struct dsa_switch *ds,
96 				   int port, uint8_t *data)
97 {
98 	unsigned int i;
99 
100 	for (i = 0; i < BCM_SF2_STATS_SIZE; i++)
101 		memcpy(data + i * ETH_GSTRING_LEN,
102 		       bcm_sf2_mib[i].string, ETH_GSTRING_LEN);
103 }
104 
105 static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds,
106 					 int port, uint64_t *data)
107 {
108 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
109 	const struct bcm_sf2_hw_stats *s;
110 	unsigned int i;
111 	u64 val = 0;
112 	u32 offset;
113 
114 	mutex_lock(&priv->stats_mutex);
115 
116 	/* Now fetch the per-port counters */
117 	for (i = 0; i < BCM_SF2_STATS_SIZE; i++) {
118 		s = &bcm_sf2_mib[i];
119 
120 		/* Do a latched 64-bit read if needed */
121 		offset = s->reg + CORE_P_MIB_OFFSET(port);
122 		if (s->sizeof_stat == 8)
123 			val = core_readq(priv, offset);
124 		else
125 			val = core_readl(priv, offset);
126 
127 		data[i] = (u64)val;
128 	}
129 
130 	mutex_unlock(&priv->stats_mutex);
131 }
132 
133 static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds)
134 {
135 	return BCM_SF2_STATS_SIZE;
136 }
137 
138 static const char *bcm_sf2_sw_drv_probe(struct device *dsa_dev,
139 					struct device *host_dev, int sw_addr,
140 					void **_priv)
141 {
142 	struct bcm_sf2_priv *priv;
143 
144 	priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL);
145 	if (!priv)
146 		return NULL;
147 	*_priv = priv;
148 
149 	return "Broadcom Starfighter 2";
150 }
151 
152 static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
153 {
154 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
155 	unsigned int i;
156 	u32 reg;
157 
158 	/* Enable the IMP Port to be in the same VLAN as the other ports
159 	 * on a per-port basis such that we only have Port i and IMP in
160 	 * the same VLAN.
161 	 */
162 	for (i = 0; i < priv->hw_params.num_ports; i++) {
163 		if (!((1 << i) & ds->enabled_port_mask))
164 			continue;
165 
166 		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
167 		reg |= (1 << cpu_port);
168 		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
169 	}
170 }
171 
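/* Bring up the IMP (CPU) port: ungate its queue memories, allow broadcast,
 * multicast and unicast traffic to reach it, enable Broadcom tags on it and
 * force its link up.
 */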
172 static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
173 {
174 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
175 	u32 reg, val;
176 
177 	/* Enable the port memories */
178 	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
179 	reg &= ~P_TXQ_PSM_VDD(port);
180 	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
181 
182 	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
183 	reg = core_readl(priv, CORE_IMP_CTL);
184 	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
185 	reg &= ~(RX_DIS | TX_DIS);
186 	core_writel(priv, reg, CORE_IMP_CTL);
187 
188 	/* Enable forwarding */
189 	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
190 
191 	/* Enable IMP port in dumb mode */
192 	reg = core_readl(priv, CORE_SWITCH_CTRL);
193 	reg |= MII_DUMB_FWDG_EN;
194 	core_writel(priv, reg, CORE_SWITCH_CTRL);
195 
196 	/* Resolve which bit controls the Broadcom tag */
197 	switch (port) {
198 	case 8:
199 		val = BRCM_HDR_EN_P8;
200 		break;
201 	case 7:
202 		val = BRCM_HDR_EN_P7;
203 		break;
204 	case 5:
205 		val = BRCM_HDR_EN_P5;
206 		break;
207 	default:
208 		val = 0;
209 		break;
210 	}
211 
212 	/* Enable Broadcom tags for IMP port */
213 	reg = core_readl(priv, CORE_BRCM_HDR_CTRL);
214 	reg |= val;
215 	core_writel(priv, reg, CORE_BRCM_HDR_CTRL);
216 
217 	/* Enable reception of Broadcom tags for CPU TX (switch RX) to
218 	 * allow us to tag outgoing frames
219 	 */
220 	reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS);
221 	reg &= ~(1 << port);
222 	core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS);
223 
224 	/* Enable transmission of Broadcom tags from the switch (CPU RX) to
225 	 * allow delivering frames to the per-port net_devices
226 	 */
227 	reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
228 	reg &= ~(1 << port);
229 	core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);
230 
231 	/* Force link status for IMP port */
232 	reg = core_readl(priv, CORE_STS_OVERRIDE_IMP);
233 	reg |= (MII_SW_OR | LINK_STS);
234 	core_writel(priv, reg, CORE_STS_OVERRIDE_IMP);
235 }
236 
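/* Set or clear the per-port EEE enable bit in the switch core */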
237 static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
238 {
239 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
240 	u32 reg;
241 
242 	reg = core_readl(priv, CORE_EEE_EN_CTRL);
243 	if (enable)
244 		reg |= 1 << port;
245 	else
246 		reg &= ~(1 << port);
247 	core_writel(priv, reg, CORE_EEE_EN_CTRL);
248 }
249 
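/* Power the internal GPHY up (with a reset pulse) or down */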
250 static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
251 {
252 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
253 	u32 reg;
254 
255 	reg = reg_readl(priv, REG_SPHY_CNTRL);
256 	if (enable) {
257 		reg |= PHY_RESET;
258 		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
259 		reg_writel(priv, reg, REG_SPHY_CNTRL);
260 		udelay(21);
261 		reg = reg_readl(priv, REG_SPHY_CNTRL);
262 		reg &= ~PHY_RESET;
263 	} else {
264 		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
265 		reg_writel(priv, reg, REG_SPHY_CNTRL);
266 		mdelay(1);
267 		reg |= CK25_DIS;
268 	}
269 	reg_writel(priv, reg, REG_SPHY_CNTRL);
270 
271 	/* Use PHY-driven LED signaling */
272 	if (!enable) {
273 		reg = reg_readl(priv, REG_LED_CNTRL(0));
274 		reg |= SPDLNK_SRC_SEL;
275 		reg_writel(priv, reg, REG_LED_CNTRL(0));
276 	}
277 }
278 
279 static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
280 					    int port)
281 {
282 	unsigned int off;
283 
284 	switch (port) {
285 	case 7:
286 		off = P7_IRQ_OFF;
287 		break;
288 	case 0:
289 		/* Port 0 interrupts are located on the first bank */
290 		intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
291 		return;
292 	default:
293 		off = P_IRQ_OFF(port);
294 		break;
295 	}
296 
297 	intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
298 }
299 
300 static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
301 					     int port)
302 {
303 	unsigned int off;
304 
305 	switch (port) {
306 	case 7:
307 		off = P7_IRQ_OFF;
308 		break;
309 	case 0:
310 		/* Port 0 interrupts are located on the first bank */
311 		intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
312 		intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
313 		return;
314 	default:
315 		off = P_IRQ_OFF(port);
316 		break;
317 	}
318 
319 	intrl2_1_mask_set(priv, P_IRQ_MASK(off));
320 	intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
321 }
322 
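/* Per-port enable path: power up the port memories, clear the RX/TX disable
 * bits, re-enable the internal GPHY when this port uses it, and restore the
 * port's VLAN membership and EEE state.
 */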
323 static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
324 			      struct phy_device *phy)
325 {
326 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
327 	s8 cpu_port = ds->dst[ds->index].cpu_port;
328 	u32 reg;
329 
330 	/* Clear the memory power down */
331 	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
332 	reg &= ~P_TXQ_PSM_VDD(port);
333 	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
334 
335 	/* Clear the Rx and Tx disable bits and set to no spanning tree */
336 	core_writel(priv, 0, CORE_G_PCTL_PORT(port));
337 
338 	/* Re-enable the GPHY and re-apply workarounds */
339 	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
340 		bcm_sf2_gphy_enable_set(ds, true);
341 		if (phy) {
342 			/* If phy_stop() has been called before, the PHY
343 			 * will be in the halted state and phy_start()
344 			 * will call resume.
345 			 *
346 			 * The resume path does not restore the autoneg
347 			 * settings, and since we hard reset the PHY
348 			 * manually here, we need to reset its state
349 			 * machine as well.
350 			 */
351 			phy->state = PHY_READY;
352 			phy_init_hw(phy);
353 		}
354 	}
355 
356 	/* Enable MoCA port interrupts to get notified */
357 	if (port == priv->moca_port)
358 		bcm_sf2_port_intr_enable(priv, port);
359 
360 	/* Set this port, and only this one, to be in the default VLAN;
361 	 * if it is a member of a bridge, restore the membership it had
362 	 * prior to bringing down this port.
363 	 */
364 	reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
365 	reg &= ~PORT_VLAN_CTRL_MASK;
366 	reg |= (1 << port);
367 	reg |= priv->port_sts[port].vlan_ctl_mask;
368 	core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));
369 
370 	bcm_sf2_imp_vlan_setup(ds, cpu_port);
371 
372 	/* If EEE was enabled, restore it */
373 	if (priv->port_sts[port].eee.eee_enabled)
374 		bcm_sf2_eee_enable_set(ds, port, true);
375 
376 	return 0;
377 }
378 
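/* Disable a port unless it is needed for Wake-on-LAN: quiesce its interrupts
 * and PHY as needed, stop RX/TX and power down the port memory.
 */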
379 static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
380 				 struct phy_device *phy)
381 {
382 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
383 	u32 off, reg;
384 
385 	if (priv->wol_ports_mask & (1 << port))
386 		return;
387 
388 	if (port == priv->moca_port)
389 		bcm_sf2_port_intr_disable(priv, port);
390 
391 	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
392 		bcm_sf2_gphy_enable_set(ds, false);
393 
394 	if (dsa_is_cpu_port(ds, port))
395 		off = CORE_IMP_CTL;
396 	else
397 		off = CORE_G_PCTL_PORT(port);
398 
399 	reg = core_readl(priv, off);
400 	reg |= RX_DIS | TX_DIS;
401 	core_writel(priv, reg, off);
402 
403 	/* Power down the port memory */
404 	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
405 	reg |= P_TXQ_PSM_VDD(port);
406 	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
407 }
408 
409 /* Returns 0 if EEE was not enabled, or 1 otherwise
410  */
411 static int bcm_sf2_eee_init(struct dsa_switch *ds, int port,
412 			    struct phy_device *phy)
413 {
414 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
415 	struct ethtool_eee *p = &priv->port_sts[port].eee;
416 	int ret;
417 
418 	p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full);
419 
420 	ret = phy_init_eee(phy, 0);
421 	if (ret)
422 		return 0;
423 
424 	bcm_sf2_eee_enable_set(ds, port, true);
425 
426 	return 1;
427 }
428 
429 static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port,
430 			      struct ethtool_eee *e)
431 {
432 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
433 	struct ethtool_eee *p = &priv->port_sts[port].eee;
434 	u32 reg;
435 
436 	reg = core_readl(priv, CORE_EEE_LPI_INDICATE);
437 	e->eee_enabled = p->eee_enabled;
438 	e->eee_active = !!(reg & (1 << port));
439 
440 	return 0;
441 }
442 
443 static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
444 			      struct phy_device *phydev,
445 			      struct ethtool_eee *e)
446 {
447 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
448 	struct ethtool_eee *p = &priv->port_sts[port].eee;
449 
450 	p->eee_enabled = e->eee_enabled;
451 
452 	if (!p->eee_enabled) {
453 		bcm_sf2_eee_enable_set(ds, port, false);
454 	} else {
455 		p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
456 		if (!p->eee_enabled)
457 			return -EOPNOTSUPP;
458 	}
459 
460 	return 0;
461 }
462 
463 /* Fast-ageing of ARL entries for a given port, equivalent to an ARL
464  * flush for that port.
465  */
466 static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
467 {
468 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
469 	unsigned int timeout = 1000;
470 	u32 reg;
471 
472 	core_writel(priv, port, CORE_FAST_AGE_PORT);
473 
474 	reg = core_readl(priv, CORE_FAST_AGE_CTRL);
475 	reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
476 	core_writel(priv, reg, CORE_FAST_AGE_CTRL);
477 
478 	do {
479 		reg = core_readl(priv, CORE_FAST_AGE_CTRL);
480 		if (!(reg & FAST_AGE_STR_DONE))
481 			break;
482 
483 		cpu_relax();
484 	} while (timeout--);
485 
486 	if (!timeout)
487 		return -ETIMEDOUT;
488 
489 	core_writel(priv, 0, CORE_FAST_AGE_CTRL);
490 
491 	return 0;
492 }
493 
494 static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
495 			      struct net_device *bridge)
496 {
497 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
498 	unsigned int i;
499 	u32 reg, p_ctl;
500 
501 	priv->port_sts[port].bridge_dev = bridge;
502 	p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
503 
504 	for (i = 0; i < priv->hw_params.num_ports; i++) {
505 		if (priv->port_sts[i].bridge_dev != bridge)
506 			continue;
507 
508 		/* Add this local port to the remote port VLAN control
509 		 * membership and update the remote port bitmask
510 		 */
511 		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
512 		reg |= 1 << port;
513 		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
514 		priv->port_sts[i].vlan_ctl_mask = reg;
515 
516 		p_ctl |= 1 << i;
517 	}
518 
519 	/* Configure the local port VLAN control membership to include
520 	 * remote ports and update the local port bitmask
521 	 */
522 	core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
523 	priv->port_sts[port].vlan_ctl_mask = p_ctl;
524 
525 	return 0;
526 }
527 
528 static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
529 {
530 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
531 	struct net_device *bridge = priv->port_sts[port].bridge_dev;
532 	unsigned int i;
533 	u32 reg, p_ctl;
534 
535 	p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
536 
537 	for (i = 0; i < priv->hw_params.num_ports; i++) {
538 		/* Don't touch the remaining ports */
539 		if (priv->port_sts[i].bridge_dev != bridge)
540 			continue;
541 
542 		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
543 		reg &= ~(1 << port);
544 		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
545 		priv->port_sts[i].vlan_ctl_mask = reg;
546 
547 		/* Prevent self removal to preserve isolation */
548 		if (port != i)
549 			p_ctl &= ~(1 << i);
550 	}
551 
552 	core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
553 	priv->port_sts[port].vlan_ctl_mask = p_ctl;
554 	priv->port_sts[port].bridge_dev = NULL;
555 }
556 
557 static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
558 					u8 state)
559 {
560 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
561 	u8 hw_state, cur_hw_state;
562 	u32 reg;
563 
564 	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
565 	cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
566 
567 	switch (state) {
568 	case BR_STATE_DISABLED:
569 		hw_state = G_MISTP_DIS_STATE;
570 		break;
571 	case BR_STATE_LISTENING:
572 		hw_state = G_MISTP_LISTEN_STATE;
573 		break;
574 	case BR_STATE_LEARNING:
575 		hw_state = G_MISTP_LEARN_STATE;
576 		break;
577 	case BR_STATE_FORWARDING:
578 		hw_state = G_MISTP_FWD_STATE;
579 		break;
580 	case BR_STATE_BLOCKING:
581 		hw_state = G_MISTP_BLOCK_STATE;
582 		break;
583 	default:
584 		pr_err("%s: invalid STP state: %d\n", __func__, state);
585 		return;
586 	}
587 
588 	/* Fast-age ARL entries if we are moving a port from Learning or
589 	 * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
590 	 * state (hw_state)
591 	 */
592 	if (cur_hw_state != hw_state) {
593 		if (cur_hw_state >= G_MISTP_LEARN_STATE &&
594 		    hw_state <= G_MISTP_LISTEN_STATE) {
595 			if (bcm_sf2_sw_fast_age_port(ds, port)) {
596 				pr_err("%s: fast-ageing failed\n", __func__);
597 				return;
598 			}
599 		}
600 	}
601 
602 	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
603 	reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
604 	reg |= hw_state;
605 	core_writel(priv, reg, CORE_G_PCTL_PORT(port));
606 }
607 
608 /* Address Resolution Logic routines */
609 static int bcm_sf2_arl_op_wait(struct bcm_sf2_priv *priv)
610 {
611 	unsigned int timeout = 10;
612 	u32 reg;
613 
614 	do {
615 		reg = core_readl(priv, CORE_ARLA_RWCTL);
616 		if (!(reg & ARL_STRTDN))
617 			return 0;
618 
619 		usleep_range(1000, 2000);
620 	} while (timeout--);
621 
622 	return -ETIMEDOUT;
623 }
624 
625 static int bcm_sf2_arl_rw_op(struct bcm_sf2_priv *priv, unsigned int op)
626 {
627 	u32 cmd;
628 
629 	if (op > ARL_RW)
630 		return -EINVAL;
631 
632 	cmd = core_readl(priv, CORE_ARLA_RWCTL);
633 	cmd &= ~IVL_SVL_SELECT;
634 	cmd |= ARL_STRTDN;
635 	if (op)
636 		cmd |= ARL_RW;
637 	else
638 		cmd &= ~ARL_RW;
639 	core_writel(priv, cmd, CORE_ARLA_RWCTL);
640 
641 	return bcm_sf2_arl_op_wait(priv);
642 }
643 
644 static int bcm_sf2_arl_read(struct bcm_sf2_priv *priv, u64 mac,
645 			    u16 vid, struct bcm_sf2_arl_entry *ent, u8 *idx,
646 			    bool is_valid)
647 {
648 	unsigned int i;
649 	int ret;
650 
651 	ret = bcm_sf2_arl_op_wait(priv);
652 	if (ret)
653 		return ret;
654 
655 	/* Read the 4 bins */
656 	for (i = 0; i < 4; i++) {
657 		u64 mac_vid;
658 		u32 fwd_entry;
659 
660 		mac_vid = core_readq(priv, CORE_ARLA_MACVID_ENTRY(i));
661 		fwd_entry = core_readl(priv, CORE_ARLA_FWD_ENTRY(i));
662 		bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);
663 
664 		if (ent->is_valid && is_valid) {
665 			*idx = i;
666 			return 0;
667 		}
668 
669 		/* This is the MAC we just deleted */
670 		if (!is_valid && (mac_vid & mac))
671 			return 0;
672 	}
673 
674 	return -ENOENT;
675 }
676 
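/* Look up the ARL entry for (addr, vid) and, when op == 0, program it back
 * with the requested port and validity; a non-zero op performs the lookup
 * only.
 */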
677 static int bcm_sf2_arl_op(struct bcm_sf2_priv *priv, int op, int port,
678 			  const unsigned char *addr, u16 vid, bool is_valid)
679 {
680 	struct bcm_sf2_arl_entry ent;
681 	u32 fwd_entry;
682 	u64 mac, mac_vid = 0;
683 	u8 idx = 0;
684 	int ret;
685 
686 	/* Convert the array into a 64-bit MAC */
687 	mac = bcm_sf2_mac_to_u64(addr);
688 
689 	/* Perform a read for the given MAC and VID */
690 	core_writeq(priv, mac, CORE_ARLA_MAC);
691 	core_writel(priv, vid, CORE_ARLA_VID);
692 
693 	/* Issue a read operation for this MAC */
694 	ret = bcm_sf2_arl_rw_op(priv, 1);
695 	if (ret)
696 		return ret;
697 
698 	ret = bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
699 	/* If this is a read, just finish now */
700 	if (op)
701 		return ret;
702 
703 	/* We could not find a matching MAC, so reset to a new entry */
704 	if (ret) {
705 		fwd_entry = 0;
706 		idx = 0;
707 	}
708 
709 	memset(&ent, 0, sizeof(ent));
710 	ent.port = port;
711 	ent.is_valid = is_valid;
712 	ent.vid = vid;
713 	ent.is_static = true;
714 	memcpy(ent.mac, addr, ETH_ALEN);
715 	bcm_sf2_arl_from_entry(&mac_vid, &fwd_entry, &ent);
716 
717 	core_writeq(priv, mac_vid, CORE_ARLA_MACVID_ENTRY(idx));
718 	core_writel(priv, fwd_entry, CORE_ARLA_FWD_ENTRY(idx));
719 
720 	ret = bcm_sf2_arl_rw_op(priv, 0);
721 	if (ret)
722 		return ret;
723 
724 	/* Re-read the entry to check */
725 	return bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
726 }
727 
728 static int bcm_sf2_sw_fdb_prepare(struct dsa_switch *ds, int port,
729 				  const struct switchdev_obj_port_fdb *fdb,
730 				  struct switchdev_trans *trans)
731 {
732 	/* We do not need to do anything specific here yet */
733 	return 0;
734 }
735 
736 static void bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
737 			       const struct switchdev_obj_port_fdb *fdb,
738 			       struct switchdev_trans *trans)
739 {
740 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
741 
742 	if (bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true))
743 		pr_err("%s: failed to add MAC address\n", __func__);
744 }
745 
746 static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port,
747 			      const struct switchdev_obj_port_fdb *fdb)
748 {
749 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
750 
751 	return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
752 }
753 
754 static int bcm_sf2_arl_search_wait(struct bcm_sf2_priv *priv)
755 	unsigned int timeout = 1000;
756 	unsigned timeout = 1000;
757 	u32 reg;
758 
759 	do {
760 		reg = core_readl(priv, CORE_ARLA_SRCH_CTL);
761 		if (!(reg & ARLA_SRCH_STDN))
762 			return 0;
763 
764 		if (reg & ARLA_SRCH_VLID)
765 			return 0;
766 
767 		usleep_range(1000, 2000);
768 	} while (timeout--);
769 
770 	return -ETIMEDOUT;
771 }
772 
773 static void bcm_sf2_arl_search_rd(struct bcm_sf2_priv *priv, u8 idx,
774 				  struct bcm_sf2_arl_entry *ent)
775 {
776 	u64 mac_vid;
777 	u32 fwd_entry;
778 
779 	mac_vid = core_readq(priv, CORE_ARLA_SRCH_RSLT_MACVID(idx));
780 	fwd_entry = core_readl(priv, CORE_ARLA_SRCH_RSLT(idx));
781 	bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);
782 }
783 
784 static int bcm_sf2_sw_fdb_copy(struct net_device *dev, int port,
785 			       const struct bcm_sf2_arl_entry *ent,
786 			       struct switchdev_obj_port_fdb *fdb,
787 			       int (*cb)(struct switchdev_obj *obj))
788 {
789 	if (!ent->is_valid)
790 		return 0;
791 
792 	if (port != ent->port)
793 		return 0;
794 
795 	ether_addr_copy(fdb->addr, ent->mac);
796 	fdb->vid = ent->vid;
797 	fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE;
798 
799 	return cb(&fdb->obj);
800 }
801 
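/* Dump the ARL table: start a hardware search and report each pair of
 * results back through the switchdev callback until the search is
 * exhausted.
 */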
802 static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
803 			       struct switchdev_obj_port_fdb *fdb,
804 			       int (*cb)(struct switchdev_obj *obj))
805 {
806 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
807 	struct net_device *dev = ds->ports[port];
808 	struct bcm_sf2_arl_entry results[2];
809 	unsigned int count = 0;
810 	int ret;
811 
812 	/* Start search operation */
813 	core_writel(priv, ARLA_SRCH_STDN, CORE_ARLA_SRCH_CTL);
814 
815 	do {
816 		ret = bcm_sf2_arl_search_wait(priv);
817 		if (ret)
818 			return ret;
819 
820 		/* Read both entries, then pass their values back to the callback */
821 		bcm_sf2_arl_search_rd(priv, 0, &results[0]);
822 		ret = bcm_sf2_sw_fdb_copy(dev, port, &results[0], fdb, cb);
823 		if (ret)
824 			return ret;
825 
826 		bcm_sf2_arl_search_rd(priv, 1, &results[1]);
827 		ret = bcm_sf2_sw_fdb_copy(dev, port, &results[1], fdb, cb);
828 		if (ret)
829 			return ret;
830 
831 		if (!results[0].is_valid && !results[1].is_valid)
832 			break;
833 
834 	} while (count++ < CORE_ARLA_NUM_ENTRIES);
835 
836 	return 0;
837 }
838 
839 static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
840 {
841 	struct bcm_sf2_priv *priv = dev_id;
842 
843 	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
844 				~priv->irq0_mask;
845 	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
846 
847 	return IRQ_HANDLED;
848 }
849 
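/* Second interrupt bank: tracks link state changes on port 7, which
 * bcm_sf2_sw_fixed_link_update() uses for the MoCA port.
 */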
850 static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
851 {
852 	struct bcm_sf2_priv *priv = dev_id;
853 
854 	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
855 				~priv->irq1_mask;
856 	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
857 
858 	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
859 		priv->port_sts[7].link = 1;
860 	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
861 		priv->port_sts[7].link = 0;
862 
863 	return IRQ_HANDLED;
864 }
865 
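/* Software-reset the switch core and wait for the reset bit to clear */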
866 static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
867 {
868 	unsigned int timeout = 1000;
869 	u32 reg;
870 
871 	reg = core_readl(priv, CORE_WATCHDOG_CTRL);
872 	reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
873 	core_writel(priv, reg, CORE_WATCHDOG_CTRL);
874 
875 	do {
876 		reg = core_readl(priv, CORE_WATCHDOG_CTRL);
877 		if (!(reg & SOFTWARE_RESET))
878 			break;
879 
880 		usleep_range(1000, 2000);
881 	} while (timeout-- > 0);
882 
883 	if (timeout == 0)
884 		return -ETIMEDOUT;
885 
886 	return 0;
887 }
888 
889 static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
890 {
891 	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
892 	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
893 	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
894 	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
895 	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
896 	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
897 }
898 
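/* Walk the DSA port nodes to record which ports use an internal PHY and
 * which one, if any, is the MoCA port.
 */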
899 static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
900 				   struct device_node *dn)
901 {
902 	struct device_node *port;
903 	const char *phy_mode_str;
904 	int mode;
905 	unsigned int port_num;
906 	int ret;
907 
908 	priv->moca_port = -1;
909 
910 	for_each_available_child_of_node(dn, port) {
911 		if (of_property_read_u32(port, "reg", &port_num))
912 			continue;
913 
914 		/* Internal PHYs are assigned a specific 'phy-mode' property
915 		 * value ("internal") to help flag them before MDIO probing
916 		 * has completed, since they might be turned off at that
917 		 * time.
918 		 */
919 		mode = of_get_phy_mode(port);
920 		if (mode < 0) {
921 			ret = of_property_read_string(port, "phy-mode",
922 						      &phy_mode_str);
923 			if (ret < 0)
924 				continue;
925 
926 			if (!strcasecmp(phy_mode_str, "internal"))
927 				priv->int_phy_mask |= 1 << port_num;
928 		}
929 
930 		if (mode == PHY_INTERFACE_MODE_MOCA)
931 			priv->moca_port = port_num;
932 	}
933 }
934 
935 static int bcm_sf2_sw_setup(struct dsa_switch *ds)
936 {
937 	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
938 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
939 	struct device_node *dn;
940 	void __iomem **base;
941 	unsigned int port;
942 	unsigned int i;
943 	u32 reg, rev;
944 	int ret;
945 
946 	spin_lock_init(&priv->indir_lock);
947 	mutex_init(&priv->stats_mutex);
948 
949 	/* All the interesting properties are at the parent device_node
950 	 * level
951 	 */
952 	dn = ds->cd->of_node->parent;
953 	bcm_sf2_identify_ports(priv, ds->cd->of_node);
954 
955 	priv->irq0 = irq_of_parse_and_map(dn, 0);
956 	priv->irq1 = irq_of_parse_and_map(dn, 1);
957 
958 	base = &priv->core;
959 	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
960 		*base = of_iomap(dn, i);
961 		if (*base == NULL) {
962 			pr_err("unable to find register: %s\n", reg_names[i]);
963 			ret = -ENOMEM;
964 			goto out_unmap;
965 		}
966 		base++;
967 	}
968 
969 	ret = bcm_sf2_sw_rst(priv);
970 	if (ret) {
971 		pr_err("unable to software reset switch: %d\n", ret);
972 		goto out_unmap;
973 	}
974 
975 	/* Disable all interrupts and request them */
976 	bcm_sf2_intr_disable(priv);
977 
978 	ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
979 			  "switch_0", priv);
980 	if (ret < 0) {
981 		pr_err("failed to request switch_0 IRQ\n");
982 		goto out_unmap;
983 	}
984 
985 	ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0,
986 			  "switch_1", priv);
987 	if (ret < 0) {
988 		pr_err("failed to request switch_1 IRQ\n");
989 		goto out_free_irq0;
990 	}
991 
992 	/* Reset the MIB counters */
993 	reg = core_readl(priv, CORE_GMNCFGCFG);
994 	reg |= RST_MIB_CNT;
995 	core_writel(priv, reg, CORE_GMNCFGCFG);
996 	reg &= ~RST_MIB_CNT;
997 	core_writel(priv, reg, CORE_GMNCFGCFG);
998 
999 	/* Get the maximum number of ports for this switch */
1000 	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
1001 	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
1002 		priv->hw_params.num_ports = DSA_MAX_PORTS;
1003 
1004 	/* Assume a single GPHY setup if we can't read that property */
1005 	if (of_property_read_u32(dn, "brcm,num-gphy",
1006 				 &priv->hw_params.num_gphy))
1007 		priv->hw_params.num_gphy = 1;
1008 
1009 	/* Enable all valid ports and disable those unused */
1010 	for (port = 0; port < priv->hw_params.num_ports; port++) {
1011 		/* IMP port receives special treatment */
1012 		if ((1 << port) & ds->enabled_port_mask)
1013 			bcm_sf2_port_setup(ds, port, NULL);
1014 		else if (dsa_is_cpu_port(ds, port))
1015 			bcm_sf2_imp_setup(ds, port);
1016 		else
1017 			bcm_sf2_port_disable(ds, port, NULL);
1018 	}
1019 
1020 	/* Include the pseudo-PHY address and the broadcast PHY address to
1021 	 * divert reads towards our workaround. This is only required for
1022 	 * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
1023 	 * that we can use the regular SWITCH_MDIO master controller instead.
1024 	 *
1025 	 * By default, DSA initializes ds->phys_mii_mask to
1026 	 * ds->enabled_port_mask to have a 1:1 mapping between Port address
1027 	 * and PHY address in order to utilize the slave_mii_bus instance to
1028 	 * read from Port PHYs. This is not what we want here, so we
1029 	 * initialize phys_mii_mask 0 to always utilize the "master" MDIO
1030 	 * initialize phys_mii_mask to 0 to always utilize the "master" MDIO
1031 	 */
1032 	if (of_machine_is_compatible("brcm,bcm7445d0"))
1033 		ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
1034 	else
1035 		ds->phys_mii_mask = 0;
1036 
1037 	rev = reg_readl(priv, REG_SWITCH_REVISION);
1038 	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
1039 					SWITCH_TOP_REV_MASK;
1040 	priv->hw_params.core_rev = (rev & SF2_REV_MASK);
1041 
1042 	rev = reg_readl(priv, REG_PHY_REVISION);
1043 	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;
1044 
1045 	pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x, base: 0x%p, IRQs: %d, %d\n",
1046 		priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
1047 		priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
1048 		priv->core, priv->irq0, priv->irq1);
1049 
1050 	return 0;
1051 
1052 out_free_irq0:
1053 	free_irq(priv->irq0, priv);
1054 out_unmap:
1055 	base = &priv->core;
1056 	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
1057 		if (*base)
1058 			iounmap(*base);
1059 		base++;
1060 	}
1061 	return ret;
1062 }
1063 
1064 static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr)
1065 {
1066 	return 0;
1067 }
1068 
1069 static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
1070 {
1071 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
1072 
1073 	/* The BCM7xxx PHY driver expects to find the integrated PHY revision
1074 	 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
1075 	 * the REG_PHY_REVISION register layout is.
1076 	 */
1077 
1078 	return priv->hw_params.gphy_rev;
1079 }
1080 
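/* Indirect pseudo-PHY access: temporarily take over as MDIO master, select
 * the PHY address, then read (op != 0) or write (op == 0) the register
 * through the page 0x80 window.
 */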
1081 static int bcm_sf2_sw_indir_rw(struct dsa_switch *ds, int op, int addr,
1082 			       int regnum, u16 val)
1083 {
1084 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
1085 	int ret = 0;
1086 	u32 reg;
1087 
1088 	reg = reg_readl(priv, REG_SWITCH_CNTRL);
1089 	reg |= MDIO_MASTER_SEL;
1090 	reg_writel(priv, reg, REG_SWITCH_CNTRL);
1091 
1092 	/* Page << 8 | offset */
1093 	reg = 0x70;
1094 	reg <<= 2;
1095 	core_writel(priv, addr, reg);
1096 
1097 	/* Page << 8 | offset */
1098 	reg = 0x80 << 8 | regnum << 1;
1099 	reg <<= 2;
1100 
1101 	if (op)
1102 		ret = core_readl(priv, reg);
1103 	else
1104 		core_writel(priv, val, reg);
1105 
1106 	reg = reg_readl(priv, REG_SWITCH_CNTRL);
1107 	reg &= ~MDIO_MASTER_SEL;
1108 	reg_writel(priv, reg, REG_SWITCH_CNTRL);
1109 
1110 	return ret & 0xffff;
1111 }
1112 
1113 static int bcm_sf2_sw_phy_read(struct dsa_switch *ds, int addr, int regnum)
1114 {
1115 	/* Intercept reads from the MDIO broadcast address or Broadcom
1116 	 * pseudo-PHY address
1117 	 */
1118 	switch (addr) {
1119 	case 0:
1120 	case BRCM_PSEUDO_PHY_ADDR:
1121 		return bcm_sf2_sw_indir_rw(ds, 1, addr, regnum, 0);
1122 	default:
1123 		return 0xffff;
1124 	}
1125 }
1126 
1127 static int bcm_sf2_sw_phy_write(struct dsa_switch *ds, int addr, int regnum,
1128 				u16 val)
1129 {
1130 	/* Intercept writes to the MDIO broadcast address or Broadcom
1131 	 * pseudo-PHY address
1132 	 */
1133 	switch (addr) {
1134 	case 0:
1135 	case BRCM_PSEUDO_PHY_ADDR:
1136 		bcm_sf2_sw_indir_rw(ds, 0, addr, regnum, val);
1137 		break;
1138 	}
1139 
1140 	return 0;
1141 }
1142 
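/* adjust_link callback: program the RGMII interface (mode, delays, pause)
 * for external PHYs and force the resolved speed, duplex and link state
 * into the port override register.
 */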
1143 static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
1144 				   struct phy_device *phydev)
1145 {
1146 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
1147 	u32 id_mode_dis = 0, port_mode;
1148 	const char *str = NULL;
1149 	u32 reg;
1150 
1151 	switch (phydev->interface) {
1152 	case PHY_INTERFACE_MODE_RGMII:
1153 		str = "RGMII (no delay)";
1154 		id_mode_dis = 1;
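		/* fall through */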
1155 	case PHY_INTERFACE_MODE_RGMII_TXID:
1156 		if (!str)
1157 			str = "RGMII (TX delay)";
1158 		port_mode = EXT_GPHY;
1159 		break;
1160 	case PHY_INTERFACE_MODE_MII:
1161 		str = "MII";
1162 		port_mode = EXT_EPHY;
1163 		break;
1164 	case PHY_INTERFACE_MODE_REVMII:
1165 		str = "Reverse MII";
1166 		port_mode = EXT_REVMII;
1167 		break;
1168 	default:
1169 		/* All other PHYs: internal and MoCA */
1170 		goto force_link;
1171 	}
1172 
1173 	/* If the link is down, just disable the interface to conserve power */
1174 	if (!phydev->link) {
1175 		reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
1176 		reg &= ~RGMII_MODE_EN;
1177 		reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
1178 		goto force_link;
1179 	}
1180 
1181 	/* Clear id_mode_dis bit, and the existing port mode, but
1182 	 * make sure we enable the RGMII block for data to pass
1183 	 */
1184 	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
1185 	reg &= ~ID_MODE_DIS;
1186 	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
1187 	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
1188 
1189 	reg |= port_mode | RGMII_MODE_EN;
1190 	if (id_mode_dis)
1191 		reg |= ID_MODE_DIS;
1192 
1193 	if (phydev->pause) {
1194 		if (phydev->asym_pause)
1195 			reg |= TX_PAUSE_EN;
1196 		reg |= RX_PAUSE_EN;
1197 	}
1198 
1199 	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
1200 
1201 	pr_info("Port %d configured for %s\n", port, str);
1202 
1203 force_link:
1204 	/* Force link settings detected from the PHY */
1205 	reg = SW_OVERRIDE;
1206 	switch (phydev->speed) {
1207 	case SPEED_1000:
1208 		reg |= SPDSTS_1000 << SPEED_SHIFT;
1209 		break;
1210 	case SPEED_100:
1211 		reg |= SPDSTS_100 << SPEED_SHIFT;
1212 		break;
1213 	}
1214 
1215 	if (phydev->link)
1216 		reg |= LINK_STS;
1217 	if (phydev->duplex == DUPLEX_FULL)
1218 		reg |= DUPLX_MODE;
1219 
1220 	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
1221 }
1222 
1223 static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
1224 					 struct fixed_phy_status *status)
1225 {
1226 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
1227 	u32 duplex, pause;
1228 	u32 reg;
1229 
1230 	duplex = core_readl(priv, CORE_DUPSTS);
1231 	pause = core_readl(priv, CORE_PAUSESTS);
1232 
1233 	status->link = 0;
1234 
1235 	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
1236 	 * which means that we need to force the link at the port override
1237 	 * level to get the data to flow. We use what the interrupt handler
1238 	 * determined earlier.
1239 	 *
1240 	 * For the other ports, we just force the link status, since this is
1241 	 * a fixed PHY device.
1242 	 */
1243 	if (port == priv->moca_port) {
1244 		status->link = priv->port_sts[port].link;
1245 		/* For MoCA interfaces, also force a link down notification
1246 		 * since some versions of the user-space daemon (mocad) use
1247 		 * cmd->autoneg to force the link, which messes up the PHY
1248 		 * state machine and makes it go into the PHY_FORCING state instead.
1249 		 */
1250 		if (!status->link)
1251 			netif_carrier_off(ds->ports[port]);
1252 		status->duplex = 1;
1253 	} else {
1254 		status->link = 1;
1255 		status->duplex = !!(duplex & (1 << port));
1256 	}
1257 
1258 	reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
1259 	reg |= SW_OVERRIDE;
1260 	if (status->link)
1261 		reg |= LINK_STS;
1262 	else
1263 		reg &= ~LINK_STS;
1264 	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
1265 
1266 	if ((pause & (1 << port)) &&
1267 	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
1268 		status->asym_pause = 1;
1269 		status->pause = 1;
1270 	}
1271 
1272 	if (pause & (1 << port))
1273 		status->pause = 1;
1274 }
1275 
1276 static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
1277 {
1278 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
1279 	unsigned int port;
1280 
1281 	bcm_sf2_intr_disable(priv);
1282 
1283 	/* Disable all ports physically present including the IMP
1284 	 * port; the other ones have already been disabled during
1285 	 * bcm_sf2_sw_setup
1286 	 */
1287 	for (port = 0; port < DSA_MAX_PORTS; port++) {
1288 		if ((1 << port) & ds->enabled_port_mask ||
1289 		    dsa_is_cpu_port(ds, port))
1290 			bcm_sf2_port_disable(ds, port, NULL);
1291 	}
1292 
1293 	return 0;
1294 }
1295 
1296 static int bcm_sf2_sw_resume(struct dsa_switch *ds)
1297 {
1298 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
1299 	unsigned int port;
1300 	int ret;
1301 
1302 	ret = bcm_sf2_sw_rst(priv);
1303 	if (ret) {
1304 		pr_err("%s: failed to software reset switch\n", __func__);
1305 		return ret;
1306 	}
1307 
1308 	if (priv->hw_params.num_gphy == 1)
1309 		bcm_sf2_gphy_enable_set(ds, true);
1310 
1311 	for (port = 0; port < DSA_MAX_PORTS; port++) {
1312 		if ((1 << port) & ds->enabled_port_mask)
1313 			bcm_sf2_port_setup(ds, port, NULL);
1314 		else if (dsa_is_cpu_port(ds, port))
1315 			bcm_sf2_imp_setup(ds, port);
1316 	}
1317 
1318 	return 0;
1319 }
1320 
1321 static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
1322 			       struct ethtool_wolinfo *wol)
1323 {
1324 	struct net_device *p = ds->dst[ds->index].master_netdev;
1325 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
1326 	struct ethtool_wolinfo pwol;
1327 
1328 	/* Get the parent device WoL settings */
1329 	p->ethtool_ops->get_wol(p, &pwol);
1330 
1331 	/* Advertise the parent device supported settings */
1332 	wol->supported = pwol.supported;
1333 	memset(&wol->sopass, 0, sizeof(wol->sopass));
1334 
1335 	if (pwol.wolopts & WAKE_MAGICSECURE)
1336 		memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));
1337 
1338 	if (priv->wol_ports_mask & (1 << port))
1339 		wol->wolopts = pwol.wolopts;
1340 	else
1341 		wol->wolopts = 0;
1342 }
1343 
1344 static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
1345 			      struct ethtool_wolinfo *wol)
1346 {
1347 	struct net_device *p = ds->dst[ds->index].master_netdev;
1348 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
1349 	s8 cpu_port = ds->dst[ds->index].cpu_port;
1350 	struct ethtool_wolinfo pwol;
1351 
1352 	p->ethtool_ops->get_wol(p, &pwol);
1353 	if (wol->wolopts & ~pwol.supported)
1354 		return -EINVAL;
1355 
1356 	if (wol->wolopts)
1357 		priv->wol_ports_mask |= (1 << port);
1358 	else
1359 		priv->wol_ports_mask &= ~(1 << port);
1360 
1361 	/* If we have at least one port enabled, make sure the CPU port
1362 	 * is also enabled. If the CPU port is the last one enabled, we disable
1363 	 * it since this configuration does not make sense.
1364 	 */
1365 	if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
1366 		priv->wol_ports_mask |= (1 << cpu_port);
1367 	else
1368 		priv->wol_ports_mask &= ~(1 << cpu_port);
1369 
1370 	return p->ethtool_ops->set_wol(p, wol);
1371 }
1372 
1373 static struct dsa_switch_driver bcm_sf2_switch_driver = {
1374 	.tag_protocol		= DSA_TAG_PROTO_BRCM,
1375 	.probe			= bcm_sf2_sw_drv_probe,
1376 	.setup			= bcm_sf2_sw_setup,
1377 	.set_addr		= bcm_sf2_sw_set_addr,
1378 	.get_phy_flags		= bcm_sf2_sw_get_phy_flags,
1379 	.phy_read		= bcm_sf2_sw_phy_read,
1380 	.phy_write		= bcm_sf2_sw_phy_write,
1381 	.get_strings		= bcm_sf2_sw_get_strings,
1382 	.get_ethtool_stats	= bcm_sf2_sw_get_ethtool_stats,
1383 	.get_sset_count		= bcm_sf2_sw_get_sset_count,
1384 	.adjust_link		= bcm_sf2_sw_adjust_link,
1385 	.fixed_link_update	= bcm_sf2_sw_fixed_link_update,
1386 	.suspend		= bcm_sf2_sw_suspend,
1387 	.resume			= bcm_sf2_sw_resume,
1388 	.get_wol		= bcm_sf2_sw_get_wol,
1389 	.set_wol		= bcm_sf2_sw_set_wol,
1390 	.port_enable		= bcm_sf2_port_setup,
1391 	.port_disable		= bcm_sf2_port_disable,
1392 	.get_eee		= bcm_sf2_sw_get_eee,
1393 	.set_eee		= bcm_sf2_sw_set_eee,
1394 	.port_bridge_join	= bcm_sf2_sw_br_join,
1395 	.port_bridge_leave	= bcm_sf2_sw_br_leave,
1396 	.port_stp_state_set	= bcm_sf2_sw_br_set_stp_state,
1397 	.port_fdb_prepare	= bcm_sf2_sw_fdb_prepare,
1398 	.port_fdb_add		= bcm_sf2_sw_fdb_add,
1399 	.port_fdb_del		= bcm_sf2_sw_fdb_del,
1400 	.port_fdb_dump		= bcm_sf2_sw_fdb_dump,
1401 };
1402 
1403 static int __init bcm_sf2_init(void)
1404 {
1405 	register_switch_driver(&bcm_sf2_switch_driver);
1406 
1407 	return 0;
1408 }
1409 module_init(bcm_sf2_init);
1410 
1411 static void __exit bcm_sf2_exit(void)
1412 {
1413 	unregister_switch_driver(&bcm_sf2_switch_driver);
1414 }
1415 module_exit(bcm_sf2_exit);
1416 
1417 MODULE_AUTHOR("Broadcom Corporation");
1418 MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
1419 MODULE_LICENSE("GPL");
1420 MODULE_ALIAS("platform:brcm-sf2");
1421