/* xref: /linux/drivers/net/dsa/b53/b53_common.c (revision faee676944dab731c9b2b91cf86c769d291a2237) */
1 /*
2  * B53 switch driver main logic
3  *
4  * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
5  * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com>
6  *
7  * Permission to use, copy, modify, and/or distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 
22 #include <linux/delay.h>
23 #include <linux/export.h>
24 #include <linux/gpio.h>
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/platform_data/b53.h>
28 #include <linux/phy.h>
29 #include <linux/phylink.h>
30 #include <linux/etherdevice.h>
31 #include <linux/if_bridge.h>
32 #include <net/dsa.h>
33 
34 #include "b53_regs.h"
35 #include "b53_priv.h"
36 
37 /* Describes one hardware MIB (statistics) counter exposed through
38  * ethtool: its width, its offset inside the per-port MIB page and
39  * its user-visible name.
40  */
41 struct b53_mib_desc {
42 	u8 size;	/* counter width in bytes: 4 or 8 */
43 	u8 offset;	/* offset within B53_MIB_PAGE(port) */
44 	const char *name;	/* ethtool string for this counter */
45 };
42 
43 /* BCM5365 MIB counters: { size, offset, name } per struct b53_mib_desc */
44 static const struct b53_mib_desc b53_mibs_65[] = {
45 	{ 8, 0x00, "TxOctets" },
46 	{ 4, 0x08, "TxDropPkts" },
47 	{ 4, 0x10, "TxBroadcastPkts" },
48 	{ 4, 0x14, "TxMulticastPkts" },
49 	{ 4, 0x18, "TxUnicastPkts" },
50 	{ 4, 0x1c, "TxCollisions" },
51 	{ 4, 0x20, "TxSingleCollision" },
52 	{ 4, 0x24, "TxMultipleCollision" },
53 	{ 4, 0x28, "TxDeferredTransmit" },
54 	{ 4, 0x2c, "TxLateCollision" },
55 	{ 4, 0x30, "TxExcessiveCollision" },
56 	{ 4, 0x38, "TxPausePkts" },
57 	{ 8, 0x44, "RxOctets" },
58 	{ 4, 0x4c, "RxUndersizePkts" },
59 	{ 4, 0x50, "RxPausePkts" },
60 	{ 4, 0x54, "Pkts64Octets" },
61 	{ 4, 0x58, "Pkts65to127Octets" },
62 	{ 4, 0x5c, "Pkts128to255Octets" },
63 	{ 4, 0x60, "Pkts256to511Octets" },
64 	{ 4, 0x64, "Pkts512to1023Octets" },
65 	{ 4, 0x68, "Pkts1024to1522Octets" },
66 	{ 4, 0x6c, "RxOversizePkts" },
67 	{ 4, 0x70, "RxJabbers" },
68 	{ 4, 0x74, "RxAlignmentErrors" },
69 	{ 4, 0x78, "RxFCSErrors" },
70 	{ 8, 0x7c, "RxGoodOctets" },
71 	{ 4, 0x84, "RxDropPkts" },
72 	{ 4, 0x88, "RxUnicastPkts" },
73 	{ 4, 0x8c, "RxMulticastPkts" },
74 	{ 4, 0x90, "RxBroadcastPkts" },
75 	{ 4, 0x94, "RxSAChanges" },
76 	{ 4, 0x98, "RxFragments" },
77 };
78 
79 #define B53_MIBS_65_SIZE	ARRAY_SIZE(b53_mibs_65)
80 
81 /* BCM63xx MIB counters: as b53_mibs_65 but with additional QoS,
82  * symbol-error and jumbo-size buckets at the higher offsets.
83  */
84 static const struct b53_mib_desc b53_mibs_63xx[] = {
85 	{ 8, 0x00, "TxOctets" },
86 	{ 4, 0x08, "TxDropPkts" },
87 	{ 4, 0x0c, "TxQoSPkts" },
88 	{ 4, 0x10, "TxBroadcastPkts" },
89 	{ 4, 0x14, "TxMulticastPkts" },
90 	{ 4, 0x18, "TxUnicastPkts" },
91 	{ 4, 0x1c, "TxCollisions" },
92 	{ 4, 0x20, "TxSingleCollision" },
93 	{ 4, 0x24, "TxMultipleCollision" },
94 	{ 4, 0x28, "TxDeferredTransmit" },
95 	{ 4, 0x2c, "TxLateCollision" },
96 	{ 4, 0x30, "TxExcessiveCollision" },
97 	{ 4, 0x38, "TxPausePkts" },
98 	{ 8, 0x3c, "TxQoSOctets" },
99 	{ 8, 0x44, "RxOctets" },
100 	{ 4, 0x4c, "RxUndersizePkts" },
101 	{ 4, 0x50, "RxPausePkts" },
102 	{ 4, 0x54, "Pkts64Octets" },
103 	{ 4, 0x58, "Pkts65to127Octets" },
104 	{ 4, 0x5c, "Pkts128to255Octets" },
105 	{ 4, 0x60, "Pkts256to511Octets" },
106 	{ 4, 0x64, "Pkts512to1023Octets" },
107 	{ 4, 0x68, "Pkts1024to1522Octets" },
108 	{ 4, 0x6c, "RxOversizePkts" },
109 	{ 4, 0x70, "RxJabbers" },
110 	{ 4, 0x74, "RxAlignmentErrors" },
111 	{ 4, 0x78, "RxFCSErrors" },
112 	{ 8, 0x7c, "RxGoodOctets" },
113 	{ 4, 0x84, "RxDropPkts" },
114 	{ 4, 0x88, "RxUnicastPkts" },
115 	{ 4, 0x8c, "RxMulticastPkts" },
116 	{ 4, 0x90, "RxBroadcastPkts" },
117 	{ 4, 0x94, "RxSAChanges" },
118 	{ 4, 0x98, "RxFragments" },
119 	{ 4, 0xa0, "RxSymbolErrors" },
120 	{ 4, 0xa4, "RxQoSPkts" },
121 	{ 8, 0xa8, "RxQoSOctets" },
122 	{ 4, 0xb0, "Pkts1523to2047Octets" },
123 	{ 4, 0xb4, "Pkts2048to4095Octets" },
124 	{ 4, 0xb8, "Pkts4096to8191Octets" },
125 	{ 4, 0xbc, "Pkts8192to9728Octets" },
126 	{ 4, 0xc0, "RxDiscarded" },
127 };
126 
127 #define B53_MIBS_63XX_SIZE	ARRAY_SIZE(b53_mibs_63xx)
128 
129 /* Default MIB counter layout used by chips not matched by the 5365,
130  * 63xx or 58xx tables.
131  */
132 static const struct b53_mib_desc b53_mibs[] = {
133 	{ 8, 0x00, "TxOctets" },
134 	{ 4, 0x08, "TxDropPkts" },
135 	{ 4, 0x10, "TxBroadcastPkts" },
136 	{ 4, 0x14, "TxMulticastPkts" },
137 	{ 4, 0x18, "TxUnicastPkts" },
138 	{ 4, 0x1c, "TxCollisions" },
139 	{ 4, 0x20, "TxSingleCollision" },
140 	{ 4, 0x24, "TxMultipleCollision" },
141 	{ 4, 0x28, "TxDeferredTransmit" },
142 	{ 4, 0x2c, "TxLateCollision" },
143 	{ 4, 0x30, "TxExcessiveCollision" },
144 	{ 4, 0x38, "TxPausePkts" },
145 	{ 8, 0x50, "RxOctets" },
146 	{ 4, 0x58, "RxUndersizePkts" },
147 	{ 4, 0x5c, "RxPausePkts" },
148 	{ 4, 0x60, "Pkts64Octets" },
149 	{ 4, 0x64, "Pkts65to127Octets" },
150 	{ 4, 0x68, "Pkts128to255Octets" },
151 	{ 4, 0x6c, "Pkts256to511Octets" },
152 	{ 4, 0x70, "Pkts512to1023Octets" },
153 	{ 4, 0x74, "Pkts1024to1522Octets" },
154 	{ 4, 0x78, "RxOversizePkts" },
155 	{ 4, 0x7c, "RxJabbers" },
156 	{ 4, 0x80, "RxAlignmentErrors" },
157 	{ 4, 0x84, "RxFCSErrors" },
158 	{ 8, 0x88, "RxGoodOctets" },
159 	{ 4, 0x90, "RxDropPkts" },
160 	{ 4, 0x94, "RxUnicastPkts" },
161 	{ 4, 0x98, "RxMulticastPkts" },
162 	{ 4, 0x9c, "RxBroadcastPkts" },
163 	{ 4, 0xa0, "RxSAChanges" },
164 	{ 4, 0xa4, "RxFragments" },
165 	{ 4, 0xa8, "RxJumboPkts" },
166 	{ 4, 0xac, "RxSymbolErrors" },
167 	{ 4, 0xc0, "RxDiscarded" },
168 };
167 
168 #define B53_MIBS_SIZE	ARRAY_SIZE(b53_mibs)
169 
170 /* BCM58xx (Northstar Plus/Starfighter 2 family) MIB counters, including
171  * per-egress-queue (TxQPKTQn) and EEE LPI event counters.
172  */
173 static const struct b53_mib_desc b53_mibs_58xx[] = {
174 	{ 8, 0x00, "TxOctets" },
175 	{ 4, 0x08, "TxDropPkts" },
176 	{ 4, 0x0c, "TxQPKTQ0" },
177 	{ 4, 0x10, "TxBroadcastPkts" },
178 	{ 4, 0x14, "TxMulticastPkts" },
179 	{ 4, 0x18, "TxUnicastPKts" },
180 	{ 4, 0x1c, "TxCollisions" },
181 	{ 4, 0x20, "TxSingleCollision" },
182 	{ 4, 0x24, "TxMultipleCollision" },
183 	{ 4, 0x28, "TxDeferredCollision" },
184 	{ 4, 0x2c, "TxLateCollision" },
185 	{ 4, 0x30, "TxExcessiveCollision" },
186 	{ 4, 0x34, "TxFrameInDisc" },
187 	{ 4, 0x38, "TxPausePkts" },
188 	{ 4, 0x3c, "TxQPKTQ1" },
189 	{ 4, 0x40, "TxQPKTQ2" },
190 	{ 4, 0x44, "TxQPKTQ3" },
191 	{ 4, 0x48, "TxQPKTQ4" },
192 	{ 4, 0x4c, "TxQPKTQ5" },
193 	{ 8, 0x50, "RxOctets" },
194 	{ 4, 0x58, "RxUndersizePkts" },
195 	{ 4, 0x5c, "RxPausePkts" },
196 	{ 4, 0x60, "RxPkts64Octets" },
197 	{ 4, 0x64, "RxPkts65to127Octets" },
198 	{ 4, 0x68, "RxPkts128to255Octets" },
199 	{ 4, 0x6c, "RxPkts256to511Octets" },
200 	{ 4, 0x70, "RxPkts512to1023Octets" },
201 	{ 4, 0x74, "RxPkts1024toMaxPktsOctets" },
202 	{ 4, 0x78, "RxOversizePkts" },
203 	{ 4, 0x7c, "RxJabbers" },
204 	{ 4, 0x80, "RxAlignmentErrors" },
205 	{ 4, 0x84, "RxFCSErrors" },
206 	{ 8, 0x88, "RxGoodOctets" },
207 	{ 4, 0x90, "RxDropPkts" },
208 	{ 4, 0x94, "RxUnicastPkts" },
209 	{ 4, 0x98, "RxMulticastPkts" },
210 	{ 4, 0x9c, "RxBroadcastPkts" },
211 	{ 4, 0xa0, "RxSAChanges" },
212 	{ 4, 0xa4, "RxFragments" },
213 	{ 4, 0xa8, "RxJumboPkt" },
214 	{ 4, 0xac, "RxSymblErr" },
215 	{ 4, 0xb0, "InRangeErrCount" },
216 	{ 4, 0xb4, "OutRangeErrCount" },
217 	{ 4, 0xb8, "EEELpiEvent" },
218 	{ 4, 0xbc, "EEELpiDuration" },
219 	{ 4, 0xc0, "RxDiscard" },
220 	{ 4, 0xc8, "TxQPKTQ6" },
221 	{ 4, 0xcc, "TxQPKTQ7" },
222 	{ 4, 0xd0, "TxPkts64Octets" },
223 	{ 4, 0xd4, "TxPkts65to127Octets" },
224 	{ 4, 0xd8, "TxPkts128to255Octets" },
225 	{ 4, 0xdc, "TxPkts256to511Ocets" },
226 	{ 4, 0xe0, "TxPkts512to1023Ocets" },
227 	{ 4, 0xe4, "TxPkts1024toMaxPktOcets" },
228 };
226 
227 #define B53_MIBS_58XX_SIZE	ARRAY_SIZE(b53_mibs_58xx)
228 
229 /* Kick off a VLAN table operation and wait for it to complete.
230  *
231  * Writes VTA_START_CMD together with @op to the first VLAN table
232  * access register, then polls up to 10 times (100-200us apart) for
233  * the hardware to clear the start bit again.
234  *
235  * Return: 0 when the operation completed, -EIO on poll exhaustion.
236  */
237 static int b53_do_vlan_op(struct b53_device *dev, u8 op)
238 {
239 	unsigned int attempts = 10;
240 	u8 vta;
241 
242 	b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op);
243 
244 	while (attempts--) {
245 		b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta);
246 		if (!(vta & VTA_START_CMD))
247 			return 0;
248 
249 		usleep_range(100, 200);
250 	}
251 
252 	return -EIO;
253 }
247 
248 /* b53_set_vlan_entry - program one VLAN table entry
249  * @dev: switch device
250  * @vid: VLAN ID to program
251  * @vlan: member and untagged port masks to write
252  *
253  * Three register layouts exist depending on the generation:
254  * 5325 (32-bit entry), 5365 (16-bit entry) and the indirect
255  * ARL-page based table on everything newer. An entry with an
256  * empty member mask is written as all-zero, i.e. invalidated.
257  */
258 static void b53_set_vlan_entry(struct b53_device *dev, u16 vid,
259 			       struct b53_vlan *vlan)
260 {
261 	if (is5325(dev)) {
262 		u32 entry = 0;
263 
264 		if (vlan->members) {
265 			entry = ((vlan->untag & VA_UNTAG_MASK_25) <<
266 				 VA_UNTAG_S_25) | vlan->members;
267 			/* Later 5325 cores use the _R4 valid bit and carry
268 			 * the upper VID bits inside the entry itself.
269 			 */
270 			if (dev->core_rev >= 3)
271 				entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S;
272 			else
273 				entry |= VA_VALID_25;
274 		}
275 
276 		/* Stage the entry, then trigger the indexed write */
277 		b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry);
278 		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
279 			    VTA_RW_STATE_WR | VTA_RW_OP_EN);
280 	} else if (is5365(dev)) {
281 		u16 entry = 0;
282 
283 		if (vlan->members)
284 			entry = ((vlan->untag & VA_UNTAG_MASK_65) <<
285 				 VA_UNTAG_S_65) | vlan->members | VA_VALID_65;
286 
287 		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry);
288 		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
289 			    VTA_RW_STATE_WR | VTA_RW_OP_EN);
290 	} else {
291 		/* Newer devices: indirect access through the ARL I/O page */
292 		b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
293 		b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2],
294 			    (vlan->untag << VTE_UNTAG_S) | vlan->members);
295 
296 		b53_do_vlan_op(dev, VTA_CMD_WRITE);
297 	}
298 
299 	dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n",
300 		vid, vlan->members, vlan->untag);
301 }
287 
288 /* b53_get_vlan_entry - read one VLAN table entry into @vlan
289  * @dev: switch device
290  * @vid: VLAN ID to read
291  * @vlan: output; valid flag, member and untagged masks
292  *
293  * Mirror of b53_set_vlan_entry() for the three table layouts. On the
294  * ARL-based layout there is no valid bit to decode, so ->valid is
295  * unconditionally set.
296  */
297 static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
298 			       struct b53_vlan *vlan)
299 {
300 	if (is5325(dev)) {
301 		u32 entry = 0;
302 
303 		/* Trigger the indexed read, then fetch the staged entry */
304 		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
305 			    VTA_RW_STATE_RD | VTA_RW_OP_EN);
306 		b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry);
307 
308 		if (dev->core_rev >= 3)
309 			vlan->valid = !!(entry & VA_VALID_25_R4);
310 		else
311 			vlan->valid = !!(entry & VA_VALID_25);
312 		vlan->members = entry & VA_MEMBER_MASK;
313 		vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25;
314 
315 	} else if (is5365(dev)) {
316 		u16 entry = 0;
317 
318 		/* NOTE(review): this read path asserts VTA_RW_STATE_WR while
319 		 * the 5325 read path above uses VTA_RW_STATE_RD - confirm
320 		 * against the BCM5365 datasheet whether this is intended.
321 		 */
322 		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
323 			    VTA_RW_STATE_WR | VTA_RW_OP_EN);
324 		b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry);
325 
326 		vlan->valid = !!(entry & VA_VALID_65);
327 		vlan->members = entry & VA_MEMBER_MASK;
328 		vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65;
329 	} else {
330 		u32 entry = 0;
331 
332 		b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
333 		b53_do_vlan_op(dev, VTA_CMD_READ);
334 		b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry);
335 		vlan->members = entry & VTE_MEMBERS;
336 		vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS;
337 		vlan->valid = true;
338 	}
339 }
326 
327 /* Globally enable or disable frame forwarding, and make sure the IMP
328  * port participates in forwarding and unknown-address flooding.
329  */
330 static void b53_set_forwarding(struct b53_device *dev, int enable)
331 {
332 	u8 reg;
333 
334 	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &reg);
335 	if (enable)
336 		reg |= SM_SW_FWD_EN;
337 	else
338 		reg &= ~SM_SW_FWD_EN;
339 	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, reg);
340 
341 	/* Include IMP port in dumb forwarding mode */
342 	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &reg);
343 	reg |= B53_MII_DUMB_FWDG_EN;
344 	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, reg);
345 
346 	/* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
347 	 * frames should be flooded or not.
348 	 */
349 	b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &reg);
350 	reg |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
351 	b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, reg);
352 }
353 
354 /* b53_enable_vlan - turn 802.1Q VLAN operation on or off
355  * @dev: switch device
356  * @enable: enable VLAN table lookups and tagging
357  * @enable_filtering: drop frames that violate ingress VID membership
358  *
359  * Reads the generation-specific VLAN control registers, reshapes the
360  * VC0/VC1/VC4/VC5 bits for the requested mode and writes everything
361  * back. The final state is cached in dev->vlan_enabled.
362  */
363 static void b53_enable_vlan(struct b53_device *dev, bool enable,
364 			    bool enable_filtering)
365 {
366 	u8 mgmt, vc0, vc1, vc4 = 0, vc5;
367 
368 	/* mgmt is captured up front and written back unchanged at the
369 	 * end; presumably to reassert the switch mode - TODO confirm.
370 	 */
371 	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
372 	b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0);
373 	b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1);
374 
375 	/* VC4/VC5 live at different offsets per generation */
376 	if (is5325(dev) || is5365(dev)) {
377 		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
378 		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5);
379 	} else if (is63xx(dev)) {
380 		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4);
381 		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5);
382 	} else {
383 		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4);
384 		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
385 	}
386 
387 	if (enable) {
388 		vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
389 		vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
390 		vc4 &= ~VC4_ING_VID_CHECK_MASK;
391 		/* Filtering decides what happens to ingress VID violations:
392 		 * drop them outright, or forward them anyway.
393 		 */
394 		if (enable_filtering) {
395 			vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
396 			vc5 |= VC5_DROP_VTABLE_MISS;
397 		} else {
398 			vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
399 			vc5 &= ~VC5_DROP_VTABLE_MISS;
400 		}
401 
402 		if (is5325(dev))
403 			vc0 &= ~VC0_RESERVED_1;
404 
405 		if (is5325(dev) || is5365(dev))
406 			vc1 |= VC1_RX_MCST_TAG_EN;
407 
408 	} else {
409 		vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
410 		vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
411 		vc4 &= ~VC4_ING_VID_CHECK_MASK;
412 		vc5 &= ~VC5_DROP_VTABLE_MISS;
413 
414 		/* With VLANs off, violations are either forwarded (old
415 		 * chips) or redirected to the IMP port (newer chips).
416 		 */
417 		if (is5325(dev) || is5365(dev))
418 			vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
419 		else
420 			vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S;
421 
422 		if (is5325(dev) || is5365(dev))
423 			vc1 &= ~VC1_RX_MCST_TAG_EN;
424 	}
425 
426 	if (!is5325(dev) && !is5365(dev))
427 		vc5 &= ~VC5_VID_FFF_EN;
428 
429 	b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0);
430 	b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1);
431 
432 	if (is5325(dev) || is5365(dev)) {
433 		/* enable the high 8 bit vid check on 5325 */
434 		if (is5325(dev) && enable)
435 			b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3,
436 				   VC3_HIGH_8BIT_EN);
437 		else
438 			b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
439 
440 		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4);
441 		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5);
442 	} else if (is63xx(dev)) {
443 		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0);
444 		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4);
445 		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5);
446 	} else {
447 		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
448 		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4);
449 		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5);
450 	}
451 
452 	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
453 
454 	dev->vlan_enabled = enable;
455 }
437 
438 /* Enable or disable jumbo frame support on all enabled ports.
439  * @allow_10_100 additionally permits jumbo frames on 10/100 links.
440  * 5325/5365 have no jumbo registers, hence -EINVAL.
441  */
442 static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
443 {
444 	u16 frame_size = JMS_MIN_SIZE;
445 	u32 en_mask = 0;
446 
447 	if (is5325(dev) || is5365(dev))
448 		return -EINVAL;
449 
450 	if (enable) {
451 		en_mask = dev->enabled_ports;
452 		if (allow_10_100)
453 			en_mask |= JPM_10_100_JUMBO_EN;
454 		frame_size = JMS_MAX_SIZE;
455 	}
456 
457 	b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, en_mask);
458 	return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg,
459 			   frame_size);
460 }
456 
457 /* Flush dynamically learned ARL entries matching @mask (port, VLAN or
458  * static scope) and poll for completion.
459  *
460  * Return: 0 on success, -ETIMEDOUT if the flush did not finish.
461  */
462 static int b53_flush_arl(struct b53_device *dev, u8 mask)
463 {
464 	unsigned int attempts = 10;
465 	u8 reg;
466 
467 	b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
468 		   FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);
469 
470 	while (attempts--) {
471 		b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, &reg);
472 		if (!(reg & FAST_AGE_DONE))
473 			goto done;
474 
475 		msleep(1);
476 	}
477 
478 	return -ETIMEDOUT;
479 done:
480 	/* Only age dynamic entries (default behavior) */
481 	b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC);
482 	return 0;
483 }
482 
483 /* Flush all dynamically learned ARL entries for a single port. */
484 static int b53_fast_age_port(struct b53_device *dev, int port)
485 {
486 	/* Select the target port, then run a port-scoped flush */
487 	b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);
488 
489 	return b53_flush_arl(dev, FAST_AGE_PORT);
490 }
489 
490 /* Flush all dynamically learned ARL entries for a single VLAN. */
491 static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
492 {
493 	/* Select the target VID, then run a VLAN-scoped flush */
494 	b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);
495 
496 	return b53_flush_arl(dev, FAST_AGE_VLAN);
497 }
496 
497 void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
498 {
499 	struct b53_device *dev = ds->priv;
500 	unsigned int port;
501 	u16 mask;
502 
503 	/* Enable the IMP port to be in the same VLAN as the other ports
504 	 * on a per-port basis such that we only have Port i and IMP in
505 	 * the same VLAN.
506 	 */
507 	b53_for_each_port(dev, port) {
508 		b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port),
509 			   &mask);
510 		mask |= BIT(cpu_port);
511 		b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port),
512 			    mask);
513 	}
514 }
515 EXPORT_SYMBOL(b53_imp_vlan_setup);
514 
515 /* b53_enable_port - bring up a user-facing switch port
516  * @ds: DSA switch
517  * @port: port index
518  * @phy: attached PHY device (unused here)
519  *
520  * Enables flooding and interrupts for the port, clears its Rx/Tx
521  * disable bits, restores its port-based VLAN membership and re-links
522  * it with the CPU port. Non-user ports are left untouched.
523  *
524  * Return: 0 on success or the irq_enable() error code.
525  */
526 int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
527 {
528 	struct b53_device *dev = ds->priv;
529 	unsigned int cpu_port;
530 	int ret = 0;
531 	u16 pvlan;
532 
533 	if (!dsa_is_user_port(ds, port))
534 		return 0;
535 
536 	cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
537 
538 	b53_br_egress_floods(ds, port, true, true);
539 
540 	if (dev->ops->irq_enable)
541 		ret = dev->ops->irq_enable(dev, port);
542 	if (ret)
543 		return ret;
544 
545 	/* Clear the Rx and Tx disable bits and set to no spanning tree */
546 	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0);
547 
548 	/* Set this port, and only this one to be in the default VLAN,
549 	 * if member of a bridge, restore its membership prior to
550 	 * bringing down this port.
551 	 */
552 	b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
553 	pvlan &= ~0x1ff;
554 	pvlan |= BIT(port);
555 	pvlan |= dev->ports[port].vlan_ctl_mask;
556 	b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
557 
558 	b53_imp_vlan_setup(ds, cpu_port);
559 
560 	/* If EEE was enabled, restore it */
561 	if (dev->ports[port].eee.eee_enabled)
562 		b53_eee_enable_set(ds, port, true);
563 
564 	return 0;
565 }
566 EXPORT_SYMBOL(b53_enable_port);
556 
557 /* Take a port down: stop both directions of traffic and mask its
558  * interrupt if the backend provides per-port interrupt control.
559  */
560 void b53_disable_port(struct dsa_switch *ds, int port)
561 {
562 	struct b53_device *dev = ds->priv;
563 	u8 port_ctrl;
564 
565 	/* Disable Tx/Rx for the port */
566 	b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &port_ctrl);
567 	port_ctrl |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
568 	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
569 
570 	if (dev->ops->irq_disable)
571 		dev->ops->irq_disable(dev, port);
572 }
573 EXPORT_SYMBOL(b53_disable_port);
571 
572 /* b53_brcm_hdr_setup - configure Broadcom tag insertion for an IMP port
573  * @ds: DSA switch
574  * @port: IMP/CPU port index (5, 7 or 8)
575  *
576  * Enables or disables Broadcom header tagging on @port based on
577  * dev->tag_protocol, selects the matching frame-management port and,
578  * on 58xx-class devices, opens the per-port tag Rx/Tx gates.
579  */
580 void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
581 {
582 	struct b53_device *dev = ds->priv;
583 	bool tag_en = !(dev->tag_protocol == DSA_TAG_PROTO_NONE);
584 	u8 hdr_ctl, val;
585 	u16 reg;
586 
587 	/* Resolve which bit controls the Broadcom tag */
588 	switch (port) {
589 	case 8:
590 		val = BRCM_HDR_P8_EN;
591 		break;
592 	case 7:
593 		val = BRCM_HDR_P7_EN;
594 		break;
595 	case 5:
596 		val = BRCM_HDR_P5_EN;
597 		break;
598 	default:
599 		val = 0;
600 		break;
601 	}
602 
603 	/* Enable management mode if tagging is requested */
604 	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &hdr_ctl);
605 	if (tag_en)
606 		hdr_ctl |= SM_SW_FWD_MODE;
607 	else
608 		hdr_ctl &= ~SM_SW_FWD_MODE;
609 	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, hdr_ctl);
610 
611 	/* Configure the appropriate IMP port */
612 	b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &hdr_ctl);
613 	if (port == 8)
614 		hdr_ctl |= GC_FRM_MGMT_PORT_MII;
615 	else if (port == 5)
616 		hdr_ctl |= GC_FRM_MGMT_PORT_M;
617 	b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl);
618 
619 	/* Enable Broadcom tags for IMP port */
620 	b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
621 	if (tag_en)
622 		hdr_ctl |= val;
623 	else
624 		hdr_ctl &= ~val;
625 	b53_write8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, hdr_ctl);
626 
627 	/* Registers below are only accessible on newer devices */
628 	if (!is58xx(dev))
629 		return;
630 
631 	/* Enable reception Broadcom tag for CPU TX (switch RX) to
632 	 * allow us to tag outgoing frames
633 	 */
634 	b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, &reg);
635 	if (tag_en)
636 		reg &= ~BIT(port);
637 	else
638 		reg |= BIT(port);
639 	b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, reg);
640 
641 	/* Enable transmission of Broadcom tags from the switch (CPU RX) to
642 	 * allow delivering frames to the per-port net_devices
643 	 */
644 	b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, &reg);
645 	if (tag_en)
646 		reg &= ~BIT(port);
647 	else
648 		reg |= BIT(port);
649 	b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, reg);
650 }
651 EXPORT_SYMBOL(b53_brcm_hdr_setup);
644 
645 /* Bring up the CPU/IMP port: accept unicast, multicast and broadcast,
646  * program Broadcom header tagging and allow egress flooding.
647  */
648 static void b53_enable_cpu_port(struct b53_device *dev, int port)
649 {
650 	u8 rx_en;
651 
652 	/* BCM5325/5365 expose their CPU port at index 8 */
653 	if ((is5325(dev) || is5365(dev)) && port == B53_CPU_PORT_25)
654 		port = B53_CPU_PORT;
655 
656 	rx_en = PORT_CTRL_RX_BCST_EN | PORT_CTRL_RX_MCST_EN |
657 		PORT_CTRL_RX_UCST_EN;
658 	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), rx_en);
659 
660 	b53_brcm_hdr_setup(dev->ds, port);
661 
662 	b53_br_egress_floods(dev->ds, port, true, true);
663 }
662 
663 /* Release the MIB engine: clear the GC_RESET_MIB and GC_MIB_AC_EN
664  * bits in the global configuration register.
665  */
666 static void b53_enable_mib(struct b53_device *dev)
667 {
668 	u8 global_cfg;
669 
670 	b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &global_cfg);
671 	global_cfg &= ~(GC_RESET_MIB | GC_MIB_AC_EN);
672 	b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, global_cfg);
673 }
671 
672 /* Default port VID: the 5325/5365 generation uses VID 1, everything
673  * else uses VID 0.
674  */
675 static u16 b53_default_pvid(struct b53_device *dev)
676 {
677 	return (is5325(dev) || is5365(dev)) ? 1 : 0;
678 }
679 
680 /* b53_configure_vlan - (re)initialize the VLAN table and per-port PVIDs
681  * @ds: DSA switch
682  *
683  * Clears every VLAN table entry (either one-by-one on 5325/5365 or via
684  * the hardware clear command), re-applies the cached VLAN enable and
685  * filtering state and programs the default PVID on every port.
686  *
687  * Return: always 0.
688  */
689 int b53_configure_vlan(struct dsa_switch *ds)
690 {
691 	struct b53_device *dev = ds->priv;
692 	struct b53_vlan vl = { 0 };
693 	int i, def_vid;
694 
695 	def_vid = b53_default_pvid(dev);
696 
697 	/* clear all vlan entries */
698 	if (is5325(dev) || is5365(dev)) {
699 		for (i = def_vid; i < dev->num_vlans; i++)
700 			b53_set_vlan_entry(dev, i, &vl);
701 	} else {
702 		b53_do_vlan_op(dev, VTA_CMD_CLEAR);
703 	}
704 
705 	b53_enable_vlan(dev, dev->vlan_enabled, ds->vlan_filtering);
706 
707 	b53_for_each_port(dev, i)
708 		b53_write16(dev, B53_VLAN_PAGE,
709 			    B53_VLAN_PORT_DEF_TAG(i), def_vid);
710 
711 	return 0;
712 }
713 EXPORT_SYMBOL(b53_configure_vlan);
705 
706 /* Hard-reset the switch through its reset GPIO, when one is wired up. */
707 static void b53_switch_reset_gpio(struct b53_device *dev)
708 {
709 	int gpio = dev->reset_gpio;
710 
711 	if (gpio < 0)
712 		return;
713 
714 	/* Reset sequence: RESET low(50ms)->high(20ms) */
715 	gpio_set_value(gpio, 0);
716 	mdelay(50);
717 	gpio_set_value(gpio, 1);
718 	mdelay(20);
719 
720 	/* Force a page re-select on the next register access */
721 	dev->current_page = 0xff;
722 }
723 
724 /* b53_switch_reset - reset the switch core and restore a sane state
725  * @dev: switch device
726  *
727  * Performs the GPIO reset (if wired), the generation-specific soft
728  * reset, re-enables frame forwarding, the MIB engine and flushes
729  * static ARL entries.
730  *
731  * Return: 0 on success, -ETIMEDOUT if the 58xx soft reset does not
732  * complete, -EINVAL if forwarding cannot be enabled, or the ARL flush
733  * error code.
734  */
735 static int b53_switch_reset(struct b53_device *dev)
736 {
737 	unsigned int timeout = 1000;
738 	u8 mgmt, reg;
739 
740 	b53_switch_reset_gpio(dev);
741 
742 	if (is539x(dev)) {
743 		b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83);
744 		b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
745 	}
746 
747 	/* This is specific to 58xx devices here, do not use is58xx() which
748 	 * covers the larger Starfigther 2 family, including 7445/7278 which
749 	 * still use this driver as a library and need to perform the reset
750 	 * earlier.
751 	 */
752 	if (dev->chip_id == BCM58XX_DEVICE_ID ||
753 	    dev->chip_id == BCM583XX_DEVICE_ID) {
54 		b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
755 		reg |= SW_RST | EN_SW_RST | EN_CH_RST;
756 		b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);
757 
758 		/* Poll for SW_RST to self-clear. Note: the previous
759 		 * "while (timeout-- > 0)" post-decrement wrapped the
760 		 * unsigned counter to UINT_MAX on expiry, so the
761 		 * "timeout == 0" check below could never fire and the
762 		 * timeout was silently ignored.
763 		 */
764 		do {
765 			b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
766 			if (!(reg & SW_RST))
767 				break;
768 
769 			usleep_range(1000, 2000);
770 		} while (--timeout);
771 
772 		if (!timeout)
773 			return -ETIMEDOUT;
774 	}
775 
776 	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
777 
778 	/* Make sure forwarding is enabled; managed mode is configured
779 	 * later by b53_brcm_hdr_setup().
780 	 */
781 	if (!(mgmt & SM_SW_FWD_EN)) {
782 		mgmt &= ~SM_SW_FWD_MODE;
783 		mgmt |= SM_SW_FWD_EN;
784 
785 		b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
786 		b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
787 
788 		if (!(mgmt & SM_SW_FWD_EN)) {
789 			dev_err(dev->dev, "Failed to enable switch!\n");
790 			return -EINVAL;
791 		}
792 	}
793 
794 	b53_enable_mib(dev);
795 
796 	return b53_flush_arl(dev, FAST_AGE_STATIC);
797 }
778 
779 /* Read a 16-bit PHY register, either through a bus-specific accessor
780  * or via the pseudo-PHY MII page (registers are 2 bytes apart).
781  *
782  * Return: the register value on success, a negative error otherwise.
783  */
784 static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg)
785 {
786 	struct b53_device *priv = ds->priv;
787 	u16 val = 0;
788 	int err;
789 
790 	if (priv->ops->phy_read16)
791 		err = priv->ops->phy_read16(priv, addr, reg, &val);
792 	else
793 		err = b53_read16(priv, B53_PORT_MII_PAGE(addr), reg * 2, &val);
794 
795 	return err ? err : val;
796 }
793 
794 /* Write a 16-bit PHY register, mirroring b53_phy_read16(). */
795 static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
796 {
797 	struct b53_device *priv = ds->priv;
798 
799 	/* Prefer the bus-specific accessor when the backend has one */
800 	if (priv->ops->phy_write16)
801 		return priv->ops->phy_write16(priv, addr, reg, val);
802 
803 	/* Pseudo-PHY access: MII registers are mapped 2 bytes apart */
804 	return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val);
805 }
803 
804 /* Drop all cached software state and hard-reset the hardware. */
805 static int b53_reset_switch(struct b53_device *priv)
806 {
807 	/* Forget the software view of VLANs and per-port settings */
808 	memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans);
809 	memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports);
810 
811 	priv->serdes_lane = B53_INVALID_LANE;
812 
813 	return b53_switch_reset(priv);
814 }
814 
815 /* Rewrite the VLAN configuration with forwarding quiesced around it. */
816 static int b53_apply_config(struct b53_device *priv)
817 {
818 	/* disable switching while the tables are rebuilt */
819 	b53_set_forwarding(priv, 0);
820 
821 	b53_configure_vlan(priv->ds);
822 
823 	/* re-enable switching */
824 	b53_set_forwarding(priv, 1);
825 
826 	return 0;
827 }
827 
828 /* Clear all MIB counters by pulsing GC_RESET_MIB. */
829 static void b53_reset_mib(struct b53_device *priv)
830 {
831 	u8 global_cfg;
832 
833 	b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &global_cfg);
834 
835 	/* assert, wait, then deassert the MIB reset bit */
836 	b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG,
837 		   global_cfg | GC_RESET_MIB);
838 	msleep(1);
839 	b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG,
840 		   global_cfg & ~GC_RESET_MIB);
841 	msleep(1);
842 }
839 
840 /* Select the MIB counter table matching this chip generation. */
841 static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev)
842 {
843 	if (is5365(dev))
844 		return b53_mibs_65;
845 	if (is63xx(dev))
846 		return b53_mibs_63xx;
847 	if (is58xx(dev))
848 		return b53_mibs_58xx;
849 
850 	return b53_mibs;
851 }
851 
852 /* Number of entries in the table returned by b53_get_mib(); the two
853  * selectors must stay in sync.
854  */
855 static unsigned int b53_get_mib_size(struct b53_device *dev)
856 {
857 	if (is5365(dev))
858 		return B53_MIBS_65_SIZE;
859 	if (is63xx(dev))
860 		return B53_MIBS_63XX_SIZE;
861 	if (is58xx(dev))
862 		return B53_MIBS_58XX_SIZE;
863 
864 	return B53_MIBS_SIZE;
865 }
863 
864 /* Return the internal PHY for @port, or NULL for ports that typically
865  * lack a built-in PHY (the CPU/IMP ports and port 7).
866  */
867 static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port)
868 {
869 	if (port == B53_CPU_PORT_25 || port == 7 || port == B53_CPU_PORT)
870 		return NULL;
871 
872 	return mdiobus_get_phy(ds->slave_mii_bus, port);
873 }
876 
877 /* Fill @data with ethtool stat names: the MIB counter names for
878  * ETH_SS_STATS, or the attached PHY's names for ETH_SS_PHY_STATS.
879  */
880 void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
881 		     uint8_t *data)
882 {
883 	struct b53_device *dev = ds->priv;
884 	const struct b53_mib_desc *mibs = b53_get_mib(dev);
885 	unsigned int count = b53_get_mib_size(dev);
886 	struct phy_device *phydev;
887 	unsigned int i;
888 
889 	switch (stringset) {
890 	case ETH_SS_STATS:
891 		for (i = 0; i < count; i++)
892 			strlcpy(data + i * ETH_GSTRING_LEN, mibs[i].name,
893 				ETH_GSTRING_LEN);
894 		break;
895 	case ETH_SS_PHY_STATS:
896 		phydev = b53_get_phy_device(ds, port);
897 		if (phydev)
898 			phy_ethtool_get_strings(phydev, data);
899 		break;
900 	}
901 }
902 EXPORT_SYMBOL(b53_get_strings);
899 
900 /* Read every MIB counter for @port into @data, widening 32-bit
901  * counters to u64. Serialized against other readers via stats_mutex.
902  */
903 void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
904 {
905 	struct b53_device *dev = ds->priv;
906 	const struct b53_mib_desc *mibs = b53_get_mib(dev);
907 	unsigned int count = b53_get_mib_size(dev);
908 	unsigned int i;
909 
910 	/* On BCM5365, port 5 statistics are read from MIB page 8 */
911 	if (is5365(dev) && port == 5)
912 		port = 8;
913 
914 	mutex_lock(&dev->stats_mutex);
915 
916 	for (i = 0; i < count; i++) {
917 		const struct b53_mib_desc *d = &mibs[i];
918 
919 		if (d->size == 8) {
920 			u64 val64 = 0;
921 
922 			b53_read64(dev, B53_MIB_PAGE(port), d->offset, &val64);
923 			data[i] = val64;
924 		} else {
925 			u32 val32 = 0;
926 
927 			b53_read32(dev, B53_MIB_PAGE(port), d->offset, &val32);
928 			data[i] = val32;
929 		}
930 	}
931 
932 	mutex_unlock(&dev->stats_mutex);
933 }
934 EXPORT_SYMBOL(b53_get_ethtool_stats);
932 
933 /* Fetch PHY statistics for @port; a no-op for ports without an
934  * internal PHY.
935  */
936 void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data)
937 {
938 	struct phy_device *phydev = b53_get_phy_device(ds, port);
939 
940 	if (phydev)
941 		phy_ethtool_get_stats(phydev, NULL, data);
942 }
943 EXPORT_SYMBOL(b53_get_ethtool_phy_stats);
944 
945 /* Number of ethtool statistics strings for the given string set:
946  * the MIB table size for ETH_SS_STATS, the PHY's own count for
947  * ETH_SS_PHY_STATS (0 when the port has no internal PHY), else 0.
948  */
949 int b53_get_sset_count(struct dsa_switch *ds, int port, int sset)
950 {
951 	struct b53_device *dev = ds->priv;
952 	struct phy_device *phydev;
953 
954 	switch (sset) {
955 	case ETH_SS_STATS:
956 		return b53_get_mib_size(dev);
957 	case ETH_SS_PHY_STATS:
958 		phydev = b53_get_phy_device(ds, port);
959 		return phydev ? phy_ethtool_get_sset_count(phydev) : 0;
960 	default:
961 		return 0;
962 	}
963 }
964 EXPORT_SYMBOL(b53_get_sset_count);
963 
964 /* b53_setup - one-time DSA setup callback
965  * @ds: DSA switch
966  *
967  * Resets the switch, clears the MIB counters, applies the VLAN
968  * configuration, then enables the CPU port and disables every other
969  * port (user ports come back up through .port_enable).
970  *
971  * Return: 0 on success, or the reset/configuration error code.
972  */
973 static int b53_setup(struct dsa_switch *ds)
974 {
975 	struct b53_device *dev = ds->priv;
976 	unsigned int port;
977 	int ret;
978 
979 	ret = b53_reset_switch(dev);
980 	if (ret) {
981 		dev_err(ds->dev, "failed to reset switch\n");
982 		return ret;
983 	}
984 
985 	b53_reset_mib(dev);
986 
987 	ret = b53_apply_config(dev);
988 	if (ret)
989 		dev_err(ds->dev, "failed to apply configuration\n");
990 
991 	/* Configure IMP/CPU port, disable all other ports. Enabled
992 	 * ports will be configured with .port_enable
993 	 */
994 	for (port = 0; port < dev->num_ports; port++) {
995 		if (dsa_is_cpu_port(ds, port))
996 			b53_enable_cpu_port(dev, port);
997 		else
998 			b53_disable_port(ds, port);
999 	}
1000 
1001 	/* Let DSA handle the case were multiple bridges span the same switch
1002 	 * device and different VLAN awareness settings are requested, which
1003 	 * would be breaking filtering semantics for any of the other bridge
1004 	 * devices. (not hardware supported)
1005 	 */
1006 	ds->vlan_filtering_is_global = true;
1007 
1008 	return ret;
1009 }
1001 
1002 /* Force the link state of @port through the override registers: the
1003  * CPU port uses the global override register, all other ports use
1004  * their per-port GMII override register.
1005  */
1006 static void b53_force_link(struct b53_device *dev, int port, int link)
1007 {
1008 	u8 ovr, en_bit, off;
1009 
1010 	/* Override the port settings */
1011 	if (port == dev->cpu_port) {
1012 		off = B53_PORT_OVERRIDE_CTRL;
1013 		en_bit = PORT_OVERRIDE_EN;
1014 	} else {
1015 		off = B53_GMII_PORT_OVERRIDE_CTRL(port);
1016 		en_bit = GMII_PO_EN;
1017 	}
1018 
1019 	b53_read8(dev, B53_CTRL_PAGE, off, &ovr);
1020 	ovr |= en_bit;
1021 	if (link)
1022 		ovr |= PORT_OVERRIDE_LINK;
1023 	else
1024 		ovr &= ~PORT_OVERRIDE_LINK;
1025 	b53_write8(dev, B53_CTRL_PAGE, off, ovr);
1026 }
1023 
1024 /* b53_force_port_config - force speed/duplex/pause via port override
1025  * @dev: switch device
1026  * @port: port index (CPU port uses the global override register)
1027  * @speed: 10/100/1000, or the raw value 2000 for 2Gb/s capable ports
1028  * @duplex: DUPLEX_FULL or DUPLEX_HALF
1029  * @pause: MLO_PAUSE_RX/MLO_PAUSE_TX flags
1030  *
1031  * Unknown speeds are logged and leave the register untouched.
1032  */
1033 static void b53_force_port_config(struct b53_device *dev, int port,
1034 				  int speed, int duplex, int pause)
1035 {
1036 	u8 reg, val, off;
1037 
1038 	/* Override the port settings */
1039 	if (port == dev->cpu_port) {
1040 		off = B53_PORT_OVERRIDE_CTRL;
1041 		val = PORT_OVERRIDE_EN;
1042 	} else {
1043 		off = B53_GMII_PORT_OVERRIDE_CTRL(port);
1044 		val = GMII_PO_EN;
1045 	}
1046 
1047 	b53_read8(dev, B53_CTRL_PAGE, off, &reg);
1048 	reg |= val;
1049 	if (duplex == DUPLEX_FULL)
1050 		reg |= PORT_OVERRIDE_FULL_DUPLEX;
1051 	else
1052 		reg &= ~PORT_OVERRIDE_FULL_DUPLEX;
1053 
1054 	switch (speed) {
1055 	case 2000:
1056 		reg |= PORT_OVERRIDE_SPEED_2000M;
1057 		/* deliberate fallthrough: 2Gb/s sets the 1000M bit too */
1058 		/* fallthrough */
1059 	case SPEED_1000:
1060 		reg |= PORT_OVERRIDE_SPEED_1000M;
1061 		break;
1062 	case SPEED_100:
1063 		reg |= PORT_OVERRIDE_SPEED_100M;
1064 		break;
1065 	case SPEED_10:
1066 		reg |= PORT_OVERRIDE_SPEED_10M;
1067 		break;
1068 	default:
1069 		dev_err(dev->dev, "unknown speed: %d\n", speed);
1070 		return;
1071 	}
1072 
1073 	if (pause & MLO_PAUSE_RX)
1074 		reg |= PORT_OVERRIDE_RX_FLOW;
1075 	if (pause & MLO_PAUSE_TX)
1076 		reg |= PORT_OVERRIDE_TX_FLOW;
1077 
1078 	b53_write8(dev, B53_CTRL_PAGE, off, reg);
1079 }
1070 
/* Legacy adjust_link callback: program the MAC-side configuration for a
 * fixed-link (pseudo fixed PHY) port, including RGMII delays, reverse MII
 * on 5325, the BCM5301x CPU port, and EEE re-negotiation.
 */
static void b53_adjust_link(struct dsa_switch *ds, int port,
			    struct phy_device *phydev)
{
	struct b53_device *dev = ds->priv;
	struct ethtool_eee *p = &dev->ports[port].eee;
	u8 rgmii_ctrl = 0, reg = 0, off;
	int pause = 0;

	/* Only fixed links are handled here; real PHYs are left alone */
	if (!phy_is_pseudo_fixed_link(phydev))
		return;

	/* Enable flow control on BCM5301x's CPU port */
	if (is5301x(dev) && port == dev->cpu_port)
		pause = MLO_PAUSE_TXRX_MASK;

	/* Translate the PHY's pause advertisement into MAC pause flags */
	if (phydev->pause) {
		if (phydev->asym_pause)
			pause |= MLO_PAUSE_TX;
		pause |= MLO_PAUSE_RX;
	}

	b53_force_port_config(dev, port, phydev->speed, phydev->duplex, pause);
	b53_force_link(dev, port, phydev->link);

	if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
		/* Port 8 (IMP) has its own RGMII control register */
		if (port == 8)
			off = B53_RGMII_CTRL_IMP;
		else
			off = B53_RGMII_CTRL_P(port);

		/* Configure the port RGMII clock delay by DLL disabled and
		 * tx_clk aligned timing (restoring to reset defaults)
		 */
		b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
		rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
				RGMII_CTRL_TIMING_SEL);

		/* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
		 * sure that we enable the port TX clock internal delay to
		 * account for this internal delay that is inserted, otherwise
		 * the switch won't be able to receive correctly.
		 *
		 * PHY_INTERFACE_MODE_RGMII means that we are not introducing
		 * any delay neither on transmission nor reception, so the
		 * BCM53125 must also be configured accordingly to account for
		 * the lack of delay and introduce
		 *
		 * The BCM53125 switch has its RX clock and TX clock control
		 * swapped, hence the reason why we modify the TX clock path in
		 * the "RGMII" case
		 */
		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
			rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
		if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
			rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
		rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
		b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);

		dev_info(ds->dev, "Configured port %d for %s\n", port,
			 phy_modes(phydev->interface));
	}

	/* configure MII port if necessary */
	if (is5325(dev)) {
		b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
			  &reg);

		/* reverse mii needs to be enabled */
		if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
			b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
				   reg | PORT_OVERRIDE_RV_MII_25);
			/* Read back to verify the bit actually latched */
			b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
				  &reg);

			if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
				dev_err(ds->dev,
					"Failed to enable reverse MII mode\n");
				return;
			}
		}
	} else if (is5301x(dev)) {
		/* Any user port coming up also forces the CPU port to 2Gb/s
		 * full duplex with flow control
		 */
		if (port != dev->cpu_port) {
			b53_force_port_config(dev, dev->cpu_port, 2000,
					      DUPLEX_FULL, MLO_PAUSE_TXRX_MASK);
			b53_force_link(dev, dev->cpu_port, 1);
		}
	}

	/* Re-negotiate EEE if it was enabled already */
	p->eee_enabled = b53_eee_init(ds, port, phydev);
}
1162 
1163 void b53_port_event(struct dsa_switch *ds, int port)
1164 {
1165 	struct b53_device *dev = ds->priv;
1166 	bool link;
1167 	u16 sts;
1168 
1169 	b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts);
1170 	link = !!(sts & BIT(port));
1171 	dsa_port_phylink_mac_change(ds, port, link);
1172 }
1173 EXPORT_SYMBOL(b53_port_event);
1174 
/* phylink validate callback: restrict @supported and the advertising mask
 * to the link modes this port can actually handle. Starts from an empty
 * mask, lets a SerDes backend seed it, then adds the MAC capabilities.
 */
void b53_phylink_validate(struct dsa_switch *ds, int port,
			  unsigned long *supported,
			  struct phylink_link_state *state)
{
	struct b53_device *dev = ds->priv;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* SerDes-capable chips contribute their own modes first */
	if (dev->ops->serdes_phylink_validate)
		dev->ops->serdes_phylink_validate(dev, port, mask, state);

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	/* With the exclusion of 5325/5365, MII, Reverse MII and 802.3z, we
	 * support Gigabit, including Half duplex.
	 */
	if (state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII &&
	    !phy_interface_mode_is_8023z(state->interface) &&
	    !(is5325(dev) || is5365(dev))) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseT_Half);
	}

	/* 10/100 BaseT modes apply to everything except 802.3z (fiber) */
	if (!phy_interface_mode_is_8023z(state->interface)) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
	}

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);

	phylink_helper_basex_speed(state);
}
EXPORT_SYMBOL(b53_phylink_validate);
1217 
1218 int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
1219 			       struct phylink_link_state *state)
1220 {
1221 	struct b53_device *dev = ds->priv;
1222 	int ret = -EOPNOTSUPP;
1223 
1224 	if ((phy_interface_mode_is_8023z(state->interface) ||
1225 	     state->interface == PHY_INTERFACE_MODE_SGMII) &&
1226 	     dev->ops->serdes_link_state)
1227 		ret = dev->ops->serdes_link_state(dev, port, state);
1228 
1229 	return ret;
1230 }
1231 EXPORT_SYMBOL(b53_phylink_mac_link_state);
1232 
1233 void b53_phylink_mac_config(struct dsa_switch *ds, int port,
1234 			    unsigned int mode,
1235 			    const struct phylink_link_state *state)
1236 {
1237 	struct b53_device *dev = ds->priv;
1238 
1239 	if (mode == MLO_AN_PHY)
1240 		return;
1241 
1242 	if (mode == MLO_AN_FIXED) {
1243 		b53_force_port_config(dev, port, state->speed,
1244 				      state->duplex, state->pause);
1245 		return;
1246 	}
1247 
1248 	if ((phy_interface_mode_is_8023z(state->interface) ||
1249 	     state->interface == PHY_INTERFACE_MODE_SGMII) &&
1250 	     dev->ops->serdes_config)
1251 		dev->ops->serdes_config(dev, port, mode, state);
1252 }
1253 EXPORT_SYMBOL(b53_phylink_mac_config);
1254 
1255 void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port)
1256 {
1257 	struct b53_device *dev = ds->priv;
1258 
1259 	if (dev->ops->serdes_an_restart)
1260 		dev->ops->serdes_an_restart(dev, port);
1261 }
1262 EXPORT_SYMBOL(b53_phylink_mac_an_restart);
1263 
1264 void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
1265 			       unsigned int mode,
1266 			       phy_interface_t interface)
1267 {
1268 	struct b53_device *dev = ds->priv;
1269 
1270 	if (mode == MLO_AN_PHY)
1271 		return;
1272 
1273 	if (mode == MLO_AN_FIXED) {
1274 		b53_force_link(dev, port, false);
1275 		return;
1276 	}
1277 
1278 	if (phy_interface_mode_is_8023z(interface) &&
1279 	    dev->ops->serdes_link_set)
1280 		dev->ops->serdes_link_set(dev, port, mode, interface, false);
1281 }
1282 EXPORT_SYMBOL(b53_phylink_mac_link_down);
1283 
1284 void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
1285 			     unsigned int mode,
1286 			     phy_interface_t interface,
1287 			     struct phy_device *phydev,
1288 			     int speed, int duplex,
1289 			     bool tx_pause, bool rx_pause)
1290 {
1291 	struct b53_device *dev = ds->priv;
1292 
1293 	if (mode == MLO_AN_PHY)
1294 		return;
1295 
1296 	if (mode == MLO_AN_FIXED) {
1297 		b53_force_link(dev, port, true);
1298 		return;
1299 	}
1300 
1301 	if (phy_interface_mode_is_8023z(interface) &&
1302 	    dev->ops->serdes_link_set)
1303 		dev->ops->serdes_link_set(dev, port, mode, interface, true);
1304 }
1305 EXPORT_SYMBOL(b53_phylink_mac_link_up);
1306 
1307 int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
1308 {
1309 	struct b53_device *dev = ds->priv;
1310 	u16 pvid, new_pvid;
1311 
1312 	b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
1313 	new_pvid = pvid;
1314 	if (!vlan_filtering) {
1315 		/* Filtering is currently enabled, use the default PVID since
1316 		 * the bridge does not expect tagging anymore
1317 		 */
1318 		dev->ports[port].pvid = pvid;
1319 		new_pvid = b53_default_pvid(dev);
1320 	} else {
1321 		/* Filtering is currently disabled, restore the previous PVID */
1322 		new_pvid = dev->ports[port].pvid;
1323 	}
1324 
1325 	if (pvid != new_pvid)
1326 		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
1327 			    new_pvid);
1328 
1329 	b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering);
1330 
1331 	return 0;
1332 }
1333 EXPORT_SYMBOL(b53_vlan_filtering);
1334 
1335 int b53_vlan_prepare(struct dsa_switch *ds, int port,
1336 		     const struct switchdev_obj_port_vlan *vlan)
1337 {
1338 	struct b53_device *dev = ds->priv;
1339 
1340 	if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0)
1341 		return -EOPNOTSUPP;
1342 
1343 	if (vlan->vid_end > dev->num_vlans)
1344 		return -ERANGE;
1345 
1346 	b53_enable_vlan(dev, true, ds->vlan_filtering);
1347 
1348 	return 0;
1349 }
1350 EXPORT_SYMBOL(b53_vlan_prepare);
1351 
/* Install the VID range from @vlan into the VLAN table for @port, updating
 * the member and untagged bitmaps of each entry, and program the port's
 * default VID when BRIDGE_VLAN_INFO_PVID is requested.
 */
void b53_vlan_add(struct dsa_switch *ds, int port,
		  const struct switchdev_obj_port_vlan *vlan)
{
	struct b53_device *dev = ds->priv;
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct b53_vlan *vl;
	u16 vid;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		vl = &dev->vlans[vid];

		b53_get_vlan_entry(dev, vid, vl);

		/* NOTE(review): the second comparison limits this to chips
		 * whose default PVID is 0 — VID 0 is then always treated as
		 * untagged; confirm this is the intended restriction.
		 */
		if (vid == 0 && vid == b53_default_pvid(dev))
			untagged = true;

		vl->members |= BIT(port);
		/* The CPU port always keeps its frames tagged */
		if (untagged && !dsa_is_cpu_port(ds, port))
			vl->untag |= BIT(port);
		else
			vl->untag &= ~BIT(port);

		b53_set_vlan_entry(dev, vid, vl);
		b53_fast_age_vlan(dev, vid);
	}

	/* Use the last VID of the range as the port's default VID */
	if (pvid && !dsa_is_cpu_port(ds, port)) {
		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
			    vlan->vid_end);
		b53_fast_age_vlan(dev, vid);
	}
}
EXPORT_SYMBOL(b53_vlan_add);
1386 
/* Remove @port from the VID range in @vlan. If the port's PVID falls inside
 * the removed range, it is reset to the chip's default PVID.
 */
int b53_vlan_del(struct dsa_switch *ds, int port,
		 const struct switchdev_obj_port_vlan *vlan)
{
	struct b53_device *dev = ds->priv;
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	struct b53_vlan *vl;
	u16 vid;
	u16 pvid;

	b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		vl = &dev->vlans[vid];

		b53_get_vlan_entry(dev, vid, vl);

		vl->members &= ~BIT(port);

		/* Deleting the current PVID: fall back to the default one */
		if (pvid == vid)
			pvid = b53_default_pvid(dev);

		if (untagged && !dsa_is_cpu_port(ds, port))
			vl->untag &= ~(BIT(port));

		b53_set_vlan_entry(dev, vid, vl);
		b53_fast_age_vlan(dev, vid);
	}

	/* Write back the (possibly reset) PVID and flush learned addresses */
	b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
	b53_fast_age_vlan(dev, pvid);

	return 0;
}
EXPORT_SYMBOL(b53_vlan_del);
1421 
1422 /* Address Resolution Logic routines */
1423 static int b53_arl_op_wait(struct b53_device *dev)
1424 {
1425 	unsigned int timeout = 10;
1426 	u8 reg;
1427 
1428 	do {
1429 		b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
1430 		if (!(reg & ARLTBL_START_DONE))
1431 			return 0;
1432 
1433 		usleep_range(1000, 2000);
1434 	} while (timeout--);
1435 
1436 	dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg);
1437 
1438 	return -ETIMEDOUT;
1439 }
1440 
1441 static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
1442 {
1443 	u8 reg;
1444 
1445 	if (op > ARLTBL_RW)
1446 		return -EINVAL;
1447 
1448 	b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
1449 	reg |= ARLTBL_START_DONE;
1450 	if (op)
1451 		reg |= ARLTBL_RW;
1452 	else
1453 		reg &= ~ARLTBL_RW;
1454 	b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);
1455 
1456 	return b53_arl_op_wait(dev);
1457 }
1458 
1459 static int b53_arl_read(struct b53_device *dev, u64 mac,
1460 			u16 vid, struct b53_arl_entry *ent, u8 *idx,
1461 			bool is_valid)
1462 {
1463 	unsigned int i;
1464 	int ret;
1465 
1466 	ret = b53_arl_op_wait(dev);
1467 	if (ret)
1468 		return ret;
1469 
1470 	/* Read the bins */
1471 	for (i = 0; i < dev->num_arl_entries; i++) {
1472 		u64 mac_vid;
1473 		u32 fwd_entry;
1474 
1475 		b53_read64(dev, B53_ARLIO_PAGE,
1476 			   B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid);
1477 		b53_read32(dev, B53_ARLIO_PAGE,
1478 			   B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
1479 		b53_arl_to_entry(ent, mac_vid, fwd_entry);
1480 
1481 		if (!(fwd_entry & ARLTBL_VALID))
1482 			continue;
1483 		if ((mac_vid & ARLTBL_MAC_MASK) != mac)
1484 			continue;
1485 		*idx = i;
1486 	}
1487 
1488 	return -ENOENT;
1489 }
1490 
1491 static int b53_arl_op(struct b53_device *dev, int op, int port,
1492 		      const unsigned char *addr, u16 vid, bool is_valid)
1493 {
1494 	struct b53_arl_entry ent;
1495 	u32 fwd_entry;
1496 	u64 mac, mac_vid = 0;
1497 	u8 idx = 0;
1498 	int ret;
1499 
1500 	/* Convert the array into a 64-bit MAC */
1501 	mac = ether_addr_to_u64(addr);
1502 
1503 	/* Perform a read for the given MAC and VID */
1504 	b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
1505 	b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
1506 
1507 	/* Issue a read operation for this MAC */
1508 	ret = b53_arl_rw_op(dev, 1);
1509 	if (ret)
1510 		return ret;
1511 
1512 	ret = b53_arl_read(dev, mac, vid, &ent, &idx, is_valid);
1513 	/* If this is a read, just finish now */
1514 	if (op)
1515 		return ret;
1516 
1517 	/* We could not find a matching MAC, so reset to a new entry */
1518 	if (ret) {
1519 		fwd_entry = 0;
1520 		idx = 1;
1521 	}
1522 
1523 	/* For multicast address, the port is a bitmask and the validity
1524 	 * is determined by having at least one port being still active
1525 	 */
1526 	if (!is_multicast_ether_addr(addr)) {
1527 		ent.port = port;
1528 		ent.is_valid = is_valid;
1529 	} else {
1530 		if (is_valid)
1531 			ent.port |= BIT(port);
1532 		else
1533 			ent.port &= ~BIT(port);
1534 
1535 		ent.is_valid = !!(ent.port);
1536 	}
1537 
1538 	ent.is_valid = is_valid;
1539 	ent.vid = vid;
1540 	ent.is_static = true;
1541 	ent.is_age = false;
1542 	memcpy(ent.mac, addr, ETH_ALEN);
1543 	b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
1544 
1545 	b53_write64(dev, B53_ARLIO_PAGE,
1546 		    B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
1547 	b53_write32(dev, B53_ARLIO_PAGE,
1548 		    B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
1549 
1550 	return b53_arl_rw_op(dev, 0);
1551 }
1552 
1553 int b53_fdb_add(struct dsa_switch *ds, int port,
1554 		const unsigned char *addr, u16 vid)
1555 {
1556 	struct b53_device *priv = ds->priv;
1557 
1558 	/* 5325 and 5365 require some more massaging, but could
1559 	 * be supported eventually
1560 	 */
1561 	if (is5325(priv) || is5365(priv))
1562 		return -EOPNOTSUPP;
1563 
1564 	return b53_arl_op(priv, 0, port, addr, vid, true);
1565 }
1566 EXPORT_SYMBOL(b53_fdb_add);
1567 
1568 int b53_fdb_del(struct dsa_switch *ds, int port,
1569 		const unsigned char *addr, u16 vid)
1570 {
1571 	struct b53_device *priv = ds->priv;
1572 
1573 	return b53_arl_op(priv, 0, port, addr, vid, false);
1574 }
1575 EXPORT_SYMBOL(b53_fdb_del);
1576 
1577 static int b53_arl_search_wait(struct b53_device *dev)
1578 {
1579 	unsigned int timeout = 1000;
1580 	u8 reg;
1581 
1582 	do {
1583 		b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, &reg);
1584 		if (!(reg & ARL_SRCH_STDN))
1585 			return 0;
1586 
1587 		if (reg & ARL_SRCH_VLID)
1588 			return 0;
1589 
1590 		usleep_range(1000, 2000);
1591 	} while (timeout--);
1592 
1593 	return -ETIMEDOUT;
1594 }
1595 
1596 static void b53_arl_search_rd(struct b53_device *dev, u8 idx,
1597 			      struct b53_arl_entry *ent)
1598 {
1599 	u64 mac_vid;
1600 	u32 fwd_entry;
1601 
1602 	b53_read64(dev, B53_ARLIO_PAGE,
1603 		   B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid);
1604 	b53_read32(dev, B53_ARLIO_PAGE,
1605 		   B53_ARL_SRCH_RSTL(idx), &fwd_entry);
1606 	b53_arl_to_entry(ent, mac_vid, fwd_entry);
1607 }
1608 
1609 static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
1610 			dsa_fdb_dump_cb_t *cb, void *data)
1611 {
1612 	if (!ent->is_valid)
1613 		return 0;
1614 
1615 	if (port != ent->port)
1616 		return 0;
1617 
1618 	return cb(ent->mac, ent->vid, ent->is_static, data);
1619 }
1620 
/* Walk the hardware ARL through its search engine and report every entry
 * belonging to @port via @cb. Each search step returns up to two bins; the
 * second bin is only consumed on chips exposing more than two ARL bins.
 * The 1024-iteration cap bounds the walk if the hardware never completes.
 */
int b53_fdb_dump(struct dsa_switch *ds, int port,
		 dsa_fdb_dump_cb_t *cb, void *data)
{
	struct b53_device *priv = ds->priv;
	struct b53_arl_entry results[2];
	unsigned int count = 0;
	int ret;
	u8 reg;

	/* Start search operation */
	reg = ARL_SRCH_STDN;
	b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);

	do {
		ret = b53_arl_search_wait(priv);
		if (ret)
			return ret;

		/* First result bin is always available */
		b53_arl_search_rd(priv, 0, &results[0]);
		ret = b53_fdb_copy(port, &results[0], cb, data);
		if (ret)
			return ret;

		if (priv->num_arl_entries > 2) {
			b53_arl_search_rd(priv, 1, &results[1]);
			ret = b53_fdb_copy(port, &results[1], cb, data);
			if (ret)
				return ret;

			/* Two invalid bins in a row means the search is done */
			if (!results[0].is_valid && !results[1].is_valid)
				break;
		}

	} while (count++ < 1024);

	return 0;
}
EXPORT_SYMBOL(b53_fdb_dump);
1659 
1660 int b53_mdb_prepare(struct dsa_switch *ds, int port,
1661 		    const struct switchdev_obj_port_mdb *mdb)
1662 {
1663 	struct b53_device *priv = ds->priv;
1664 
1665 	/* 5325 and 5365 require some more massaging, but could
1666 	 * be supported eventually
1667 	 */
1668 	if (is5325(priv) || is5365(priv))
1669 		return -EOPNOTSUPP;
1670 
1671 	return 0;
1672 }
1673 EXPORT_SYMBOL(b53_mdb_prepare);
1674 
1675 void b53_mdb_add(struct dsa_switch *ds, int port,
1676 		 const struct switchdev_obj_port_mdb *mdb)
1677 {
1678 	struct b53_device *priv = ds->priv;
1679 	int ret;
1680 
1681 	ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
1682 	if (ret)
1683 		dev_err(ds->dev, "failed to add MDB entry\n");
1684 }
1685 EXPORT_SYMBOL(b53_mdb_add);
1686 
1687 int b53_mdb_del(struct dsa_switch *ds, int port,
1688 		const struct switchdev_obj_port_mdb *mdb)
1689 {
1690 	struct b53_device *priv = ds->priv;
1691 	int ret;
1692 
1693 	ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false);
1694 	if (ret)
1695 		dev_err(ds->dev, "failed to delete MDB entry\n");
1696 
1697 	return ret;
1698 }
1699 EXPORT_SYMBOL(b53_mdb_del);
1700 
/* Add @port to bridge @br: open the port-based VLAN membership between this
 * port and every other port already offloading the same bridge, and take
 * the port out of the 58xx "join all VLANs" mode.
 */
int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
{
	struct b53_device *dev = ds->priv;
	s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
	u16 pvlan, reg;
	unsigned int i;

	/* Make this port leave the all VLANs join since we will have proper
	 * VLAN entries from now on
	 */
	if (is58xx(dev)) {
		b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
		reg &= ~BIT(port);
		/* The CPU port follows the last user port out of this mode */
		if ((reg & BIT(cpu_port)) == BIT(cpu_port))
			reg &= ~BIT(cpu_port);
		b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
	}

	b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);

	b53_for_each_port(dev, i) {
		/* Only consider ports attached to the same bridge */
		if (dsa_to_port(ds, i)->bridge_dev != br)
			continue;

		/* Add this local port to the remote port VLAN control
		 * membership and update the remote port bitmask
		 */
		b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
		reg |= BIT(port);
		b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
		dev->ports[i].vlan_ctl_mask = reg;

		pvlan |= BIT(i);
	}

	/* Configure the local port VLAN control membership to include
	 * remote ports and update the local port bitmask
	 */
	b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
	dev->ports[port].vlan_ctl_mask = pvlan;

	return 0;
}
EXPORT_SYMBOL(b53_br_join);
1745 
1746 void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
1747 {
1748 	struct b53_device *dev = ds->priv;
1749 	struct b53_vlan *vl = &dev->vlans[0];
1750 	s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
1751 	unsigned int i;
1752 	u16 pvlan, reg, pvid;
1753 
1754 	b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
1755 
1756 	b53_for_each_port(dev, i) {
1757 		/* Don't touch the remaining ports */
1758 		if (dsa_to_port(ds, i)->bridge_dev != br)
1759 			continue;
1760 
1761 		b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
1762 		reg &= ~BIT(port);
1763 		b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
1764 		dev->ports[port].vlan_ctl_mask = reg;
1765 
1766 		/* Prevent self removal to preserve isolation */
1767 		if (port != i)
1768 			pvlan &= ~BIT(i);
1769 	}
1770 
1771 	b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
1772 	dev->ports[port].vlan_ctl_mask = pvlan;
1773 
1774 	pvid = b53_default_pvid(dev);
1775 
1776 	/* Make this port join all VLANs without VLAN entries */
1777 	if (is58xx(dev)) {
1778 		b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
1779 		reg |= BIT(port);
1780 		if (!(reg & BIT(cpu_port)))
1781 			reg |= BIT(cpu_port);
1782 		b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
1783 	} else {
1784 		b53_get_vlan_entry(dev, pvid, vl);
1785 		vl->members |= BIT(port) | BIT(cpu_port);
1786 		vl->untag |= BIT(port) | BIT(cpu_port);
1787 		b53_set_vlan_entry(dev, pvid, vl);
1788 	}
1789 }
1790 EXPORT_SYMBOL(b53_br_leave);
1791 
1792 void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
1793 {
1794 	struct b53_device *dev = ds->priv;
1795 	u8 hw_state;
1796 	u8 reg;
1797 
1798 	switch (state) {
1799 	case BR_STATE_DISABLED:
1800 		hw_state = PORT_CTRL_DIS_STATE;
1801 		break;
1802 	case BR_STATE_LISTENING:
1803 		hw_state = PORT_CTRL_LISTEN_STATE;
1804 		break;
1805 	case BR_STATE_LEARNING:
1806 		hw_state = PORT_CTRL_LEARN_STATE;
1807 		break;
1808 	case BR_STATE_FORWARDING:
1809 		hw_state = PORT_CTRL_FWD_STATE;
1810 		break;
1811 	case BR_STATE_BLOCKING:
1812 		hw_state = PORT_CTRL_BLOCK_STATE;
1813 		break;
1814 	default:
1815 		dev_err(ds->dev, "invalid STP state: %d\n", state);
1816 		return;
1817 	}
1818 
1819 	b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
1820 	reg &= ~PORT_CTRL_STP_STATE_MASK;
1821 	reg |= hw_state;
1822 	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
1823 }
1824 EXPORT_SYMBOL(b53_br_set_stp_state);
1825 
1826 void b53_br_fast_age(struct dsa_switch *ds, int port)
1827 {
1828 	struct b53_device *dev = ds->priv;
1829 
1830 	if (b53_fast_age_port(dev, port))
1831 		dev_err(ds->dev, "fast ageing failed\n");
1832 }
1833 EXPORT_SYMBOL(b53_br_fast_age);
1834 
1835 int b53_br_egress_floods(struct dsa_switch *ds, int port,
1836 			 bool unicast, bool multicast)
1837 {
1838 	struct b53_device *dev = ds->priv;
1839 	u16 uc, mc;
1840 
1841 	b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
1842 	if (unicast)
1843 		uc |= BIT(port);
1844 	else
1845 		uc &= ~BIT(port);
1846 	b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
1847 
1848 	b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
1849 	if (multicast)
1850 		mc |= BIT(port);
1851 	else
1852 		mc &= ~BIT(port);
1853 	b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
1854 
1855 	b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
1856 	if (multicast)
1857 		mc |= BIT(port);
1858 	else
1859 		mc &= ~BIT(port);
1860 	b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
1861 
1862 	return 0;
1863 
1864 }
1865 EXPORT_SYMBOL(b53_br_egress_floods);
1866 
1867 static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
1868 {
1869 	/* Broadcom switches will accept enabling Broadcom tags on the
1870 	 * following ports: 5, 7 and 8, any other port is not supported
1871 	 */
1872 	switch (port) {
1873 	case B53_CPU_PORT_25:
1874 	case 7:
1875 	case B53_CPU_PORT:
1876 		return true;
1877 	}
1878 
1879 	return false;
1880 }
1881 
1882 static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port,
1883 				     enum dsa_tag_protocol tag_protocol)
1884 {
1885 	bool ret = b53_possible_cpu_port(ds, port);
1886 
1887 	if (!ret) {
1888 		dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n",
1889 			 port);
1890 		return ret;
1891 	}
1892 
1893 	switch (tag_protocol) {
1894 	case DSA_TAG_PROTO_BRCM:
1895 	case DSA_TAG_PROTO_BRCM_PREPEND:
1896 		dev_warn(ds->dev,
1897 			 "Port %d is stacked to Broadcom tag switch\n", port);
1898 		ret = false;
1899 		break;
1900 	default:
1901 		ret = true;
1902 		break;
1903 	}
1904 
1905 	return ret;
1906 }
1907 
1908 enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
1909 					   enum dsa_tag_protocol mprot)
1910 {
1911 	struct b53_device *dev = ds->priv;
1912 
1913 	/* Older models (5325, 5365) support a different tag format that we do
1914 	 * not support in net/dsa/tag_brcm.c yet.
1915 	 */
1916 	if (is5325(dev) || is5365(dev) ||
1917 	    !b53_can_enable_brcm_tags(ds, port, mprot)) {
1918 		dev->tag_protocol = DSA_TAG_PROTO_NONE;
1919 		goto out;
1920 	}
1921 
1922 	/* Broadcom BCM58xx chips have a flow accelerator on Port 8
1923 	 * which requires us to use the prepended Broadcom tag type
1924 	 */
1925 	if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT) {
1926 		dev->tag_protocol = DSA_TAG_PROTO_BRCM_PREPEND;
1927 		goto out;
1928 	}
1929 
1930 	dev->tag_protocol = DSA_TAG_PROTO_BRCM;
1931 out:
1932 	return dev->tag_protocol;
1933 }
1934 EXPORT_SYMBOL(b53_get_tag_protocol);
1935 
1936 int b53_mirror_add(struct dsa_switch *ds, int port,
1937 		   struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
1938 {
1939 	struct b53_device *dev = ds->priv;
1940 	u16 reg, loc;
1941 
1942 	if (ingress)
1943 		loc = B53_IG_MIR_CTL;
1944 	else
1945 		loc = B53_EG_MIR_CTL;
1946 
1947 	b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
1948 	reg |= BIT(port);
1949 	b53_write16(dev, B53_MGMT_PAGE, loc, reg);
1950 
1951 	b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
1952 	reg &= ~CAP_PORT_MASK;
1953 	reg |= mirror->to_local_port;
1954 	reg |= MIRROR_EN;
1955 	b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
1956 
1957 	return 0;
1958 }
1959 EXPORT_SYMBOL(b53_mirror_add);
1960 
/* Stop mirroring @port's traffic; disable the mirror engine entirely once
 * neither the ingress nor the egress mask has any port left.
 */
void b53_mirror_del(struct dsa_switch *ds, int port,
		    struct dsa_mall_mirror_tc_entry *mirror)
{
	struct b53_device *dev = ds->priv;
	bool loc_disable = false, other_loc_disable = false;
	u16 reg, loc;

	if (mirror->ingress)
		loc = B53_IG_MIR_CTL;
	else
		loc = B53_EG_MIR_CTL;

	/* Update the desired ingress/egress register */
	b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
	reg &= ~BIT(port);
	if (!(reg & MIRROR_MASK))
		loc_disable = true;
	b53_write16(dev, B53_MGMT_PAGE, loc, reg);

	/* Now look at the other one to know if we can disable mirroring
	 * entirely
	 */
	if (mirror->ingress)
		b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, &reg);
	else
		b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, &reg);
	if (!(reg & MIRROR_MASK))
		other_loc_disable = true;

	b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
	/* Both no longer have ports, let's disable mirroring */
	if (loc_disable && other_loc_disable) {
		reg &= ~MIRROR_EN;
		/* NOTE(review): this clears only the bits of the destination
		 * port number rather than the whole CAP_PORT_MASK field —
		 * kept as-is; confirm intent.
		 */
		reg &= ~mirror->to_local_port;
	}
	b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
}
EXPORT_SYMBOL(b53_mirror_del);
1999 
2000 void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
2001 {
2002 	struct b53_device *dev = ds->priv;
2003 	u16 reg;
2004 
2005 	b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
2006 	if (enable)
2007 		reg |= BIT(port);
2008 	else
2009 		reg &= ~BIT(port);
2010 	b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
2011 }
2012 EXPORT_SYMBOL(b53_eee_enable_set);
2013 
2014 
2015 /* Returns 0 if EEE was not enabled, or 1 otherwise
2016  */
2017 int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
2018 {
2019 	int ret;
2020 
2021 	ret = phy_init_eee(phy, 0);
2022 	if (ret)
2023 		return 0;
2024 
2025 	b53_eee_enable_set(ds, port, true);
2026 
2027 	return 1;
2028 }
2029 EXPORT_SYMBOL(b53_eee_init);
2030 
2031 int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
2032 {
2033 	struct b53_device *dev = ds->priv;
2034 	struct ethtool_eee *p = &dev->ports[port].eee;
2035 	u16 reg;
2036 
2037 	if (is5325(dev) || is5365(dev))
2038 		return -EOPNOTSUPP;
2039 
2040 	b53_read16(dev, B53_EEE_PAGE, B53_EEE_LPI_INDICATE, &reg);
2041 	e->eee_enabled = p->eee_enabled;
2042 	e->eee_active = !!(reg & BIT(port));
2043 
2044 	return 0;
2045 }
2046 EXPORT_SYMBOL(b53_get_mac_eee);
2047 
2048 int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
2049 {
2050 	struct b53_device *dev = ds->priv;
2051 	struct ethtool_eee *p = &dev->ports[port].eee;
2052 
2053 	if (is5325(dev) || is5365(dev))
2054 		return -EOPNOTSUPP;
2055 
2056 	p->eee_enabled = e->eee_enabled;
2057 	b53_eee_enable_set(ds, port, e->eee_enabled);
2058 
2059 	return 0;
2060 }
2061 EXPORT_SYMBOL(b53_set_mac_eee);
2062 
2063 static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu)
2064 {
2065 	struct b53_device *dev = ds->priv;
2066 	bool enable_jumbo;
2067 	bool allow_10_100;
2068 
2069 	if (is5325(dev) || is5365(dev))
2070 		return -EOPNOTSUPP;
2071 
2072 	enable_jumbo = (mtu >= JMS_MIN_SIZE);
2073 	allow_10_100 = (dev->chip_id == BCM583XX_DEVICE_ID);
2074 
2075 	return b53_set_jumbo(dev, enable_jumbo, allow_10_100);
2076 }
2077 
/* Report the largest MTU supported on any port (jumbo frame limit) */
static int b53_get_max_mtu(struct dsa_switch *ds, int port)
{
	return JMS_MAX_SIZE;
}
2082 
/* DSA switch operations implemented by the common B53 code */
static const struct dsa_switch_ops b53_switch_ops = {
	.get_tag_protocol	= b53_get_tag_protocol,
	.setup			= b53_setup,
	.get_strings		= b53_get_strings,
	.get_ethtool_stats	= b53_get_ethtool_stats,
	.get_sset_count		= b53_get_sset_count,
	.get_ethtool_phy_stats	= b53_get_ethtool_phy_stats,
	.phy_read		= b53_phy_read16,
	.phy_write		= b53_phy_write16,
	.adjust_link		= b53_adjust_link,
	.phylink_validate	= b53_phylink_validate,
	.phylink_mac_link_state	= b53_phylink_mac_link_state,
	.phylink_mac_config	= b53_phylink_mac_config,
	.phylink_mac_an_restart	= b53_phylink_mac_an_restart,
	.phylink_mac_link_down	= b53_phylink_mac_link_down,
	.phylink_mac_link_up	= b53_phylink_mac_link_up,
	.port_enable		= b53_enable_port,
	.port_disable		= b53_disable_port,
	.get_mac_eee		= b53_get_mac_eee,
	.set_mac_eee		= b53_set_mac_eee,
	.port_bridge_join	= b53_br_join,
	.port_bridge_leave	= b53_br_leave,
	.port_stp_state_set	= b53_br_set_stp_state,
	.port_fast_age		= b53_br_fast_age,
	.port_egress_floods	= b53_br_egress_floods,
	.port_vlan_filtering	= b53_vlan_filtering,
	.port_vlan_prepare	= b53_vlan_prepare,
	.port_vlan_add		= b53_vlan_add,
	.port_vlan_del		= b53_vlan_del,
	.port_fdb_dump		= b53_fdb_dump,
	.port_fdb_add		= b53_fdb_add,
	.port_fdb_del		= b53_fdb_del,
	.port_mirror_add	= b53_mirror_add,
	.port_mirror_del	= b53_mirror_del,
	.port_mdb_prepare	= b53_mdb_prepare,
	.port_mdb_add		= b53_mdb_add,
	.port_mdb_del		= b53_mdb_del,
	.port_max_mtu		= b53_get_max_mtu,
	.port_change_mtu	= b53_change_mtu,
};
2123 
/* Static per-chip parameters; matched by chip_id and copied into the
 * runtime b53_device by b53_switch_init().
 */
struct b53_chip_data {
	u32 chip_id;		/* one of the BCM*_DEVICE_ID values */
	const char *dev_name;	/* human-readable chip name for logging */
	u16 vlans;		/* number of VLAN table entries */
	u16 enabled_ports;	/* default port bitmask (0: pdata provides it) */
	u8 cpu_port;		/* default CPU/IMP port number */
	u8 vta_regs[3];		/* VLAN table {access, index, entry} registers */
	u8 arl_entries;		/* ARL entries per lookup */
	u8 duplex_reg;		/* duplex status register offset */
	u8 jumbo_pm_reg;	/* jumbo frame port mask register offset */
	u8 jumbo_size_reg;	/* jumbo frame max size register offset */
};
2136 
/* VLAN table {access, index, entry} register triplets for the three
 * register layouts; assigned to b53_chip_data::vta_regs below and copied
 * into dev->vta_regs[] by b53_switch_init().
 */
#define B53_VTA_REGS	\
	{ B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY }
#define B53_VTA_REGS_9798 \
	{ B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 }
#define B53_VTA_REGS_63XX \
	{ B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX }
2143 
2144 static const struct b53_chip_data b53_switch_chips[] = {
2145 	{
2146 		.chip_id = BCM5325_DEVICE_ID,
2147 		.dev_name = "BCM5325",
2148 		.vlans = 16,
2149 		.enabled_ports = 0x1f,
2150 		.arl_entries = 2,
2151 		.cpu_port = B53_CPU_PORT_25,
2152 		.duplex_reg = B53_DUPLEX_STAT_FE,
2153 	},
2154 	{
2155 		.chip_id = BCM5365_DEVICE_ID,
2156 		.dev_name = "BCM5365",
2157 		.vlans = 256,
2158 		.enabled_ports = 0x1f,
2159 		.arl_entries = 2,
2160 		.cpu_port = B53_CPU_PORT_25,
2161 		.duplex_reg = B53_DUPLEX_STAT_FE,
2162 	},
2163 	{
2164 		.chip_id = BCM5389_DEVICE_ID,
2165 		.dev_name = "BCM5389",
2166 		.vlans = 4096,
2167 		.enabled_ports = 0x1f,
2168 		.arl_entries = 4,
2169 		.cpu_port = B53_CPU_PORT,
2170 		.vta_regs = B53_VTA_REGS,
2171 		.duplex_reg = B53_DUPLEX_STAT_GE,
2172 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2173 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2174 	},
2175 	{
2176 		.chip_id = BCM5395_DEVICE_ID,
2177 		.dev_name = "BCM5395",
2178 		.vlans = 4096,
2179 		.enabled_ports = 0x1f,
2180 		.arl_entries = 4,
2181 		.cpu_port = B53_CPU_PORT,
2182 		.vta_regs = B53_VTA_REGS,
2183 		.duplex_reg = B53_DUPLEX_STAT_GE,
2184 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2185 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2186 	},
2187 	{
2188 		.chip_id = BCM5397_DEVICE_ID,
2189 		.dev_name = "BCM5397",
2190 		.vlans = 4096,
2191 		.enabled_ports = 0x1f,
2192 		.arl_entries = 4,
2193 		.cpu_port = B53_CPU_PORT,
2194 		.vta_regs = B53_VTA_REGS_9798,
2195 		.duplex_reg = B53_DUPLEX_STAT_GE,
2196 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2197 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2198 	},
2199 	{
2200 		.chip_id = BCM5398_DEVICE_ID,
2201 		.dev_name = "BCM5398",
2202 		.vlans = 4096,
2203 		.enabled_ports = 0x7f,
2204 		.arl_entries = 4,
2205 		.cpu_port = B53_CPU_PORT,
2206 		.vta_regs = B53_VTA_REGS_9798,
2207 		.duplex_reg = B53_DUPLEX_STAT_GE,
2208 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2209 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2210 	},
2211 	{
2212 		.chip_id = BCM53115_DEVICE_ID,
2213 		.dev_name = "BCM53115",
2214 		.vlans = 4096,
2215 		.enabled_ports = 0x1f,
2216 		.arl_entries = 4,
2217 		.vta_regs = B53_VTA_REGS,
2218 		.cpu_port = B53_CPU_PORT,
2219 		.duplex_reg = B53_DUPLEX_STAT_GE,
2220 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2221 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2222 	},
2223 	{
2224 		.chip_id = BCM53125_DEVICE_ID,
2225 		.dev_name = "BCM53125",
2226 		.vlans = 4096,
2227 		.enabled_ports = 0xff,
2228 		.arl_entries = 4,
2229 		.cpu_port = B53_CPU_PORT,
2230 		.vta_regs = B53_VTA_REGS,
2231 		.duplex_reg = B53_DUPLEX_STAT_GE,
2232 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2233 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2234 	},
2235 	{
2236 		.chip_id = BCM53128_DEVICE_ID,
2237 		.dev_name = "BCM53128",
2238 		.vlans = 4096,
2239 		.enabled_ports = 0x1ff,
2240 		.arl_entries = 4,
2241 		.cpu_port = B53_CPU_PORT,
2242 		.vta_regs = B53_VTA_REGS,
2243 		.duplex_reg = B53_DUPLEX_STAT_GE,
2244 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2245 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2246 	},
2247 	{
2248 		.chip_id = BCM63XX_DEVICE_ID,
2249 		.dev_name = "BCM63xx",
2250 		.vlans = 4096,
2251 		.enabled_ports = 0, /* pdata must provide them */
2252 		.arl_entries = 4,
2253 		.cpu_port = B53_CPU_PORT,
2254 		.vta_regs = B53_VTA_REGS_63XX,
2255 		.duplex_reg = B53_DUPLEX_STAT_63XX,
2256 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
2257 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
2258 	},
2259 	{
2260 		.chip_id = BCM53010_DEVICE_ID,
2261 		.dev_name = "BCM53010",
2262 		.vlans = 4096,
2263 		.enabled_ports = 0x1f,
2264 		.arl_entries = 4,
2265 		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2266 		.vta_regs = B53_VTA_REGS,
2267 		.duplex_reg = B53_DUPLEX_STAT_GE,
2268 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2269 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2270 	},
2271 	{
2272 		.chip_id = BCM53011_DEVICE_ID,
2273 		.dev_name = "BCM53011",
2274 		.vlans = 4096,
2275 		.enabled_ports = 0x1bf,
2276 		.arl_entries = 4,
2277 		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2278 		.vta_regs = B53_VTA_REGS,
2279 		.duplex_reg = B53_DUPLEX_STAT_GE,
2280 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2281 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2282 	},
2283 	{
2284 		.chip_id = BCM53012_DEVICE_ID,
2285 		.dev_name = "BCM53012",
2286 		.vlans = 4096,
2287 		.enabled_ports = 0x1bf,
2288 		.arl_entries = 4,
2289 		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2290 		.vta_regs = B53_VTA_REGS,
2291 		.duplex_reg = B53_DUPLEX_STAT_GE,
2292 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2293 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2294 	},
2295 	{
2296 		.chip_id = BCM53018_DEVICE_ID,
2297 		.dev_name = "BCM53018",
2298 		.vlans = 4096,
2299 		.enabled_ports = 0x1f,
2300 		.arl_entries = 4,
2301 		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2302 		.vta_regs = B53_VTA_REGS,
2303 		.duplex_reg = B53_DUPLEX_STAT_GE,
2304 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2305 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2306 	},
2307 	{
2308 		.chip_id = BCM53019_DEVICE_ID,
2309 		.dev_name = "BCM53019",
2310 		.vlans = 4096,
2311 		.enabled_ports = 0x1f,
2312 		.arl_entries = 4,
2313 		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2314 		.vta_regs = B53_VTA_REGS,
2315 		.duplex_reg = B53_DUPLEX_STAT_GE,
2316 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2317 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2318 	},
2319 	{
2320 		.chip_id = BCM58XX_DEVICE_ID,
2321 		.dev_name = "BCM585xx/586xx/88312",
2322 		.vlans	= 4096,
2323 		.enabled_ports = 0x1ff,
2324 		.arl_entries = 4,
2325 		.cpu_port = B53_CPU_PORT,
2326 		.vta_regs = B53_VTA_REGS,
2327 		.duplex_reg = B53_DUPLEX_STAT_GE,
2328 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2329 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2330 	},
2331 	{
2332 		.chip_id = BCM583XX_DEVICE_ID,
2333 		.dev_name = "BCM583xx/11360",
2334 		.vlans = 4096,
2335 		.enabled_ports = 0x103,
2336 		.arl_entries = 4,
2337 		.cpu_port = B53_CPU_PORT,
2338 		.vta_regs = B53_VTA_REGS,
2339 		.duplex_reg = B53_DUPLEX_STAT_GE,
2340 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2341 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2342 	},
2343 	{
2344 		.chip_id = BCM7445_DEVICE_ID,
2345 		.dev_name = "BCM7445",
2346 		.vlans	= 4096,
2347 		.enabled_ports = 0x1ff,
2348 		.arl_entries = 4,
2349 		.cpu_port = B53_CPU_PORT,
2350 		.vta_regs = B53_VTA_REGS,
2351 		.duplex_reg = B53_DUPLEX_STAT_GE,
2352 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2353 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2354 	},
2355 	{
2356 		.chip_id = BCM7278_DEVICE_ID,
2357 		.dev_name = "BCM7278",
2358 		.vlans = 4096,
2359 		.enabled_ports = 0x1ff,
2360 		.arl_entries= 4,
2361 		.cpu_port = B53_CPU_PORT,
2362 		.vta_regs = B53_VTA_REGS,
2363 		.duplex_reg = B53_DUPLEX_STAT_GE,
2364 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2365 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2366 	},
2367 };
2368 
2369 static int b53_switch_init(struct b53_device *dev)
2370 {
2371 	unsigned int i;
2372 	int ret;
2373 
2374 	for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
2375 		const struct b53_chip_data *chip = &b53_switch_chips[i];
2376 
2377 		if (chip->chip_id == dev->chip_id) {
2378 			if (!dev->enabled_ports)
2379 				dev->enabled_ports = chip->enabled_ports;
2380 			dev->name = chip->dev_name;
2381 			dev->duplex_reg = chip->duplex_reg;
2382 			dev->vta_regs[0] = chip->vta_regs[0];
2383 			dev->vta_regs[1] = chip->vta_regs[1];
2384 			dev->vta_regs[2] = chip->vta_regs[2];
2385 			dev->jumbo_pm_reg = chip->jumbo_pm_reg;
2386 			dev->cpu_port = chip->cpu_port;
2387 			dev->num_vlans = chip->vlans;
2388 			dev->num_arl_entries = chip->arl_entries;
2389 			break;
2390 		}
2391 	}
2392 
2393 	/* check which BCM5325x version we have */
2394 	if (is5325(dev)) {
2395 		u8 vc4;
2396 
2397 		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
2398 
2399 		/* check reserved bits */
2400 		switch (vc4 & 3) {
2401 		case 1:
2402 			/* BCM5325E */
2403 			break;
2404 		case 3:
2405 			/* BCM5325F - do not use port 4 */
2406 			dev->enabled_ports &= ~BIT(4);
2407 			break;
2408 		default:
2409 /* On the BCM47XX SoCs this is the supported internal switch.*/
2410 #ifndef CONFIG_BCM47XX
2411 			/* BCM5325M */
2412 			return -EINVAL;
2413 #else
2414 			break;
2415 #endif
2416 		}
2417 	} else if (dev->chip_id == BCM53115_DEVICE_ID) {
2418 		u64 strap_value;
2419 
2420 		b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value);
2421 		/* use second IMP port if GMII is enabled */
2422 		if (strap_value & SV_GMII_CTRL_115)
2423 			dev->cpu_port = 5;
2424 	}
2425 
2426 	/* cpu port is always last */
2427 	dev->num_ports = dev->cpu_port + 1;
2428 	dev->enabled_ports |= BIT(dev->cpu_port);
2429 
2430 	/* Include non standard CPU port built-in PHYs to be probed */
2431 	if (is539x(dev) || is531x5(dev)) {
2432 		for (i = 0; i < dev->num_ports; i++) {
2433 			if (!(dev->ds->phys_mii_mask & BIT(i)) &&
2434 			    !b53_possible_cpu_port(dev->ds, i))
2435 				dev->ds->phys_mii_mask |= BIT(i);
2436 		}
2437 	}
2438 
2439 	dev->ports = devm_kcalloc(dev->dev,
2440 				  dev->num_ports, sizeof(struct b53_port),
2441 				  GFP_KERNEL);
2442 	if (!dev->ports)
2443 		return -ENOMEM;
2444 
2445 	dev->vlans = devm_kcalloc(dev->dev,
2446 				  dev->num_vlans, sizeof(struct b53_vlan),
2447 				  GFP_KERNEL);
2448 	if (!dev->vlans)
2449 		return -ENOMEM;
2450 
2451 	dev->reset_gpio = b53_switch_get_reset_gpio(dev);
2452 	if (dev->reset_gpio >= 0) {
2453 		ret = devm_gpio_request_one(dev->dev, dev->reset_gpio,
2454 					    GPIOF_OUT_INIT_HIGH, "robo_reset");
2455 		if (ret)
2456 			return ret;
2457 	}
2458 
2459 	return 0;
2460 }
2461 
2462 struct b53_device *b53_switch_alloc(struct device *base,
2463 				    const struct b53_io_ops *ops,
2464 				    void *priv)
2465 {
2466 	struct dsa_switch *ds;
2467 	struct b53_device *dev;
2468 
2469 	ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
2470 	if (!ds)
2471 		return NULL;
2472 
2473 	ds->dev = base;
2474 	ds->num_ports = DSA_MAX_PORTS;
2475 
2476 	dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
2477 	if (!dev)
2478 		return NULL;
2479 
2480 	ds->priv = dev;
2481 	dev->dev = base;
2482 
2483 	dev->ds = ds;
2484 	dev->priv = priv;
2485 	dev->ops = ops;
2486 	ds->ops = &b53_switch_ops;
2487 	mutex_init(&dev->reg_mutex);
2488 	mutex_init(&dev->stats_mutex);
2489 
2490 	return dev;
2491 }
2492 EXPORT_SYMBOL(b53_switch_alloc);
2493 
/* Probe the device ID registers to identify the switch model, storing the
 * result in dev->chip_id, and read the core revision into dev->core_rev.
 *
 * Returns 0 on success, a negative error code on register access failure,
 * or -ENODEV for an unrecognized device.
 */
int b53_switch_detect(struct b53_device *dev)
{
	u32 id32;
	u16 tmp;
	u8 id8;
	int ret;

	ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8);
	if (ret)
		return ret;

	switch (id8) {
	case 0:
		/* BCM5325 and BCM5365 do not have this register so reads
		 * return 0. But the read operation did succeed, so assume this
		 * is one of them.
		 *
		 * Next check if we can write to the 5325's VTA register; for
		 * 5365 it is read only.
		 */
		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
		b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);

		if (tmp == 0xf)
			dev->chip_id = BCM5325_DEVICE_ID;
		else
			dev->chip_id = BCM5365_DEVICE_ID;
		break;
	case BCM5389_DEVICE_ID:
	case BCM5395_DEVICE_ID:
	case BCM5397_DEVICE_ID:
	case BCM5398_DEVICE_ID:
		/* These chips report their full ID in the single byte */
		dev->chip_id = id8;
		break;
	default:
		/* Remaining chips expose a 32-bit ID at the same offset */
		ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32);
		if (ret)
			return ret;

		switch (id32) {
		case BCM53115_DEVICE_ID:
		case BCM53125_DEVICE_ID:
		case BCM53128_DEVICE_ID:
		case BCM53010_DEVICE_ID:
		case BCM53011_DEVICE_ID:
		case BCM53012_DEVICE_ID:
		case BCM53018_DEVICE_ID:
		case BCM53019_DEVICE_ID:
			dev->chip_id = id32;
			break;
		default:
			pr_err("unsupported switch detected (BCM53%02x/BCM%x)\n",
			       id8, id32);
			return -ENODEV;
		}
	}

	/* The 5325 keeps its revision ID in the stats page */
	if (dev->chip_id == BCM5325_DEVICE_ID)
		return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25,
				 &dev->core_rev);
	else
		return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID,
				 &dev->core_rev);
}
EXPORT_SYMBOL(b53_switch_detect);
2559 
/* Finish setting up a device created by b53_switch_alloc() and register
 * it with the DSA core. Platform data, when present, supplies the chip
 * ID and enabled port mask; otherwise the chip is auto-detected.
 *
 * Returns 0 on success or a negative error code.
 */
int b53_switch_register(struct b53_device *dev)
{
	int ret;

	if (dev->pdata) {
		dev->chip_id = dev->pdata->chip_id;
		dev->enabled_ports = dev->pdata->enabled_ports;
	}

	/* Fall back to probing the hardware when no chip ID was provided */
	if (!dev->chip_id && b53_switch_detect(dev))
		return -EINVAL;

	ret = b53_switch_init(dev);
	if (ret)
		return ret;

	pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev);

	return dsa_register_switch(dev->ds);
}
EXPORT_SYMBOL(b53_switch_register);
2581 
2582 MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
2583 MODULE_DESCRIPTION("B53 switch library");
2584 MODULE_LICENSE("Dual BSD/GPL");
2585