1 /*
2 * B53 switch driver main logic
3 *
4 * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
5 * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com>
6 *
7 * Permission to use, copy, modify, and/or distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include <linux/delay.h>
21 #include <linux/export.h>
22 #include <linux/gpio.h>
23 #include <linux/kernel.h>
24 #include <linux/math.h>
25 #include <linux/minmax.h>
26 #include <linux/module.h>
27 #include <linux/platform_data/b53.h>
28 #include <linux/phy.h>
29 #include <linux/phylink.h>
30 #include <linux/etherdevice.h>
31 #include <linux/if_bridge.h>
32 #include <linux/if_vlan.h>
33 #include <net/dsa.h>
34
35 #include "b53_regs.h"
36 #include "b53_priv.h"
37
38 struct b53_mib_desc {
39 u8 size;
40 u8 offset;
41 const char *name;
42 };
43
44 /* BCM5365 MIB counters */
45 static const struct b53_mib_desc b53_mibs_65[] = {
46 { 8, 0x00, "TxOctets" },
47 { 4, 0x08, "TxDropPkts" },
48 { 4, 0x10, "TxBroadcastPkts" },
49 { 4, 0x14, "TxMulticastPkts" },
50 { 4, 0x18, "TxUnicastPkts" },
51 { 4, 0x1c, "TxCollisions" },
52 { 4, 0x20, "TxSingleCollision" },
53 { 4, 0x24, "TxMultipleCollision" },
54 { 4, 0x28, "TxDeferredTransmit" },
55 { 4, 0x2c, "TxLateCollision" },
56 { 4, 0x30, "TxExcessiveCollision" },
57 { 4, 0x38, "TxPausePkts" },
58 { 8, 0x44, "RxOctets" },
59 { 4, 0x4c, "RxUndersizePkts" },
60 { 4, 0x50, "RxPausePkts" },
61 { 4, 0x54, "Pkts64Octets" },
62 { 4, 0x58, "Pkts65to127Octets" },
63 { 4, 0x5c, "Pkts128to255Octets" },
64 { 4, 0x60, "Pkts256to511Octets" },
65 { 4, 0x64, "Pkts512to1023Octets" },
66 { 4, 0x68, "Pkts1024to1522Octets" },
67 { 4, 0x6c, "RxOversizePkts" },
68 { 4, 0x70, "RxJabbers" },
69 { 4, 0x74, "RxAlignmentErrors" },
70 { 4, 0x78, "RxFCSErrors" },
71 { 8, 0x7c, "RxGoodOctets" },
72 { 4, 0x84, "RxDropPkts" },
73 { 4, 0x88, "RxUnicastPkts" },
74 { 4, 0x8c, "RxMulticastPkts" },
75 { 4, 0x90, "RxBroadcastPkts" },
76 { 4, 0x94, "RxSAChanges" },
77 { 4, 0x98, "RxFragments" },
78 };
79
80 #define B53_MIBS_65_SIZE ARRAY_SIZE(b53_mibs_65)
81
82 /* BCM63xx MIB counters */
83 static const struct b53_mib_desc b53_mibs_63xx[] = {
84 { 8, 0x00, "TxOctets" },
85 { 4, 0x08, "TxDropPkts" },
86 { 4, 0x0c, "TxQoSPkts" },
87 { 4, 0x10, "TxBroadcastPkts" },
88 { 4, 0x14, "TxMulticastPkts" },
89 { 4, 0x18, "TxUnicastPkts" },
90 { 4, 0x1c, "TxCollisions" },
91 { 4, 0x20, "TxSingleCollision" },
92 { 4, 0x24, "TxMultipleCollision" },
93 { 4, 0x28, "TxDeferredTransmit" },
94 { 4, 0x2c, "TxLateCollision" },
95 { 4, 0x30, "TxExcessiveCollision" },
96 { 4, 0x38, "TxPausePkts" },
97 { 8, 0x3c, "TxQoSOctets" },
98 { 8, 0x44, "RxOctets" },
99 { 4, 0x4c, "RxUndersizePkts" },
100 { 4, 0x50, "RxPausePkts" },
101 { 4, 0x54, "Pkts64Octets" },
102 { 4, 0x58, "Pkts65to127Octets" },
103 { 4, 0x5c, "Pkts128to255Octets" },
104 { 4, 0x60, "Pkts256to511Octets" },
105 { 4, 0x64, "Pkts512to1023Octets" },
106 { 4, 0x68, "Pkts1024to1522Octets" },
107 { 4, 0x6c, "RxOversizePkts" },
108 { 4, 0x70, "RxJabbers" },
109 { 4, 0x74, "RxAlignmentErrors" },
110 { 4, 0x78, "RxFCSErrors" },
111 { 8, 0x7c, "RxGoodOctets" },
112 { 4, 0x84, "RxDropPkts" },
113 { 4, 0x88, "RxUnicastPkts" },
114 { 4, 0x8c, "RxMulticastPkts" },
115 { 4, 0x90, "RxBroadcastPkts" },
116 { 4, 0x94, "RxSAChanges" },
117 { 4, 0x98, "RxFragments" },
118 { 4, 0xa0, "RxSymbolErrors" },
119 { 4, 0xa4, "RxQoSPkts" },
120 { 8, 0xa8, "RxQoSOctets" },
121 { 4, 0xb0, "Pkts1523to2047Octets" },
122 { 4, 0xb4, "Pkts2048to4095Octets" },
123 { 4, 0xb8, "Pkts4096to8191Octets" },
124 { 4, 0xbc, "Pkts8192to9728Octets" },
125 { 4, 0xc0, "RxDiscarded" },
126 };
127
128 #define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx)
129
130 /* MIB counters */
131 static const struct b53_mib_desc b53_mibs[] = {
132 { 8, 0x00, "TxOctets" },
133 { 4, 0x08, "TxDropPkts" },
134 { 4, 0x10, "TxBroadcastPkts" },
135 { 4, 0x14, "TxMulticastPkts" },
136 { 4, 0x18, "TxUnicastPkts" },
137 { 4, 0x1c, "TxCollisions" },
138 { 4, 0x20, "TxSingleCollision" },
139 { 4, 0x24, "TxMultipleCollision" },
140 { 4, 0x28, "TxDeferredTransmit" },
141 { 4, 0x2c, "TxLateCollision" },
142 { 4, 0x30, "TxExcessiveCollision" },
143 { 4, 0x38, "TxPausePkts" },
144 { 8, 0x50, "RxOctets" },
145 { 4, 0x58, "RxUndersizePkts" },
146 { 4, 0x5c, "RxPausePkts" },
147 { 4, 0x60, "Pkts64Octets" },
148 { 4, 0x64, "Pkts65to127Octets" },
149 { 4, 0x68, "Pkts128to255Octets" },
150 { 4, 0x6c, "Pkts256to511Octets" },
151 { 4, 0x70, "Pkts512to1023Octets" },
152 { 4, 0x74, "Pkts1024to1522Octets" },
153 { 4, 0x78, "RxOversizePkts" },
154 { 4, 0x7c, "RxJabbers" },
155 { 4, 0x80, "RxAlignmentErrors" },
156 { 4, 0x84, "RxFCSErrors" },
157 { 8, 0x88, "RxGoodOctets" },
158 { 4, 0x90, "RxDropPkts" },
159 { 4, 0x94, "RxUnicastPkts" },
160 { 4, 0x98, "RxMulticastPkts" },
161 { 4, 0x9c, "RxBroadcastPkts" },
162 { 4, 0xa0, "RxSAChanges" },
163 { 4, 0xa4, "RxFragments" },
164 { 4, 0xa8, "RxJumboPkts" },
165 { 4, 0xac, "RxSymbolErrors" },
166 { 4, 0xc0, "RxDiscarded" },
167 };
168
169 #define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs)
170
171 static const struct b53_mib_desc b53_mibs_58xx[] = {
172 { 8, 0x00, "TxOctets" },
173 { 4, 0x08, "TxDropPkts" },
174 { 4, 0x0c, "TxQPKTQ0" },
175 { 4, 0x10, "TxBroadcastPkts" },
176 { 4, 0x14, "TxMulticastPkts" },
177 { 4, 0x18, "TxUnicastPKts" },
178 { 4, 0x1c, "TxCollisions" },
179 { 4, 0x20, "TxSingleCollision" },
180 { 4, 0x24, "TxMultipleCollision" },
181 { 4, 0x28, "TxDeferredCollision" },
182 { 4, 0x2c, "TxLateCollision" },
183 { 4, 0x30, "TxExcessiveCollision" },
184 { 4, 0x34, "TxFrameInDisc" },
185 { 4, 0x38, "TxPausePkts" },
186 { 4, 0x3c, "TxQPKTQ1" },
187 { 4, 0x40, "TxQPKTQ2" },
188 { 4, 0x44, "TxQPKTQ3" },
189 { 4, 0x48, "TxQPKTQ4" },
190 { 4, 0x4c, "TxQPKTQ5" },
191 { 8, 0x50, "RxOctets" },
192 { 4, 0x58, "RxUndersizePkts" },
193 { 4, 0x5c, "RxPausePkts" },
194 { 4, 0x60, "RxPkts64Octets" },
195 { 4, 0x64, "RxPkts65to127Octets" },
196 { 4, 0x68, "RxPkts128to255Octets" },
197 { 4, 0x6c, "RxPkts256to511Octets" },
198 { 4, 0x70, "RxPkts512to1023Octets" },
199 { 4, 0x74, "RxPkts1024toMaxPktsOctets" },
200 { 4, 0x78, "RxOversizePkts" },
201 { 4, 0x7c, "RxJabbers" },
202 { 4, 0x80, "RxAlignmentErrors" },
203 { 4, 0x84, "RxFCSErrors" },
204 { 8, 0x88, "RxGoodOctets" },
205 { 4, 0x90, "RxDropPkts" },
206 { 4, 0x94, "RxUnicastPkts" },
207 { 4, 0x98, "RxMulticastPkts" },
208 { 4, 0x9c, "RxBroadcastPkts" },
209 { 4, 0xa0, "RxSAChanges" },
210 { 4, 0xa4, "RxFragments" },
211 { 4, 0xa8, "RxJumboPkt" },
212 { 4, 0xac, "RxSymblErr" },
213 { 4, 0xb0, "InRangeErrCount" },
214 { 4, 0xb4, "OutRangeErrCount" },
215 { 4, 0xb8, "EEELpiEvent" },
216 { 4, 0xbc, "EEELpiDuration" },
217 { 4, 0xc0, "RxDiscard" },
218 { 4, 0xc8, "TxQPKTQ6" },
219 { 4, 0xcc, "TxQPKTQ7" },
220 { 4, 0xd0, "TxPkts64Octets" },
221 { 4, 0xd4, "TxPkts65to127Octets" },
222 { 4, 0xd8, "TxPkts128to255Octets" },
223 { 4, 0xdc, "TxPkts256to511Ocets" },
224 { 4, 0xe0, "TxPkts512to1023Ocets" },
225 { 4, 0xe4, "TxPkts1024toMaxPktOcets" },
226 };
227
228 #define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx)
229
230 #define B53_MAX_MTU_25 (1536 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN)
231 #define B53_MAX_MTU (9720 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN)
232
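/* Issue a VLAN table access (VTA) command and poll for the hardware to
 * clear VTA_START_CMD; returns -EIO if the operation does not complete.
 */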
233 static int b53_do_vlan_op(struct b53_device *dev, u8 op)
234 {
235 unsigned int i;
236
237 b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op);
238
239 for (i = 0; i < 10; i++) {
240 u8 vta;
241
242 b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta);
243 if (!(vta & VTA_START_CMD))
244 return 0;
245
246 usleep_range(100, 200);
247 }
248
249 return -EIO;
250 }
251
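/* Write one VLAN table entry. BCM5325 and BCM5365 use their dedicated VLAN
 * write and table-access registers; other devices use the indexed VTA
 * registers together with a VTA_CMD_WRITE operation.
 */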
252 static void b53_set_vlan_entry(struct b53_device *dev, u16 vid,
253 struct b53_vlan *vlan)
254 {
255 if (is5325(dev)) {
256 u32 entry = 0;
257
258 if (vlan->members) {
259 entry = ((vlan->untag & VA_UNTAG_MASK_25) <<
260 VA_UNTAG_S_25) | vlan->members;
261 if (dev->core_rev >= 3)
262 entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S;
263 else
264 entry |= VA_VALID_25;
265 }
266
267 b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry);
268 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
269 VTA_RW_STATE_WR | VTA_RW_OP_EN);
270 } else if (is5365(dev)) {
271 u16 entry = 0;
272
273 if (vlan->members)
274 entry = ((vlan->untag & VA_UNTAG_MASK_65) <<
275 VA_UNTAG_S_65) | vlan->members | VA_VALID_65;
276
277 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry);
278 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
279 VTA_RW_STATE_WR | VTA_RW_OP_EN);
280 } else {
281 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
282 b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2],
283 (vlan->untag << VTE_UNTAG_S) | vlan->members);
284
285 b53_do_vlan_op(dev, VTA_CMD_WRITE);
286 }
287
288 dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n",
289 vid, vlan->members, vlan->untag);
290 }
291
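/* Read one VLAN table entry back into @vlan, decoding the per-family
 * valid/members/untag layout.
 */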
292 static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
293 struct b53_vlan *vlan)
294 {
295 if (is5325(dev)) {
296 u32 entry = 0;
297
298 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
299 VTA_RW_STATE_RD | VTA_RW_OP_EN);
300 b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry);
301
302 if (dev->core_rev >= 3)
303 vlan->valid = !!(entry & VA_VALID_25_R4);
304 else
305 vlan->valid = !!(entry & VA_VALID_25);
306 vlan->members = entry & VA_MEMBER_MASK;
307 vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25;
308
309 } else if (is5365(dev)) {
310 u16 entry = 0;
311
312 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
313 VTA_RW_STATE_WR | VTA_RW_OP_EN);
314 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry);
315
316 vlan->valid = !!(entry & VA_VALID_65);
317 vlan->members = entry & VA_MEMBER_MASK;
318 vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65;
319 } else {
320 u32 entry = 0;
321
322 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
323 b53_do_vlan_op(dev, VTA_CMD_READ);
324 b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry);
325 vlan->members = entry & VTE_MEMBERS;
326 vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS;
327 vlan->valid = true;
328 }
329 }
330
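/* Program the EAP (802.1X) mode for @port; BCM5325, BCM5365 and BCM5389
 * have no EAP configuration, and BCM63xx uses a different field layout.
 */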
331 static void b53_set_eap_mode(struct b53_device *dev, int port, int mode)
332 {
333 u64 eap_conf;
334
335 if (is5325(dev) || is5365(dev) || dev->chip_id == BCM5389_DEVICE_ID)
336 return;
337
338 b53_read64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), &eap_conf);
339
340 if (is63xx(dev)) {
341 eap_conf &= ~EAP_MODE_MASK_63XX;
342 eap_conf |= (u64)mode << EAP_MODE_SHIFT_63XX;
343 } else {
344 eap_conf &= ~EAP_MODE_MASK;
345 eap_conf |= (u64)mode << EAP_MODE_SHIFT;
346 }
347
348 b53_write64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), eap_conf);
349 }
350
351 static void b53_set_forwarding(struct b53_device *dev, int enable)
352 {
353 u8 mgmt;
354
355 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
356
357 if (enable)
358 mgmt |= SM_SW_FWD_EN;
359 else
360 mgmt &= ~SM_SW_FWD_EN;
361
362 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
363
364 if (!is5325(dev)) {
365 /* Include IMP port in dumb forwarding mode */
366 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
367 mgmt |= B53_MII_DUMB_FWDG_EN;
368 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
369
370 /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
371 * frames should be flooded or not.
372 */
373 b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
374 mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IP_MC;
375 b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
376 } else {
377 b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
378 mgmt |= B53_IP_MC;
379 b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
380 }
381 }
382
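/* Enable or disable global 802.1Q operation and set the ingress VID
 * checking policy: with filtering enabled, VID violations and VLAN table
 * misses are dropped; without it, no ingress VID check is performed. The
 * resulting state is recorded in dev->vlan_enabled.
 */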
383 static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
384 bool enable_filtering)
385 {
386 u8 mgmt, vc0, vc1, vc4 = 0, vc5;
387
388 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
389 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0);
390 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1);
391
392 if (is5325(dev) || is5365(dev)) {
393 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
394 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5);
395 } else if (is63xx(dev)) {
396 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4);
397 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5);
398 } else {
399 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4);
400 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
401 }
402
403 vc1 &= ~VC1_RX_MCST_FWD_EN;
404
405 if (enable) {
406 vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
407 vc1 |= VC1_RX_MCST_UNTAG_EN;
408 vc4 &= ~VC4_ING_VID_CHECK_MASK;
409 if (enable_filtering) {
410 vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
411 vc5 |= VC5_DROP_VTABLE_MISS;
412 } else {
413 vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S;
414 vc5 &= ~VC5_DROP_VTABLE_MISS;
415 }
416
417 if (is5325(dev))
418 vc0 &= ~VC0_RESERVED_1;
419
420 if (is5325(dev) || is5365(dev))
421 vc1 |= VC1_RX_MCST_TAG_EN;
422
423 } else {
424 vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
425 vc1 &= ~VC1_RX_MCST_UNTAG_EN;
426 vc4 &= ~VC4_ING_VID_CHECK_MASK;
427 vc5 &= ~VC5_DROP_VTABLE_MISS;
428
429 if (is5325(dev) || is5365(dev))
430 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
431 else
432 vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S;
433
434 if (is5325(dev) || is5365(dev))
435 vc1 &= ~VC1_RX_MCST_TAG_EN;
436 }
437
438 if (!is5325(dev) && !is5365(dev))
439 vc5 &= ~VC5_VID_FFF_EN;
440
441 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0);
442 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1);
443
444 if (is5325(dev) || is5365(dev)) {
445 /* enable the high 8 bit vid check on 5325 */
446 if (is5325(dev) && enable)
447 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3,
448 VC3_HIGH_8BIT_EN);
449 else
450 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
451
452 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4);
453 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5);
454 } else if (is63xx(dev)) {
455 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0);
456 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4);
457 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5);
458 } else {
459 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
460 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4);
461 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5);
462 }
463
464 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
465
466 dev->vlan_enabled = enable;
467
468 dev_dbg(dev->dev, "Port %d VLAN enabled: %d, filtering: %d\n",
469 port, enable, enable_filtering);
470 }
471
472 static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
473 {
474 u32 port_mask = 0;
475 u16 max_size = JMS_MIN_SIZE;
476
477 if (is5325(dev) || is5365(dev))
478 return -EINVAL;
479
480 if (enable) {
481 port_mask = dev->enabled_ports;
482 max_size = JMS_MAX_SIZE;
483 if (allow_10_100)
484 port_mask |= JPM_10_100_JUMBO_EN;
485 }
486
487 b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask);
488 return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size);
489 }
490
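/* Trigger a fast-age cycle for dynamic ARL entries matching @mask, poll for
 * completion, then restore the default dynamic-only aging; a no-op on
 * BCM5325.
 */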
491 static int b53_flush_arl(struct b53_device *dev, u8 mask)
492 {
493 unsigned int i;
494
495 if (is5325(dev))
496 return 0;
497
498 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
499 FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);
500
501 for (i = 0; i < 10; i++) {
502 u8 fast_age_ctrl;
503
504 b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
505 &fast_age_ctrl);
506
507 if (!(fast_age_ctrl & FAST_AGE_DONE))
508 goto out;
509
510 msleep(1);
511 }
512
513 return -ETIMEDOUT;
514 out:
515 /* Only age dynamic entries (default behavior) */
516 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC);
517 return 0;
518 }
519
520 static int b53_fast_age_port(struct b53_device *dev, int port)
521 {
522 if (is5325(dev))
523 return 0;
524
525 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);
526
527 return b53_flush_arl(dev, FAST_AGE_PORT);
528 }
529
530 static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
531 {
532 if (is5325(dev))
533 return 0;
534
535 b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);
536
537 return b53_flush_arl(dev, FAST_AGE_VLAN);
538 }
539
540 void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
541 {
542 struct b53_device *dev = ds->priv;
543 unsigned int i;
544 u16 pvlan;
545
546 /* BCM5325 CPU port is at 8 */
547 if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25)
548 cpu_port = B53_CPU_PORT;
549
550 /* Enable the IMP port to be in the same VLAN as the other ports
551 * on a per-port basis such that we only have Port i and IMP in
552 * the same VLAN.
553 */
554 b53_for_each_port(dev, i) {
555 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan);
556 pvlan |= BIT(cpu_port);
557 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan);
558 }
559 }
560 EXPORT_SYMBOL(b53_imp_vlan_setup);
561
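/* Control whether unknown-unicast frames are flooded to @port; BCM5325 uses
 * the IEEE page DLF register, other devices the unicast flood mask.
 */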
562 static void b53_port_set_ucast_flood(struct b53_device *dev, int port,
563 bool unicast)
564 {
565 u16 uc;
566
567 if (is5325(dev)) {
568 if (port == B53_CPU_PORT_25)
569 port = B53_CPU_PORT;
570
571 b53_read16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, &uc);
572 if (unicast)
573 uc |= BIT(port) | B53_IEEE_UCAST_DROP_EN;
574 else
575 uc &= ~BIT(port);
576 b53_write16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, uc);
577 } else {
578 b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
579 if (unicast)
580 uc |= BIT(port);
581 else
582 uc &= ~BIT(port);
583 b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
584 }
585 }
586
587 static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
588 bool multicast)
589 {
590 u16 mc;
591
592 if (is5325(dev)) {
593 if (port == B53_CPU_PORT_25)
594 port = B53_CPU_PORT;
595
596 b53_read16(dev, B53_IEEE_PAGE, B53_IEEE_MCAST_DLF, &mc);
597 if (multicast)
598 mc |= BIT(port) | B53_IEEE_MCAST_DROP_EN;
599 else
600 mc &= ~BIT(port);
601 b53_write16(dev, B53_IEEE_PAGE, B53_IEEE_MCAST_DLF, mc);
602 } else {
603 b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
604 if (multicast)
605 mc |= BIT(port);
606 else
607 mc &= ~BIT(port);
608 b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
609
610 b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
611 if (multicast)
612 mc |= BIT(port);
613 else
614 mc &= ~BIT(port);
615 b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
616 }
617 }
618
619 static void b53_port_set_learning(struct b53_device *dev, int port,
620 bool learning)
621 {
622 u16 reg;
623
624 if (is5325(dev))
625 return;
626
627 b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, &reg);
628 if (learning)
629 reg &= ~BIT(port);
630 else
631 reg |= BIT(port);
632 b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg);
633 }
634
635 static void b53_port_set_isolated(struct b53_device *dev, int port,
636 bool isolated)
637 {
638 u8 offset;
639 u16 reg;
640
641 if (is5325(dev))
642 offset = B53_PROTECTED_PORT_SEL_25;
643 else
644 offset = B53_PROTECTED_PORT_SEL;
645
646 b53_read16(dev, B53_CTRL_PAGE, offset, &reg);
647 if (isolated)
648 reg |= BIT(port);
649 else
650 reg &= ~BIT(port);
651 b53_write16(dev, B53_CTRL_PAGE, offset, reg);
652 }
653
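/* Enable or disable EEE for @port via its bit in the EEE enable control
 * register.
 */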
654 static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
655 {
656 struct b53_device *dev = ds->priv;
657 u16 reg;
658
659 b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
660 if (enable)
661 reg |= BIT(port);
662 else
663 reg &= ~BIT(port);
664 b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
665 }
666
667 int b53_setup_port(struct dsa_switch *ds, int port)
668 {
669 struct b53_device *dev = ds->priv;
670
671 b53_port_set_ucast_flood(dev, port, true);
672 b53_port_set_mcast_flood(dev, port, true);
673 b53_port_set_learning(dev, port, false);
674 b53_port_set_isolated(dev, port, false);
675
676 /* Force all traffic to go to the CPU port to prevent the ASIC from
677 * trying to forward to bridged ports on matching FDB entries, then
678 * dropping frames because it isn't allowed to forward there.
679 */
680 if (dsa_is_user_port(ds, port))
681 b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
682
683 if (is5325(dev) &&
684 in_range(port, 1, 4)) {
685 u8 reg;
686
687 b53_read8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, &reg);
688 reg &= ~PD_MODE_POWER_DOWN_PORT(0);
689 if (dsa_is_unused_port(ds, port))
690 reg |= PD_MODE_POWER_DOWN_PORT(port);
691 else
692 reg &= ~PD_MODE_POWER_DOWN_PORT(port);
693 b53_write8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, reg);
694 }
695
696 return 0;
697 }
698 EXPORT_SYMBOL(b53_setup_port);
699
700 int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
701 {
702 struct b53_device *dev = ds->priv;
703 unsigned int cpu_port;
704 int ret = 0;
705 u16 pvlan;
706
707 if (!dsa_is_user_port(ds, port))
708 return 0;
709
710 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
711
712 if (dev->ops->phy_enable)
713 dev->ops->phy_enable(dev, port);
714
715 if (dev->ops->irq_enable)
716 ret = dev->ops->irq_enable(dev, port);
717 if (ret)
718 return ret;
719
720 /* Clear the Rx and Tx disable bits and set to no spanning tree */
721 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0);
722
723 /* Set this port, and only this one, to be in the default VLAN;
724 * if it is a member of a bridge, restore its membership prior to
725 * bringing down this port.
726 */
727 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
728 pvlan &= ~0x1ff;
729 pvlan |= BIT(port);
730 pvlan |= dev->ports[port].vlan_ctl_mask;
731 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
732
733 b53_imp_vlan_setup(ds, cpu_port);
734
735 /* If EEE was enabled, restore it */
736 if (dev->ports[port].eee.eee_enabled)
737 b53_eee_enable_set(ds, port, true);
738
739 return 0;
740 }
741 EXPORT_SYMBOL(b53_enable_port);
742
743 void b53_disable_port(struct dsa_switch *ds, int port)
744 {
745 struct b53_device *dev = ds->priv;
746 u8 reg;
747
748 /* Disable Tx/Rx for the port */
749 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
750 reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
751 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
752
753 if (dev->ops->phy_disable)
754 dev->ops->phy_disable(dev, port);
755
756 if (dev->ops->irq_disable)
757 dev->ops->irq_disable(dev, port);
758 }
759 EXPORT_SYMBOL(b53_disable_port);
760
761 void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
762 {
763 struct b53_device *dev = ds->priv;
764 bool tag_en = !(dev->tag_protocol == DSA_TAG_PROTO_NONE);
765 u8 hdr_ctl, val;
766 u16 reg;
767
768 /* Resolve which bit controls the Broadcom tag */
769 switch (port) {
770 case 8:
771 val = BRCM_HDR_P8_EN;
772 break;
773 case 7:
774 val = BRCM_HDR_P7_EN;
775 break;
776 case 5:
777 val = BRCM_HDR_P5_EN;
778 break;
779 default:
780 val = 0;
781 break;
782 }
783
784 /* Enable management mode if tagging is requested */
785 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &hdr_ctl);
786 if (tag_en)
787 hdr_ctl |= SM_SW_FWD_MODE;
788 else
789 hdr_ctl &= ~SM_SW_FWD_MODE;
790 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, hdr_ctl);
791
792 /* Configure the appropriate IMP port */
793 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &hdr_ctl);
794 if (port == 8)
795 hdr_ctl |= GC_FRM_MGMT_PORT_MII;
796 else if (port == 5)
797 hdr_ctl |= GC_FRM_MGMT_PORT_M;
798 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl);
799
800 /* B53_BRCM_HDR not present on devices with legacy tags */
801 if (dev->tag_protocol == DSA_TAG_PROTO_BRCM_LEGACY ||
802 dev->tag_protocol == DSA_TAG_PROTO_BRCM_LEGACY_FCS)
803 return;
804
805 /* Enable Broadcom tags for IMP port */
806 b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
807 if (tag_en)
808 hdr_ctl |= val;
809 else
810 hdr_ctl &= ~val;
811 b53_write8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, hdr_ctl);
812
813 /* Registers below are only accessible on newer devices */
814 if (!is58xx(dev))
815 return;
816
817 /* Enable reception of Broadcom tags for CPU TX (switch RX) to
818 * allow us to tag outgoing frames
819 */
820 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, &reg);
821 if (tag_en)
822 reg &= ~BIT(port);
823 else
824 reg |= BIT(port);
825 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, reg);
826
827 /* Enable transmission of Broadcom tags from the switch (CPU RX) to
828 * allow delivering frames to the per-port net_devices
829 */
830 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, &reg);
831 if (tag_en)
832 reg &= ~BIT(port);
833 else
834 reg |= BIT(port);
835 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, reg);
836 }
837 EXPORT_SYMBOL(b53_brcm_hdr_setup);
838
839 static void b53_enable_cpu_port(struct b53_device *dev, int port)
840 {
841 u8 port_ctrl;
842
843 /* BCM5325 CPU port is at 8 */
844 if ((is5325(dev) || is5365(dev)) && port == B53_CPU_PORT_25)
845 port = B53_CPU_PORT;
846
847 port_ctrl = PORT_CTRL_RX_BCST_EN |
848 PORT_CTRL_RX_MCST_EN |
849 PORT_CTRL_RX_UCST_EN;
850 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
851
852 b53_brcm_hdr_setup(dev->ds, port);
853 }
854
855 static void b53_enable_mib(struct b53_device *dev)
856 {
857 u8 gc;
858
859 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
860 gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN);
861 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
862 }
863
864 static void b53_enable_stp(struct b53_device *dev)
865 {
866 u8 gc;
867
868 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
869 gc |= GC_RX_BPDU_EN;
870 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
871 }
872
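/* The default PVID is VID 0 on all devices handled here. */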
873 static u16 b53_default_pvid(struct b53_device *dev)
874 {
875 return 0;
876 }
877
878 static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port)
879 {
880 struct b53_device *dev = ds->priv;
881
882 return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port);
883 }
884
885 static bool b53_vlan_port_may_join_untagged(struct dsa_switch *ds, int port)
886 {
887 struct b53_device *dev = ds->priv;
888 struct dsa_port *dp;
889
890 if (!dev->vlan_filtering)
891 return true;
892
893 dp = dsa_to_port(ds, port);
894
895 if (dsa_port_is_cpu(dp))
896 return true;
897
898 return dp->bridge == NULL;
899 }
900
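/* (Re)program the VLAN configuration: clear the VLAN table, apply the
 * global VLAN enable/filtering state, set up the default PVID entry for
 * ports allowed to join it untagged and, when filtering is enabled,
 * restore all previously configured VLAN entries and per-port PVIDs
 * (e.g. across a system resume).
 */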
901 int b53_configure_vlan(struct dsa_switch *ds)
902 {
903 struct b53_device *dev = ds->priv;
904 struct b53_vlan vl = { 0 };
905 struct b53_vlan *v;
906 int i, def_vid;
907 u16 vid;
908
909 def_vid = b53_default_pvid(dev);
910
911 /* clear all vlan entries */
912 if (is5325(dev) || is5365(dev)) {
913 for (i = def_vid; i < dev->num_vlans; i++)
914 b53_set_vlan_entry(dev, i, &vl);
915 } else {
916 b53_do_vlan_op(dev, VTA_CMD_CLEAR);
917 }
918
919 b53_enable_vlan(dev, -1, dev->vlan_enabled, dev->vlan_filtering);
920
921 /* Create an untagged VLAN entry for the default PVID in case
922 * CONFIG_VLAN_8021Q is disabled and there are no calls to
923 * dsa_user_vlan_rx_add_vid() to create the default VLAN
924 * entry. Do this only when the tagging protocol is not
925 * DSA_TAG_PROTO_NONE
926 */
927 v = &dev->vlans[def_vid];
928 b53_for_each_port(dev, i) {
929 if (!b53_vlan_port_may_join_untagged(ds, i))
930 continue;
931
932 vl.members |= BIT(i);
933 if (!b53_vlan_port_needs_forced_tagged(ds, i))
934 vl.untag = vl.members;
935 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(i),
936 def_vid);
937 }
938 b53_set_vlan_entry(dev, def_vid, &vl);
939
940 if (dev->vlan_filtering) {
941 /* Upon initial call we have not set-up any VLANs, but upon
942 * system resume, we need to restore all VLAN entries.
943 */
944 for (vid = def_vid + 1; vid < dev->num_vlans; vid++) {
945 v = &dev->vlans[vid];
946
947 if (!v->members)
948 continue;
949
950 b53_set_vlan_entry(dev, vid, v);
951 b53_fast_age_vlan(dev, vid);
952 }
953
954 b53_for_each_port(dev, i) {
955 if (!dsa_is_cpu_port(ds, i))
956 b53_write16(dev, B53_VLAN_PAGE,
957 B53_VLAN_PORT_DEF_TAG(i),
958 dev->ports[i].pvid);
959 }
960 }
961
962 return 0;
963 }
964 EXPORT_SYMBOL(b53_configure_vlan);
965
966 static void b53_switch_reset_gpio(struct b53_device *dev)
967 {
968 int gpio = dev->reset_gpio;
969
970 if (gpio < 0)
971 return;
972
973 /* Reset sequence: RESET low(50ms)->high(20ms)
974 */
975 gpio_set_value(gpio, 0);
976 mdelay(50);
977
978 gpio_set_value(gpio, 1);
979 mdelay(20);
980
981 dev->current_page = 0xff;
982 }
983
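/* Perform a full switch reset: optional GPIO reset, software reset on 539x
 * and 58xx/583xx parts, re-enable forwarding if it was disabled, then
 * re-enable MIB counters and BPDU reception and flush static ARL entries.
 */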
984 static int b53_switch_reset(struct b53_device *dev)
985 {
986 unsigned int timeout = 1000;
987 u8 mgmt, reg;
988
989 b53_switch_reset_gpio(dev);
990
991 if (is539x(dev)) {
992 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83);
993 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
994 }
995
996 /* This is specific to 58xx devices here, do not use is58xx() which
997 * covers the larger Starfighter 2 family, including 7445/7278 which
998 * still use this driver as a library and need to perform the reset
999 * earlier.
1000 */
1001 if (dev->chip_id == BCM58XX_DEVICE_ID ||
1002 dev->chip_id == BCM583XX_DEVICE_ID) {
1003 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
1004 reg |= SW_RST | EN_SW_RST | EN_CH_RST;
1005 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);
1006
1007 do {
1008 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
1009 if (!(reg & SW_RST))
1010 break;
1011
1012 usleep_range(1000, 2000);
1013 } while (timeout-- > 0);
1014
1015 if (timeout == 0) {
1016 dev_err(dev->dev,
1017 "Timeout waiting for SW_RST to clear!\n");
1018 return -ETIMEDOUT;
1019 }
1020 }
1021
1022 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
1023
1024 if (!(mgmt & SM_SW_FWD_EN)) {
1025 mgmt &= ~SM_SW_FWD_MODE;
1026 mgmt |= SM_SW_FWD_EN;
1027
1028 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
1029 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
1030
1031 if (!(mgmt & SM_SW_FWD_EN)) {
1032 dev_err(dev->dev, "Failed to enable switch!\n");
1033 return -EINVAL;
1034 }
1035 }
1036
1037 b53_enable_mib(dev);
1038 b53_enable_stp(dev);
1039
1040 return b53_flush_arl(dev, FAST_AGE_STATIC);
1041 }
1042
1043 static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg)
1044 {
1045 struct b53_device *priv = ds->priv;
1046 u16 value = 0;
1047 int ret;
1048
1049 if (priv->ops->phy_read16)
1050 ret = priv->ops->phy_read16(priv, addr, reg, &value);
1051 else
1052 ret = b53_read16(priv, B53_PORT_MII_PAGE(addr),
1053 reg * 2, &value);
1054
1055 return ret ? ret : value;
1056 }
1057
1058 static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
1059 {
1060 struct b53_device *priv = ds->priv;
1061
1062 if (priv->ops->phy_write16)
1063 return priv->ops->phy_write16(priv, addr, reg, val);
1064
1065 return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val);
1066 }
1067
1068 static int b53_reset_switch(struct b53_device *priv)
1069 {
1070 /* reset vlans */
1071 memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans);
1072 memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports);
1073
1074 priv->serdes_lane = B53_INVALID_LANE;
1075
1076 return b53_switch_reset(priv);
1077 }
1078
1079 static int b53_apply_config(struct b53_device *priv)
1080 {
1081 /* disable switching */
1082 b53_set_forwarding(priv, 0);
1083
1084 b53_configure_vlan(priv->ds);
1085
1086 /* enable switching */
1087 b53_set_forwarding(priv, 1);
1088
1089 return 0;
1090 }
1091
1092 static void b53_reset_mib(struct b53_device *priv)
1093 {
1094 u8 gc;
1095
1096 b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
1097
1098 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB);
1099 msleep(1);
1100 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB);
1101 msleep(1);
1102 }
1103
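/* Select the MIB counter descriptor table matching the device family
 * (5365, 63xx, 58xx or the common layout).
 */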
1104 static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev)
1105 {
1106 if (is5365(dev))
1107 return b53_mibs_65;
1108 else if (is63xx(dev))
1109 return b53_mibs_63xx;
1110 else if (is58xx(dev))
1111 return b53_mibs_58xx;
1112 else
1113 return b53_mibs;
1114 }
1115
1116 static unsigned int b53_get_mib_size(struct b53_device *dev)
1117 {
1118 if (is5365(dev))
1119 return B53_MIBS_65_SIZE;
1120 else if (is63xx(dev))
1121 return B53_MIBS_63XX_SIZE;
1122 else if (is58xx(dev))
1123 return B53_MIBS_58XX_SIZE;
1124 else
1125 return B53_MIBS_SIZE;
1126 }
1127
1128 static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port)
1129 {
1130 /* These ports typically do not have built-in PHYs */
1131 switch (port) {
1132 case B53_CPU_PORT_25:
1133 case 7:
1134 case B53_CPU_PORT:
1135 return NULL;
1136 }
1137
1138 return mdiobus_get_phy(ds->user_mii_bus, port);
1139 }
1140
1141 void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
1142 uint8_t *data)
1143 {
1144 struct b53_device *dev = ds->priv;
1145 const struct b53_mib_desc *mibs = b53_get_mib(dev);
1146 unsigned int mib_size = b53_get_mib_size(dev);
1147 struct phy_device *phydev;
1148 unsigned int i;
1149
1150 if (stringset == ETH_SS_STATS) {
1151 for (i = 0; i < mib_size; i++)
1152 ethtool_puts(&data, mibs[i].name);
1153 } else if (stringset == ETH_SS_PHY_STATS) {
1154 phydev = b53_get_phy_device(ds, port);
1155 if (!phydev)
1156 return;
1157
1158 phy_ethtool_get_strings(phydev, data);
1159 }
1160 }
1161 EXPORT_SYMBOL(b53_get_strings);
1162
1163 void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
1164 {
1165 struct b53_device *dev = ds->priv;
1166 const struct b53_mib_desc *mibs = b53_get_mib(dev);
1167 unsigned int mib_size = b53_get_mib_size(dev);
1168 const struct b53_mib_desc *s;
1169 unsigned int i;
1170 u64 val = 0;
1171
1172 if (is5365(dev) && port == 5)
1173 port = 8;
1174
1175 mutex_lock(&dev->stats_mutex);
1176
1177 for (i = 0; i < mib_size; i++) {
1178 s = &mibs[i];
1179
1180 if (s->size == 8) {
1181 b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val);
1182 } else {
1183 u32 val32;
1184
1185 b53_read32(dev, B53_MIB_PAGE(port), s->offset,
1186 &val32);
1187 val = val32;
1188 }
1189 data[i] = (u64)val;
1190 }
1191
1192 mutex_unlock(&dev->stats_mutex);
1193 }
1194 EXPORT_SYMBOL(b53_get_ethtool_stats);
1195
1196 void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data)
1197 {
1198 struct phy_device *phydev;
1199
1200 phydev = b53_get_phy_device(ds, port);
1201 if (!phydev)
1202 return;
1203
1204 phy_ethtool_get_stats(phydev, NULL, data);
1205 }
1206 EXPORT_SYMBOL(b53_get_ethtool_phy_stats);
1207
1208 int b53_get_sset_count(struct dsa_switch *ds, int port, int sset)
1209 {
1210 struct b53_device *dev = ds->priv;
1211 struct phy_device *phydev;
1212
1213 if (sset == ETH_SS_STATS) {
1214 return b53_get_mib_size(dev);
1215 } else if (sset == ETH_SS_PHY_STATS) {
1216 phydev = b53_get_phy_device(ds, port);
1217 if (!phydev)
1218 return 0;
1219
1220 return phy_ethtool_get_sset_count(phydev);
1221 }
1222
1223 return 0;
1224 }
1225 EXPORT_SYMBOL(b53_get_sset_count);
1226
1227 enum b53_devlink_resource_id {
1228 B53_DEVLINK_PARAM_ID_VLAN_TABLE,
1229 };
1230
1231 static u64 b53_devlink_vlan_table_get(void *priv)
1232 {
1233 struct b53_device *dev = priv;
1234 struct b53_vlan *vl;
1235 unsigned int i;
1236 u64 count = 0;
1237
1238 for (i = 0; i < dev->num_vlans; i++) {
1239 vl = &dev->vlans[i];
1240 if (vl->members)
1241 count++;
1242 }
1243
1244 return count;
1245 }
1246
1247 int b53_setup_devlink_resources(struct dsa_switch *ds)
1248 {
1249 struct devlink_resource_size_params size_params;
1250 struct b53_device *dev = ds->priv;
1251 int err;
1252
1253 devlink_resource_size_params_init(&size_params, dev->num_vlans,
1254 dev->num_vlans,
1255 1, DEVLINK_RESOURCE_UNIT_ENTRY);
1256
1257 err = dsa_devlink_resource_register(ds, "VLAN", dev->num_vlans,
1258 B53_DEVLINK_PARAM_ID_VLAN_TABLE,
1259 DEVLINK_RESOURCE_ID_PARENT_TOP,
1260 &size_params);
1261 if (err)
1262 goto out;
1263
1264 dsa_devlink_resource_occ_get_register(ds,
1265 B53_DEVLINK_PARAM_ID_VLAN_TABLE,
1266 b53_devlink_vlan_table_get, dev);
1267
1268 return 0;
1269 out:
1270 dsa_devlink_resources_unregister(ds);
1271 return err;
1272 }
1273 EXPORT_SYMBOL(b53_setup_devlink_resources);
1274
1275 static int b53_setup(struct dsa_switch *ds)
1276 {
1277 struct b53_device *dev = ds->priv;
1278 struct b53_vlan *vl;
1279 unsigned int port;
1280 u16 pvid;
1281 int ret;
1282
1283 /* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set
1284 * which forces the CPU port to be tagged in all VLANs.
1285 */
1286 ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE;
1287
1288 /* The switch does not tell us the original VLAN for untagged
1289 * packets, so keep the CPU port always tagged.
1290 */
1291 ds->untag_vlan_aware_bridge_pvid = true;
1292
1293 if (dev->chip_id == BCM53101_DEVICE_ID) {
1294 /* BCM53101 uses 0.5 second increments */
1295 ds->ageing_time_min = 1 * 500;
1296 ds->ageing_time_max = AGE_TIME_MAX * 500;
1297 } else {
1298 /* Everything else uses 1 second increments */
1299 ds->ageing_time_min = 1 * 1000;
1300 ds->ageing_time_max = AGE_TIME_MAX * 1000;
1301 }
1302
1303 ret = b53_reset_switch(dev);
1304 if (ret) {
1305 dev_err(ds->dev, "failed to reset switch\n");
1306 return ret;
1307 }
1308
1309 /* setup default vlan for filtering mode */
1310 pvid = b53_default_pvid(dev);
1311 vl = &dev->vlans[pvid];
1312 b53_for_each_port(dev, port) {
1313 vl->members |= BIT(port);
1314 if (!b53_vlan_port_needs_forced_tagged(ds, port))
1315 vl->untag |= BIT(port);
1316 }
1317
1318 b53_reset_mib(dev);
1319
1320 ret = b53_apply_config(dev);
1321 if (ret) {
1322 dev_err(ds->dev, "failed to apply configuration\n");
1323 return ret;
1324 }
1325
1326 /* Configure IMP/CPU port, disable all other ports. Enabled
1327 * ports will be configured with .port_enable
1328 */
1329 for (port = 0; port < dev->num_ports; port++) {
1330 if (dsa_is_cpu_port(ds, port))
1331 b53_enable_cpu_port(dev, port);
1332 else
1333 b53_disable_port(ds, port);
1334 }
1335
1336 return b53_setup_devlink_resources(ds);
1337 }
1338
1339 static void b53_teardown(struct dsa_switch *ds)
1340 {
1341 dsa_devlink_resources_unregister(ds);
1342 }
1343
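/* Force the link state for @port through the (GMII) port override register;
 * only the IMP port can be overridden on BCM5325.
 */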
1344 static void b53_force_link(struct b53_device *dev, int port, int link)
1345 {
1346 u8 reg, val, off;
1347
1348 /* Override the port settings */
1349 if (port == dev->imp_port) {
1350 off = B53_PORT_OVERRIDE_CTRL;
1351 val = PORT_OVERRIDE_EN;
1352 } else if (is5325(dev)) {
1353 return;
1354 } else {
1355 off = B53_GMII_PORT_OVERRIDE_CTRL(port);
1356 val = GMII_PO_EN;
1357 }
1358
1359 b53_read8(dev, B53_CTRL_PAGE, off, &reg);
1360 reg |= val;
1361 if (link)
1362 reg |= PORT_OVERRIDE_LINK;
1363 else
1364 reg &= ~PORT_OVERRIDE_LINK;
1365 b53_write8(dev, B53_CTRL_PAGE, off, reg);
1366 }
1367
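/* Force speed, duplex and pause settings for @port through the port
 * override register; called from the phylink mac_link_up path for fixed
 * links and for BCM63xx RGMII ports in PHY mode.
 */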
1368 static void b53_force_port_config(struct b53_device *dev, int port,
1369 int speed, int duplex,
1370 bool tx_pause, bool rx_pause)
1371 {
1372 u8 reg, val, off;
1373
1374 /* Override the port settings */
1375 if (port == dev->imp_port) {
1376 off = B53_PORT_OVERRIDE_CTRL;
1377 val = PORT_OVERRIDE_EN;
1378 } else if (is5325(dev)) {
1379 return;
1380 } else {
1381 off = B53_GMII_PORT_OVERRIDE_CTRL(port);
1382 val = GMII_PO_EN;
1383 }
1384
1385 b53_read8(dev, B53_CTRL_PAGE, off, &reg);
1386 reg |= val;
1387 if (duplex == DUPLEX_FULL)
1388 reg |= PORT_OVERRIDE_FULL_DUPLEX;
1389 else
1390 reg &= ~PORT_OVERRIDE_FULL_DUPLEX;
1391
1392 reg &= ~(0x3 << GMII_PO_SPEED_S);
1393 if (is5301x(dev) || is58xx(dev))
1394 reg &= ~PORT_OVERRIDE_SPEED_2000M;
1395
1396 switch (speed) {
1397 case 2000:
1398 reg |= PORT_OVERRIDE_SPEED_2000M;
1399 fallthrough;
1400 case SPEED_1000:
1401 reg |= PORT_OVERRIDE_SPEED_1000M;
1402 break;
1403 case SPEED_100:
1404 reg |= PORT_OVERRIDE_SPEED_100M;
1405 break;
1406 case SPEED_10:
1407 reg |= PORT_OVERRIDE_SPEED_10M;
1408 break;
1409 default:
1410 dev_err(dev->dev, "unknown speed: %d\n", speed);
1411 return;
1412 }
1413
1414 if (is5325(dev))
1415 reg &= ~PORT_OVERRIDE_LP_FLOW_25;
1416 else
1417 reg &= ~(PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW);
1418
1419 if (rx_pause) {
1420 if (is5325(dev))
1421 reg |= PORT_OVERRIDE_LP_FLOW_25;
1422 else
1423 reg |= PORT_OVERRIDE_RX_FLOW;
1424 }
1425
1426 if (tx_pause) {
1427 if (is5325(dev))
1428 reg |= PORT_OVERRIDE_LP_FLOW_25;
1429 else
1430 reg |= PORT_OVERRIDE_TX_FLOW;
1431 }
1432
1433 b53_write8(dev, B53_CTRL_PAGE, off, reg);
1434 }
1435
1436 static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port,
1437 phy_interface_t interface)
1438 {
1439 struct b53_device *dev = ds->priv;
1440 u8 rgmii_ctrl = 0;
1441
1442 b53_read8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), &rgmii_ctrl);
1443 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
1444
1445 if (is6318_268(dev))
1446 rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE;
1447
1448 rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII;
1449
1450 b53_write8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), rgmii_ctrl);
1451
1452 dev_dbg(ds->dev, "Configured port %d for %s\n", port,
1453 phy_modes(interface));
1454 }
1455
1456 static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port,
1457 phy_interface_t interface)
1458 {
1459 struct b53_device *dev = ds->priv;
1460 u8 rgmii_ctrl = 0, off;
1461
1462 if (port == dev->imp_port)
1463 off = B53_RGMII_CTRL_IMP;
1464 else
1465 off = B53_RGMII_CTRL_P(port);
1466
1467 /* Configure the port RGMII clock delay with the DLL disabled and
1468 * tx_clk aligned timing (restoring to reset defaults)
1469 */
1470 b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
1471 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
1472
1473 /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
1474 * sure that we enable the port TX clock internal delay to
1475 * account for this internal delay that is inserted, otherwise
1476 * the switch won't be able to receive correctly.
1477 *
1478 * PHY_INTERFACE_MODE_RGMII means that we are not introducing
1479 * any delay neither on transmission nor reception, so the
1480 * BCM53125 must also be configured accordingly to account for
1481 * the lack of delay and introduce
1482 *
1483 * The BCM53125 switch has its RX clock and TX clock control
1484 * swapped, hence the reason why we modify the TX clock path in
1485 * the "RGMII" case
1486 */
1487 if (interface == PHY_INTERFACE_MODE_RGMII_TXID)
1488 rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
1489 if (interface == PHY_INTERFACE_MODE_RGMII)
1490 rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
1491
1492 if (dev->chip_id != BCM53115_DEVICE_ID)
1493 rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
1494
1495 b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
1496
1497 dev_info(ds->dev, "Configured port %d for %s\n", port,
1498 phy_modes(interface));
1499 }
1500
1501 static void b53_adjust_5325_mii(struct dsa_switch *ds, int port)
1502 {
1503 struct b53_device *dev = ds->priv;
1504 u8 reg = 0;
1505
1506 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
1507 &reg);
1508
1509 /* reverse mii needs to be enabled */
1510 if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
1511 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
1512 reg | PORT_OVERRIDE_RV_MII_25);
1513 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
1514 &reg);
1515
1516 if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
1517 dev_err(ds->dev,
1518 "Failed to enable reverse MII mode\n");
1519 return;
1520 }
1521 }
1522 }
1523
1524 void b53_port_event(struct dsa_switch *ds, int port)
1525 {
1526 struct b53_device *dev = ds->priv;
1527 bool link;
1528 u16 sts;
1529
1530 b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts);
1531 link = !!(sts & BIT(port));
1532 dsa_port_phylink_mac_change(ds, port, link);
1533 }
1534 EXPORT_SYMBOL(b53_port_event);
1535
1536 static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
1537 struct phylink_config *config)
1538 {
1539 struct b53_device *dev = ds->priv;
1540
1541 /* Internal ports need GMII for PHYLIB */
1542 __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces);
1543
1544 /* These switches appear to support MII and RevMII too, but beyond
1545 * this, the code gives very few clues. FIXME: We probably need more
1546 * interface modes here.
1547 *
1548 * According to b53_srab_mux_init(), ports 3..5 can support:
1549 * SGMII, MII, GMII, RGMII or INTERNAL depending on the MUX setting.
1550 * However, the interface mode read from the MUX configuration is
1551 * not passed back to DSA, so phylink uses NA.
1552 * DT can specify RGMII for ports 0, 1.
1553 * For MDIO, port 8 can be RGMII_TXID.
1554 */
1555 __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
1556 __set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces);
1557
1558 /* BCM63xx RGMII ports support RGMII */
1559 if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
1560 phy_interface_set_rgmii(config->supported_interfaces);
1561
1562 config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1563 MAC_10 | MAC_100;
1564
1565 /* 5325/5365 are not capable of gigabit speeds, everything else is.
1566 * Note: the original code also excluded Gigabit for MII, RevMII
1567 * and 802.3z modes. MII and RevMII are not able to work above 100M,
1568 * so will be excluded by the generic validator implementation.
1569 * However, the exclusion of Gigabit for 802.3z just seems wrong.
1570 */
1571 if (!(is5325(dev) || is5365(dev)))
1572 config->mac_capabilities |= MAC_1000;
1573
1574 /* Get the implementation specific capabilities */
1575 if (dev->ops->phylink_get_caps)
1576 dev->ops->phylink_get_caps(dev, port, config);
1577 }
1578
1579 static struct phylink_pcs *b53_phylink_mac_select_pcs(struct phylink_config *config,
1580 phy_interface_t interface)
1581 {
1582 struct dsa_port *dp = dsa_phylink_to_port(config);
1583 struct b53_device *dev = dp->ds->priv;
1584
1585 if (!dev->ops->phylink_mac_select_pcs)
1586 return NULL;
1587
1588 return dev->ops->phylink_mac_select_pcs(dev, dp->index, interface);
1589 }
1590
1591 static void b53_phylink_mac_config(struct phylink_config *config,
1592 unsigned int mode,
1593 const struct phylink_link_state *state)
1594 {
1595 struct dsa_port *dp = dsa_phylink_to_port(config);
1596 phy_interface_t interface = state->interface;
1597 struct dsa_switch *ds = dp->ds;
1598 struct b53_device *dev = ds->priv;
1599 int port = dp->index;
1600
1601 if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
1602 b53_adjust_63xx_rgmii(ds, port, interface);
1603
1604 if (mode == MLO_AN_FIXED) {
1605 if (is531x5(dev) && phy_interface_mode_is_rgmii(interface))
1606 b53_adjust_531x5_rgmii(ds, port, interface);
1607
1608 /* configure MII port if necessary */
1609 if (is5325(dev))
1610 b53_adjust_5325_mii(ds, port);
1611 }
1612 }
1613
1614 static void b53_phylink_mac_link_down(struct phylink_config *config,
1615 unsigned int mode,
1616 phy_interface_t interface)
1617 {
1618 struct dsa_port *dp = dsa_phylink_to_port(config);
1619 struct b53_device *dev = dp->ds->priv;
1620 int port = dp->index;
1621
1622 if (mode == MLO_AN_PHY) {
1623 if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
1624 b53_force_link(dev, port, false);
1625 return;
1626 }
1627
1628 if (mode == MLO_AN_FIXED) {
1629 b53_force_link(dev, port, false);
1630 return;
1631 }
1632
1633 if (phy_interface_mode_is_8023z(interface) &&
1634 dev->ops->serdes_link_set)
1635 dev->ops->serdes_link_set(dev, port, mode, interface, false);
1636 }
1637
1638 static void b53_phylink_mac_link_up(struct phylink_config *config,
1639 struct phy_device *phydev,
1640 unsigned int mode,
1641 phy_interface_t interface,
1642 int speed, int duplex,
1643 bool tx_pause, bool rx_pause)
1644 {
1645 struct dsa_port *dp = dsa_phylink_to_port(config);
1646 struct dsa_switch *ds = dp->ds;
1647 struct b53_device *dev = ds->priv;
1648 struct ethtool_keee *p = &dev->ports[dp->index].eee;
1649 int port = dp->index;
1650
1651 if (mode == MLO_AN_PHY) {
1652 /* Re-negotiate EEE if it was enabled already */
1653 p->eee_enabled = b53_eee_init(ds, port, phydev);
1654
1655 if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4)) {
1656 b53_force_port_config(dev, port, speed, duplex,
1657 tx_pause, rx_pause);
1658 b53_force_link(dev, port, true);
1659 }
1660
1661 return;
1662 }
1663
1664 if (mode == MLO_AN_FIXED) {
1665 /* Force flow control on BCM5301x's CPU port */
1666 if (is5301x(dev) && dsa_is_cpu_port(ds, port))
1667 tx_pause = rx_pause = true;
1668
1669 b53_force_port_config(dev, port, speed, duplex,
1670 tx_pause, rx_pause);
1671 b53_force_link(dev, port, true);
1672 return;
1673 }
1674
1675 if (phy_interface_mode_is_8023z(interface) &&
1676 dev->ops->serdes_link_set)
1677 dev->ops->serdes_link_set(dev, port, mode, interface, true);
1678 }
1679
1680 int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
1681 struct netlink_ext_ack *extack)
1682 {
1683 struct b53_device *dev = ds->priv;
1684
1685 if (dev->vlan_filtering != vlan_filtering) {
1686 dev->vlan_filtering = vlan_filtering;
1687 b53_apply_config(dev);
1688 }
1689
1690 return 0;
1691 }
1692 EXPORT_SYMBOL(b53_vlan_filtering);
1693
1694 static int b53_vlan_prepare(struct dsa_switch *ds, int port,
1695 const struct switchdev_obj_port_vlan *vlan)
1696 {
1697 struct b53_device *dev = ds->priv;
1698
1699 /* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of
1700 * receiving VLAN tagged frames at all, but we can still allow the port to
1701 * be configured for egress untagged.
1702 */
1703 if (dev->chip_id == BCM7278_DEVICE_ID && port == 7 &&
1704 !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED))
1705 return -EINVAL;
1706
1707 if (vlan->vid >= dev->num_vlans)
1708 return -ERANGE;
1709
1710 b53_enable_vlan(dev, port, true, dev->vlan_filtering);
1711
1712 return 0;
1713 }
1714
1715 int b53_vlan_add(struct dsa_switch *ds, int port,
1716 const struct switchdev_obj_port_vlan *vlan,
1717 struct netlink_ext_ack *extack)
1718 {
1719 struct b53_device *dev = ds->priv;
1720 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1721 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1722 struct b53_vlan *vl;
1723 u16 old_pvid, new_pvid;
1724 int err;
1725
1726 err = b53_vlan_prepare(ds, port, vlan);
1727 if (err)
1728 return err;
1729
1730 if (vlan->vid == 0)
1731 return 0;
1732
1733 old_pvid = dev->ports[port].pvid;
1734 if (pvid)
1735 new_pvid = vlan->vid;
1736 else if (!pvid && vlan->vid == old_pvid)
1737 new_pvid = b53_default_pvid(dev);
1738 else
1739 new_pvid = old_pvid;
1740 dev->ports[port].pvid = new_pvid;
1741
1742 vl = &dev->vlans[vlan->vid];
1743
1744 if (dsa_is_cpu_port(ds, port))
1745 untagged = false;
1746
1747 vl->members |= BIT(port);
1748 if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
1749 vl->untag |= BIT(port);
1750 else
1751 vl->untag &= ~BIT(port);
1752
1753 if (!dev->vlan_filtering)
1754 return 0;
1755
1756 b53_set_vlan_entry(dev, vlan->vid, vl);
1757 b53_fast_age_vlan(dev, vlan->vid);
1758
1759 if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) {
1760 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
1761 new_pvid);
1762 b53_fast_age_vlan(dev, old_pvid);
1763 }
1764
1765 return 0;
1766 }
1767 EXPORT_SYMBOL(b53_vlan_add);
1768
1769 int b53_vlan_del(struct dsa_switch *ds, int port,
1770 const struct switchdev_obj_port_vlan *vlan)
1771 {
1772 struct b53_device *dev = ds->priv;
1773 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1774 struct b53_vlan *vl;
1775 u16 pvid;
1776
1777 if (vlan->vid == 0)
1778 return 0;
1779
1780 pvid = dev->ports[port].pvid;
1781
1782 vl = &dev->vlans[vlan->vid];
1783
1784 vl->members &= ~BIT(port);
1785
1786 if (pvid == vlan->vid)
1787 pvid = b53_default_pvid(dev);
1788 dev->ports[port].pvid = pvid;
1789
1790 if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
1791 vl->untag &= ~(BIT(port));
1792
1793 if (!dev->vlan_filtering)
1794 return 0;
1795
1796 b53_set_vlan_entry(dev, vlan->vid, vl);
1797 b53_fast_age_vlan(dev, vlan->vid);
1798
1799 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
1800 b53_fast_age_vlan(dev, pvid);
1801
1802 return 0;
1803 }
1804 EXPORT_SYMBOL(b53_vlan_del);
1805
1806 /* Address Resolution Logic routines. Caller must hold &dev->arl_mutex. */
1807 static int b53_arl_op_wait(struct b53_device *dev)
1808 {
1809 unsigned int timeout = 10;
1810 u8 reg;
1811
1812 do {
1813 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
1814 if (!(reg & ARLTBL_START_DONE))
1815 return 0;
1816
1817 usleep_range(1000, 2000);
1818 } while (timeout--);
1819
1820 dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg);
1821
1822 return -ETIMEDOUT;
1823 }
1824
1825 static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
1826 {
1827 u8 reg;
1828
1829 if (op > ARLTBL_RW)
1830 return -EINVAL;
1831
1832 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
1833 reg |= ARLTBL_START_DONE;
1834 if (op)
1835 reg |= ARLTBL_RW;
1836 else
1837 reg &= ~ARLTBL_RW;
1838 if (dev->vlan_enabled)
1839 reg &= ~ARLTBL_IVL_SVL_SELECT;
1840 else
1841 reg |= ARLTBL_IVL_SVL_SELECT;
1842 b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);
1843
1844 return b53_arl_op_wait(dev);
1845 }
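/* The ARL table is accessed indirectly: b53_arl_op() first loads the MAC
 * (and VID) index registers, then b53_arl_rw_op() sets ARLTBL_START_DONE
 * with ARLTBL_RW selecting a read (op != 0) or a write-back (op == 0),
 * and b53_arl_op_wait() polls until the hardware clears START_DONE.
 */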
1846
1847 static void b53_arl_read_entry_25(struct b53_device *dev,
1848 struct b53_arl_entry *ent, u8 idx)
1849 {
1850 u8 vid_entry;
1851 u64 mac_vid;
1852
1853 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_VID_ENTRY_25(idx),
1854 &vid_entry);
1855 b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx),
1856 &mac_vid);
1857 b53_arl_to_entry_25(ent, mac_vid, vid_entry);
1858 }
1859
1860 static void b53_arl_write_entry_25(struct b53_device *dev,
1861 const struct b53_arl_entry *ent, u8 idx)
1862 {
1863 u8 vid_entry;
1864 u64 mac_vid;
1865
1866 b53_arl_from_entry_25(&mac_vid, &vid_entry, ent);
1867 b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_VID_ENTRY_25(idx), vid_entry);
1868 b53_write64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx),
1869 mac_vid);
1870 }
1871
1872 static void b53_arl_read_entry_89(struct b53_device *dev,
1873 struct b53_arl_entry *ent, u8 idx)
1874 {
1875 u64 mac_vid;
1876 u16 fwd_entry;
1877
1878 b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx),
1879 &mac_vid);
1880 b53_read16(dev, B53_ARLIO_PAGE, B53_ARLTBL_DATA_ENTRY(idx), &fwd_entry);
1881 b53_arl_to_entry_89(ent, mac_vid, fwd_entry);
1882 }
1883
1884 static void b53_arl_write_entry_89(struct b53_device *dev,
1885 const struct b53_arl_entry *ent, u8 idx)
1886 {
1887 u32 fwd_entry;
1888 u64 mac_vid;
1889
1890 b53_arl_from_entry_89(&mac_vid, &fwd_entry, ent);
1891 b53_write64(dev, B53_ARLIO_PAGE,
1892 B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
1893 b53_write16(dev, B53_ARLIO_PAGE,
1894 B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
1895 }
1896
1897 static void b53_arl_read_entry_95(struct b53_device *dev,
1898 struct b53_arl_entry *ent, u8 idx)
1899 {
1900 u32 fwd_entry;
1901 u64 mac_vid;
1902
1903 b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx),
1904 &mac_vid);
1905 b53_read32(dev, B53_ARLIO_PAGE, B53_ARLTBL_DATA_ENTRY(idx), &fwd_entry);
1906 b53_arl_to_entry(ent, mac_vid, fwd_entry);
1907 }
1908
1909 static void b53_arl_write_entry_95(struct b53_device *dev,
1910 const struct b53_arl_entry *ent, u8 idx)
1911 {
1912 u32 fwd_entry;
1913 u64 mac_vid;
1914
1915 b53_arl_from_entry(&mac_vid, &fwd_entry, ent);
1916 b53_write64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx),
1917 mac_vid);
1918 b53_write32(dev, B53_ARLIO_PAGE, B53_ARLTBL_DATA_ENTRY(idx),
1919 fwd_entry);
1920 }
1921
1922 static int b53_arl_read(struct b53_device *dev, const u8 *mac,
1923 u16 vid, struct b53_arl_entry *ent, u8 *idx)
1924 {
1925 DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
1926 unsigned int i;
1927 int ret;
1928
1929 ret = b53_arl_op_wait(dev);
1930 if (ret)
1931 return ret;
1932
1933 bitmap_zero(free_bins, dev->num_arl_bins);
1934
1935 /* Read the bins */
1936 for (i = 0; i < dev->num_arl_bins; i++) {
1937 b53_arl_read_entry(dev, ent, i);
1938
1939 if (!ent->is_valid) {
1940 set_bit(i, free_bins);
1941 continue;
1942 }
1943 if (!ether_addr_equal(ent->mac, mac))
1944 continue;
1945 if (dev->vlan_enabled && ent->vid != vid)
1946 continue;
1947 *idx = i;
1948 return 0;
1949 }
1950
1951 *idx = find_first_bit(free_bins, dev->num_arl_bins);
1952 return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT;
1953 }
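/* b53_arl_read() returns 0 with *idx pointing at the matching bin,
 * -ENOENT with *idx pointing at the first free bin (usable for a new
 * entry), -ENOSPC when every bin in this bucket is taken by another
 * address, or -ETIMEDOUT if the previous ARL operation never completed.
 */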
1954
1955 static int b53_arl_op(struct b53_device *dev, int op, int port,
1956 const unsigned char *addr, u16 vid, bool is_valid)
1957 {
1958 struct b53_arl_entry ent;
1959 u8 idx = 0;
1960 u64 mac;
1961 int ret;
1962
1963 /* Convert the array into a 64-bit MAC */
1964 mac = ether_addr_to_u64(addr);
1965
1966 /* Perform a read for the given MAC and VID */
1967 b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
1968 if (!is5325m(dev)) {
1969 if (is5325(dev) || is5365(dev))
1970 b53_write8(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
1971 else
1972 b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
1973 }
1974
1975 /* Issue a read operation for this MAC */
1976 ret = b53_arl_rw_op(dev, 1);
1977 if (ret)
1978 return ret;
1979
1980 ret = b53_arl_read(dev, addr, vid, &ent, &idx);
1981
1982 /* If this is a read, just finish now */
1983 if (op)
1984 return ret;
1985
1986 switch (ret) {
1987 case -ETIMEDOUT:
1988 return ret;
1989 case -ENOSPC:
1990 dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
1991 addr, vid);
1992 return is_valid ? ret : 0;
1993 case -ENOENT:
1994 /* We could not find a matching MAC, so reset to a new entry */
1995 dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n",
1996 addr, vid, idx);
1997 break;
1998 default:
1999 dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n",
2000 addr, vid, idx);
2001 break;
2002 }
2003
2004 /* For a multicast address, the port is a bitmask and the validity
2005 * is determined by having at least one port still being active
2006 */
2007 if (!is_multicast_ether_addr(addr)) {
2008 ent.port = port;
2009 ent.is_valid = is_valid;
2010 } else {
2011 if (is_valid)
2012 ent.port |= BIT(port);
2013 else
2014 ent.port &= ~BIT(port);
2015
2016 ent.is_valid = !!(ent.port);
2017 }
2018
2019 ent.vid = vid;
2020 ent.is_static = true;
2021 ent.is_age = false;
2022 memcpy(ent.mac, addr, ETH_ALEN);
2023 b53_arl_write_entry(dev, &ent, idx);
2024
2025 return b53_arl_rw_op(dev, 0);
2026 }
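/* b53_arl_op() summary: issue an ARL read for the {MAC, VID} key, locate
 * the matching or first free bin via b53_arl_read(), patch the entry
 * (unicast entries carry a port number, multicast entries a port bitmask
 * that keeps the entry valid while any bit is set), then write it back
 * with a second ARL operation. A non-zero op stops after the read.
 */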
2027
2028 int b53_fdb_add(struct dsa_switch *ds, int port,
2029 const unsigned char *addr, u16 vid,
2030 struct dsa_db db)
2031 {
2032 struct b53_device *priv = ds->priv;
2033 int ret;
2034
2035 mutex_lock(&priv->arl_mutex);
2036 ret = b53_arl_op(priv, 0, port, addr, vid, true);
2037 mutex_unlock(&priv->arl_mutex);
2038
2039 return ret;
2040 }
2041 EXPORT_SYMBOL(b53_fdb_add);
2042
2043 int b53_fdb_del(struct dsa_switch *ds, int port,
2044 const unsigned char *addr, u16 vid,
2045 struct dsa_db db)
2046 {
2047 struct b53_device *priv = ds->priv;
2048 int ret;
2049
2050 mutex_lock(&priv->arl_mutex);
2051 ret = b53_arl_op(priv, 0, port, addr, vid, false);
2052 mutex_unlock(&priv->arl_mutex);
2053
2054 return ret;
2055 }
2056 EXPORT_SYMBOL(b53_fdb_del);
2057
2058 static void b53_read_arl_srch_ctl(struct b53_device *dev, u8 *val)
2059 {
2060 u8 offset;
2061
2062 if (is5325(dev) || is5365(dev))
2063 offset = B53_ARL_SRCH_CTL_25;
2064 else if (dev->chip_id == BCM5389_DEVICE_ID || is5397_98(dev) ||
2065 is63xx(dev))
2066 offset = B53_ARL_SRCH_CTL_89;
2067 else
2068 offset = B53_ARL_SRCH_CTL;
2069
2070 if (is63xx(dev)) {
2071 u16 val16;
2072
2073 b53_read16(dev, B53_ARLIO_PAGE, offset, &val16);
2074 *val = val16 & 0xff;
2075 } else {
2076 b53_read8(dev, B53_ARLIO_PAGE, offset, val);
2077 }
2078 }
2079
2080 static void b53_write_arl_srch_ctl(struct b53_device *dev, u8 val)
2081 {
2082 u8 offset;
2083
2084 if (is5325(dev) || is5365(dev))
2085 offset = B53_ARL_SRCH_CTL_25;
2086 else if (dev->chip_id == BCM5389_DEVICE_ID || is5397_98(dev) ||
2087 is63xx(dev))
2088 offset = B53_ARL_SRCH_CTL_89;
2089 else
2090 offset = B53_ARL_SRCH_CTL;
2091
2092 if (is63xx(dev))
2093 b53_write16(dev, B53_ARLIO_PAGE, offset, val);
2094 else
2095 b53_write8(dev, B53_ARLIO_PAGE, offset, val);
2096 }
2097
2098 static int b53_arl_search_wait(struct b53_device *dev)
2099 {
2100 unsigned int timeout = 1000;
2101 u8 reg;
2102
2103 do {
2104 b53_read_arl_srch_ctl(dev, &reg);
2105 if (!(reg & ARL_SRCH_STDN))
2106 return -ENOENT;
2107
2108 if (reg & ARL_SRCH_VLID)
2109 return 0;
2110
2111 usleep_range(1000, 2000);
2112 } while (timeout--);
2113
2114 return -ETIMEDOUT;
2115 }
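/* A cleared ARL_SRCH_STDN bit means the hardware has walked the whole
 * table and the search is over (-ENOENT); ARL_SRCH_VLID set means a valid
 * result is latched in the search result registers and can be read out.
 */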
2116
2117 static void b53_arl_search_read_25(struct b53_device *dev, u8 idx,
2118 struct b53_arl_entry *ent)
2119 {
2120 u64 mac_vid;
2121 u8 ext;
2122
2123 b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_EXT_25, &ext);
2124 b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_25,
2125 &mac_vid);
2126 b53_arl_search_to_entry_25(ent, mac_vid, ext);
2127 }
2128
2129 static void b53_arl_search_read_89(struct b53_device *dev, u8 idx,
2130 struct b53_arl_entry *ent)
2131 {
2132 u16 fwd_entry;
2133 u64 mac_vid;
2134
2135 b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_MACVID_89,
2136 &mac_vid);
2137 b53_read16(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_89, &fwd_entry);
2138 b53_arl_to_entry_89(ent, mac_vid, fwd_entry);
2139 }
2140
2141 static void b53_arl_search_read_63xx(struct b53_device *dev, u8 idx,
2142 struct b53_arl_entry *ent)
2143 {
2144 u16 fwd_entry;
2145 u64 mac_vid;
2146
2147 b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_MACVID_63XX,
2148 &mac_vid);
2149 b53_read16(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_63XX, &fwd_entry);
2150 b53_arl_search_to_entry_63xx(ent, mac_vid, fwd_entry);
2151 }
2152
2153 static void b53_arl_search_read_95(struct b53_device *dev, u8 idx,
2154 struct b53_arl_entry *ent)
2155 {
2156 u32 fwd_entry;
2157 u64 mac_vid;
2158
2159 b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_MACVID(idx),
2160 &mac_vid);
2161 b53_read32(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL(idx),
2162 &fwd_entry);
2163 b53_arl_to_entry(ent, mac_vid, fwd_entry);
2164 }
2165
2166 static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
2167 dsa_fdb_dump_cb_t *cb, void *data)
2168 {
2169 if (!ent->is_valid)
2170 return 0;
2171
2172 if (is_multicast_ether_addr(ent->mac))
2173 return 0;
2174
2175 if (port != ent->port)
2176 return 0;
2177
2178 return cb(ent->mac, ent->vid, ent->is_static, data);
2179 }
2180
2181 int b53_fdb_dump(struct dsa_switch *ds, int port,
2182 dsa_fdb_dump_cb_t *cb, void *data)
2183 {
2184 unsigned int count = 0, results_per_hit = 1;
2185 struct b53_device *priv = ds->priv;
2186 struct b53_arl_entry results[2];
2187 int ret;
2188
2189 if (priv->num_arl_bins > 2)
2190 results_per_hit = 2;
2191
2192 mutex_lock(&priv->arl_mutex);
2193
2194 /* Start search operation */
2195 b53_write_arl_srch_ctl(priv, ARL_SRCH_STDN);
2196
2197 do {
2198 ret = b53_arl_search_wait(priv);
2199 if (ret)
2200 break;
2201
2202 b53_arl_search_read(priv, 0, &results[0]);
2203 ret = b53_fdb_copy(port, &results[0], cb, data);
2204 if (ret)
2205 break;
2206
2207 if (results_per_hit == 2) {
2208 b53_arl_search_read(priv, 1, &results[1]);
2209 ret = b53_fdb_copy(port, &results[1], cb, data);
2210 if (ret)
2211 break;
2212
2213 if (!results[0].is_valid && !results[1].is_valid)
2214 break;
2215 }
2216
2217 } while (count++ < b53_max_arl_entries(priv) / results_per_hit);
2218
2219 mutex_unlock(&priv->arl_mutex);
2220
2221 return 0;
2222 }
2223 EXPORT_SYMBOL(b53_fdb_dump);
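/* Minimal sketch of a dump callback as b53_fdb_dump() invokes it through
 * b53_fdb_copy(); the function name and the print are hypothetical, only
 * the dsa_fdb_dump_cb_t signature is assumed from <net/dsa.h>:
 *
 *	static int example_fdb_cb(const unsigned char *addr, u16 vid,
 *				  bool is_static, void *data)
 *	{
 *		pr_info("fdb: %pM vid %u %s\n", addr, vid,
 *			is_static ? "static" : "dynamic");
 *		return 0;	// non-zero aborts the dump
 *	}
 */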
2224
2225 int b53_mdb_add(struct dsa_switch *ds, int port,
2226 const struct switchdev_obj_port_mdb *mdb,
2227 struct dsa_db db)
2228 {
2229 struct b53_device *priv = ds->priv;
2230 int ret;
2231
2232 /* 5325 and 5365 require some more massaging, but could
2233 * be supported eventually
2234 */
2235 if (is5325(priv) || is5365(priv))
2236 return -EOPNOTSUPP;
2237
2238 mutex_lock(&priv->arl_mutex);
2239 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
2240 mutex_unlock(&priv->arl_mutex);
2241
2242 return ret;
2243 }
2244 EXPORT_SYMBOL(b53_mdb_add);
2245
2246 int b53_mdb_del(struct dsa_switch *ds, int port,
2247 const struct switchdev_obj_port_mdb *mdb,
2248 struct dsa_db db)
2249 {
2250 struct b53_device *priv = ds->priv;
2251 int ret;
2252
2253 mutex_lock(&priv->arl_mutex);
2254 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false);
2255 mutex_unlock(&priv->arl_mutex);
2256 if (ret)
2257 dev_err(ds->dev, "failed to delete MDB entry\n");
2258
2259 return ret;
2260 }
2261 EXPORT_SYMBOL(b53_mdb_del);
2262
2263 int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
2264 bool *tx_fwd_offload, struct netlink_ext_ack *extack)
2265 {
2266 struct b53_device *dev = ds->priv;
2267 struct b53_vlan *vl;
2268 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
2269 u16 pvlan, reg, pvid;
2270 unsigned int i;
2271
2272 /* On 7278, port 7 which connects to the ASP should only receive
2273 * traffic from matching CFP rules.
2274 */
2275 if (dev->chip_id == BCM7278_DEVICE_ID && port == 7)
2276 return -EINVAL;
2277
2278 pvid = b53_default_pvid(dev);
2279 vl = &dev->vlans[pvid];
2280
2281 if (dev->vlan_filtering) {
2282 /* Make this port leave the "join all VLANs" mode since we will have
2283 * proper VLAN entries from now on
2284 */
2285 if (is58xx(dev)) {
2286 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN,
2287 &reg);
2288 reg &= ~BIT(port);
2289 if ((reg & BIT(cpu_port)) == BIT(cpu_port))
2290 reg &= ~BIT(cpu_port);
2291 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN,
2292 reg);
2293 }
2294
2295 b53_get_vlan_entry(dev, pvid, vl);
2296 vl->members &= ~BIT(port);
2297 b53_set_vlan_entry(dev, pvid, vl);
2298 }
2299
2300 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
2301
2302 b53_for_each_port(dev, i) {
2303 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
2304 continue;
2305
2306 /* Add this local port to the remote port VLAN control
2307 * membership and update the remote port bitmask
2308 */
2309 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
2310 reg |= BIT(port);
2311 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
2312 dev->ports[i].vlan_ctl_mask = reg;
2313
2314 pvlan |= BIT(i);
2315 }
2316
2317 /* Disable redirection of unknown SA to the CPU port */
2318 b53_set_eap_mode(dev, port, EAP_MODE_BASIC);
2319
2320 /* Configure the local port VLAN control membership to include
2321 * remote ports and update the local port bitmask
2322 */
2323 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
2324 dev->ports[port].vlan_ctl_mask = pvlan;
2325
2326 return 0;
2327 }
2328 EXPORT_SYMBOL(b53_br_join);
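/* Note: dev->ports[N].vlan_ctl_mask caches B53_PVLAN_PORT_MASK(N); each
 * bit set in that register allows traffic from port N to be forwarded to
 * the corresponding port. Joining a bridge therefore sets the members'
 * bits in each other's masks, as done above.
 */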
2329
2330 void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
2331 {
2332 struct b53_device *dev = ds->priv;
2333 struct b53_vlan *vl;
2334 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
2335 unsigned int i;
2336 u16 pvlan, reg, pvid;
2337
2338 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
2339
2340 b53_for_each_port(dev, i) {
2341 /* Don't touch the remaining ports */
2342 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
2343 continue;
2344
2345 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
2346 reg &= ~BIT(port);
2347 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
2348 dev->ports[port].vlan_ctl_mask = reg;
2349
2350 /* Prevent self removal to preserve isolation */
2351 if (port != i)
2352 pvlan &= ~BIT(i);
2353 }
2354
2355 /* Enable redirection of unknown SA to the CPU port */
2356 b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
2357
2358 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
2359 dev->ports[port].vlan_ctl_mask = pvlan;
2360
2361 pvid = b53_default_pvid(dev);
2362 vl = &dev->vlans[pvid];
2363
2364 if (dev->vlan_filtering) {
2365 /* Make this port join all VLANs without VLAN entries */
2366 if (is58xx(dev)) {
2367 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
2368 reg |= BIT(port);
2369 if (!(reg & BIT(cpu_port)))
2370 reg |= BIT(cpu_port);
2371 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
2372 }
2373
2374 b53_get_vlan_entry(dev, pvid, vl);
2375 vl->members |= BIT(port);
2376 b53_set_vlan_entry(dev, pvid, vl);
2377 }
2378 }
2379 EXPORT_SYMBOL(b53_br_leave);
2380
2381 void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
2382 {
2383 struct b53_device *dev = ds->priv;
2384 u8 hw_state;
2385 u8 reg;
2386
2387 switch (state) {
2388 case BR_STATE_DISABLED:
2389 hw_state = PORT_CTRL_DIS_STATE;
2390 break;
2391 case BR_STATE_LISTENING:
2392 hw_state = PORT_CTRL_LISTEN_STATE;
2393 break;
2394 case BR_STATE_LEARNING:
2395 hw_state = PORT_CTRL_LEARN_STATE;
2396 break;
2397 case BR_STATE_FORWARDING:
2398 hw_state = PORT_CTRL_FWD_STATE;
2399 break;
2400 case BR_STATE_BLOCKING:
2401 hw_state = PORT_CTRL_BLOCK_STATE;
2402 break;
2403 default:
2404 dev_err(ds->dev, "invalid STP state: %d\n", state);
2405 return;
2406 }
2407
2408 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
2409 reg &= ~PORT_CTRL_STP_STATE_MASK;
2410 reg |= hw_state;
2411 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
2412 }
2413 EXPORT_SYMBOL(b53_br_set_stp_state);
2414
2415 void b53_br_fast_age(struct dsa_switch *ds, int port)
2416 {
2417 struct b53_device *dev = ds->priv;
2418
2419 if (b53_fast_age_port(dev, port))
2420 dev_err(ds->dev, "fast ageing failed\n");
2421 }
2422 EXPORT_SYMBOL(b53_br_fast_age);
2423
2424 int b53_br_flags_pre(struct dsa_switch *ds, int port,
2425 struct switchdev_brport_flags flags,
2426 struct netlink_ext_ack *extack)
2427 {
2428 struct b53_device *dev = ds->priv;
2429 unsigned long mask = (BR_FLOOD | BR_MCAST_FLOOD | BR_ISOLATED);
2430
2431 if (!is5325(dev))
2432 mask |= BR_LEARNING;
2433
2434 if (flags.mask & ~mask)
2435 return -EINVAL;
2436
2437 return 0;
2438 }
2439 EXPORT_SYMBOL(b53_br_flags_pre);
2440
2441 int b53_br_flags(struct dsa_switch *ds, int port,
2442 struct switchdev_brport_flags flags,
2443 struct netlink_ext_ack *extack)
2444 {
2445 if (flags.mask & BR_FLOOD)
2446 b53_port_set_ucast_flood(ds->priv, port,
2447 !!(flags.val & BR_FLOOD));
2448 if (flags.mask & BR_MCAST_FLOOD)
2449 b53_port_set_mcast_flood(ds->priv, port,
2450 !!(flags.val & BR_MCAST_FLOOD));
2451 if (flags.mask & BR_LEARNING)
2452 b53_port_set_learning(ds->priv, port,
2453 !!(flags.val & BR_LEARNING));
2454 if (flags.mask & BR_ISOLATED)
2455 b53_port_set_isolated(ds->priv, port,
2456 !!(flags.val & BR_ISOLATED));
2457
2458 return 0;
2459 }
2460 EXPORT_SYMBOL(b53_br_flags);
2461
2462 static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
2463 {
2464 /* Broadcom switches will accept enabling Broadcom tags on the
2465 * following ports: 5, 7 and 8; any other port is not supported
2466 */
2467 switch (port) {
2468 case B53_CPU_PORT_25:
2469 case 7:
2470 case B53_CPU_PORT:
2471 return true;
2472 }
2473
2474 return false;
2475 }
2476
2477 static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port,
2478 enum dsa_tag_protocol tag_protocol)
2479 {
2480 bool ret = b53_possible_cpu_port(ds, port);
2481
2482 if (!ret) {
2483 dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n",
2484 port);
2485 return ret;
2486 }
2487
2488 switch (tag_protocol) {
2489 case DSA_TAG_PROTO_BRCM:
2490 case DSA_TAG_PROTO_BRCM_PREPEND:
2491 dev_warn(ds->dev,
2492 "Port %d is stacked to Broadcom tag switch\n", port);
2493 ret = false;
2494 break;
2495 default:
2496 ret = true;
2497 break;
2498 }
2499
2500 return ret;
2501 }
2502
2503 enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
2504 enum dsa_tag_protocol mprot)
2505 {
2506 struct b53_device *dev = ds->priv;
2507
2508 if (!b53_can_enable_brcm_tags(ds, port, mprot)) {
2509 dev->tag_protocol = DSA_TAG_PROTO_NONE;
2510 goto out;
2511 }
2512
2513 /* Older models require different 6 byte tags */
2514 if (is5325(dev) || is5365(dev)) {
2515 dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY_FCS;
2516 goto out;
2517 } else if (is63xx(dev)) {
2518 dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY;
2519 goto out;
2520 }
2521
2522 /* Broadcom BCM58xx chips have a flow accelerator on Port 8
2523 * which requires us to use the prepended Broadcom tag type
2524 */
2525 if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT) {
2526 dev->tag_protocol = DSA_TAG_PROTO_BRCM_PREPEND;
2527 goto out;
2528 }
2529
2530 dev->tag_protocol = DSA_TAG_PROTO_BRCM;
2531 out:
2532 return dev->tag_protocol;
2533 }
2534 EXPORT_SYMBOL(b53_get_tag_protocol);
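/* Tag protocol selection above, in short: ports that cannot carry Broadcom
 * tags get DSA_TAG_PROTO_NONE; BCM5325/5365 use the legacy FCS variant,
 * BCM63xx the legacy variant, BCM58xx's port 8 the prepended variant, and
 * everything else the regular Broadcom tag.
 */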
2535
2536 int b53_mirror_add(struct dsa_switch *ds, int port,
2537 struct dsa_mall_mirror_tc_entry *mirror, bool ingress,
2538 struct netlink_ext_ack *extack)
2539 {
2540 struct b53_device *dev = ds->priv;
2541 u16 reg, loc;
2542
2543 if (ingress)
2544 loc = B53_IG_MIR_CTL;
2545 else
2546 loc = B53_EG_MIR_CTL;
2547
2548 b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
2549 reg |= BIT(port);
2550 b53_write16(dev, B53_MGMT_PAGE, loc, reg);
2551
2552 b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
2553 reg &= ~CAP_PORT_MASK;
2554 reg |= mirror->to_local_port;
2555 reg |= MIRROR_EN;
2556 b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
2557
2558 return 0;
2559 }
2560 EXPORT_SYMBOL(b53_mirror_add);
2561
2562 void b53_mirror_del(struct dsa_switch *ds, int port,
2563 struct dsa_mall_mirror_tc_entry *mirror)
2564 {
2565 struct b53_device *dev = ds->priv;
2566 bool loc_disable = false, other_loc_disable = false;
2567 u16 reg, loc;
2568
2569 if (mirror->ingress)
2570 loc = B53_IG_MIR_CTL;
2571 else
2572 loc = B53_EG_MIR_CTL;
2573
2574 /* Update the desired ingress/egress register */
2575 b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
2576 reg &= ~BIT(port);
2577 if (!(reg & MIRROR_MASK))
2578 loc_disable = true;
2579 b53_write16(dev, B53_MGMT_PAGE, loc, reg);
2580
2581 /* Now look at the other one to know if we can disable mirroring
2582 * entirely
2583 */
2584 if (mirror->ingress)
2585 b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, &reg);
2586 else
2587 b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, &reg);
2588 if (!(reg & MIRROR_MASK))
2589 other_loc_disable = true;
2590
2591 b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
2592 /* Both no longer have ports, let's disable mirroring */
2593 if (loc_disable && other_loc_disable) {
2594 reg &= ~MIRROR_EN;
2595 reg &= ~mirror->to_local_port;
2596 }
2597 b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
2598 }
2599 EXPORT_SYMBOL(b53_mirror_del);
2600
2601 /* Returns 0 if EEE was not enabled, or 1 otherwise
2602 */
2603 int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
2604 {
2605 int ret;
2606
2607 if (!b53_support_eee(ds, port))
2608 return 0;
2609
2610 ret = phy_init_eee(phy, false);
2611 if (ret)
2612 return 0;
2613
2614 b53_eee_enable_set(ds, port, true);
2615
2616 return 1;
2617 }
2618 EXPORT_SYMBOL(b53_eee_init);
2619
2620 bool b53_support_eee(struct dsa_switch *ds, int port)
2621 {
2622 struct b53_device *dev = ds->priv;
2623
2624 return !is5325(dev) && !is5365(dev) && !is63xx(dev);
2625 }
2626 EXPORT_SYMBOL(b53_support_eee);
2627
2628 int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
2629 {
2630 struct b53_device *dev = ds->priv;
2631 struct ethtool_keee *p = &dev->ports[port].eee;
2632
2633 p->eee_enabled = e->eee_enabled;
2634 b53_eee_enable_set(ds, port, e->eee_enabled);
2635
2636 return 0;
2637 }
2638 EXPORT_SYMBOL(b53_set_mac_eee);
2639
2640 static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu)
2641 {
2642 struct b53_device *dev = ds->priv;
2643 bool enable_jumbo;
2644 bool allow_10_100;
2645
2646 if (is5325(dev) || is5365(dev))
2647 return 0;
2648
2649 if (!dsa_is_cpu_port(ds, port))
2650 return 0;
2651
2652 enable_jumbo = (mtu > ETH_DATA_LEN);
2653 allow_10_100 = !is63xx(dev);
2654
2655 return b53_set_jumbo(dev, enable_jumbo, allow_10_100);
2656 }
2657
2658 static int b53_get_max_mtu(struct dsa_switch *ds, int port)
2659 {
2660 struct b53_device *dev = ds->priv;
2661
2662 if (is5325(dev) || is5365(dev))
2663 return B53_MAX_MTU_25;
2664
2665 return B53_MAX_MTU;
2666 }
2667
2668 int b53_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
2669 {
2670 struct b53_device *dev = ds->priv;
2671 u32 atc;
2672 int reg;
2673
2674 if (is63xx(dev))
2675 reg = B53_AGING_TIME_CONTROL_63XX;
2676 else
2677 reg = B53_AGING_TIME_CONTROL;
2678
2679 if (dev->chip_id == BCM53101_DEVICE_ID)
2680 atc = DIV_ROUND_CLOSEST(msecs, 500);
2681 else
2682 atc = DIV_ROUND_CLOSEST(msecs, 1000);
2683
2684 if (!is5325(dev) && !is5365(dev))
2685 atc |= AGE_CHANGE;
2686
2687 b53_write32(dev, B53_MGMT_PAGE, reg, atc);
2688
2689 return 0;
2690 }
2691 EXPORT_SYMBOL_GPL(b53_set_ageing_time);
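/* Worked example (derived from the code above, not from a datasheet):
 * for the bridge default ageing time of 300 seconds, msecs = 300000, so
 * atc = DIV_ROUND_CLOSEST(300000, 1000) = 300 on most chips, or
 * DIV_ROUND_CLOSEST(300000, 500) = 600 on BCM53101, whose timer ticks in
 * 500 ms units; AGE_CHANGE is then OR'd in except on BCM5325/5365.
 */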
2692
2693 static const struct phylink_mac_ops b53_phylink_mac_ops = {
2694 .mac_select_pcs = b53_phylink_mac_select_pcs,
2695 .mac_config = b53_phylink_mac_config,
2696 .mac_link_down = b53_phylink_mac_link_down,
2697 .mac_link_up = b53_phylink_mac_link_up,
2698 };
2699
2700 static const struct dsa_switch_ops b53_switch_ops = {
2701 .get_tag_protocol = b53_get_tag_protocol,
2702 .setup = b53_setup,
2703 .teardown = b53_teardown,
2704 .get_strings = b53_get_strings,
2705 .get_ethtool_stats = b53_get_ethtool_stats,
2706 .get_sset_count = b53_get_sset_count,
2707 .get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
2708 .phy_read = b53_phy_read16,
2709 .phy_write = b53_phy_write16,
2710 .phylink_get_caps = b53_phylink_get_caps,
2711 .port_setup = b53_setup_port,
2712 .port_enable = b53_enable_port,
2713 .port_disable = b53_disable_port,
2714 .support_eee = b53_support_eee,
2715 .set_mac_eee = b53_set_mac_eee,
2716 .set_ageing_time = b53_set_ageing_time,
2717 .port_bridge_join = b53_br_join,
2718 .port_bridge_leave = b53_br_leave,
2719 .port_pre_bridge_flags = b53_br_flags_pre,
2720 .port_bridge_flags = b53_br_flags,
2721 .port_stp_state_set = b53_br_set_stp_state,
2722 .port_fast_age = b53_br_fast_age,
2723 .port_vlan_filtering = b53_vlan_filtering,
2724 .port_vlan_add = b53_vlan_add,
2725 .port_vlan_del = b53_vlan_del,
2726 .port_fdb_dump = b53_fdb_dump,
2727 .port_fdb_add = b53_fdb_add,
2728 .port_fdb_del = b53_fdb_del,
2729 .port_mirror_add = b53_mirror_add,
2730 .port_mirror_del = b53_mirror_del,
2731 .port_mdb_add = b53_mdb_add,
2732 .port_mdb_del = b53_mdb_del,
2733 .port_max_mtu = b53_get_max_mtu,
2734 .port_change_mtu = b53_change_mtu,
2735 };
2736
2737 static const struct b53_arl_ops b53_arl_ops_25 = {
2738 .arl_read_entry = b53_arl_read_entry_25,
2739 .arl_write_entry = b53_arl_write_entry_25,
2740 .arl_search_read = b53_arl_search_read_25,
2741 };
2742
2743 static const struct b53_arl_ops b53_arl_ops_89 = {
2744 .arl_read_entry = b53_arl_read_entry_89,
2745 .arl_write_entry = b53_arl_write_entry_89,
2746 .arl_search_read = b53_arl_search_read_89,
2747 };
2748
2749 static const struct b53_arl_ops b53_arl_ops_63xx = {
2750 .arl_read_entry = b53_arl_read_entry_89,
2751 .arl_write_entry = b53_arl_write_entry_89,
2752 .arl_search_read = b53_arl_search_read_63xx,
2753 };
2754
2755 static const struct b53_arl_ops b53_arl_ops_95 = {
2756 .arl_read_entry = b53_arl_read_entry_95,
2757 .arl_write_entry = b53_arl_write_entry_95,
2758 .arl_search_read = b53_arl_search_read_95,
2759 };
2760
2761 struct b53_chip_data {
2762 u32 chip_id;
2763 const char *dev_name;
2764 u16 vlans;
2765 u16 enabled_ports;
2766 u8 imp_port;
2767 u8 cpu_port;
2768 u8 vta_regs[3];
2769 u8 arl_bins;
2770 u16 arl_buckets;
2771 u8 duplex_reg;
2772 u8 jumbo_pm_reg;
2773 u8 jumbo_size_reg;
2774 const struct b53_arl_ops *arl_ops;
2775 };
2776
2777 #define B53_VTA_REGS \
2778 { B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY }
2779 #define B53_VTA_REGS_9798 \
2780 { B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 }
2781 #define B53_VTA_REGS_63XX \
2782 { B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX }
2783
2784 static const struct b53_chip_data b53_switch_chips[] = {
2785 {
2786 .chip_id = BCM5325_DEVICE_ID,
2787 .dev_name = "BCM5325",
2788 .vlans = 16,
2789 .enabled_ports = 0x3f,
2790 .arl_bins = 2,
2791 .arl_buckets = 1024,
2792 .imp_port = 5,
2793 .duplex_reg = B53_DUPLEX_STAT_FE,
2794 .arl_ops = &b53_arl_ops_25,
2795 },
2796 {
2797 .chip_id = BCM5365_DEVICE_ID,
2798 .dev_name = "BCM5365",
2799 .vlans = 256,
2800 .enabled_ports = 0x3f,
2801 .arl_bins = 2,
2802 .arl_buckets = 1024,
2803 .imp_port = 5,
2804 .duplex_reg = B53_DUPLEX_STAT_FE,
2805 .arl_ops = &b53_arl_ops_25,
2806 },
2807 {
2808 .chip_id = BCM5389_DEVICE_ID,
2809 .dev_name = "BCM5389",
2810 .vlans = 4096,
2811 .enabled_ports = 0x11f,
2812 .arl_bins = 4,
2813 .arl_buckets = 1024,
2814 .imp_port = 8,
2815 .vta_regs = B53_VTA_REGS,
2816 .duplex_reg = B53_DUPLEX_STAT_GE,
2817 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2818 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2819 .arl_ops = &b53_arl_ops_89,
2820 },
2821 {
2822 .chip_id = BCM5395_DEVICE_ID,
2823 .dev_name = "BCM5395",
2824 .vlans = 4096,
2825 .enabled_ports = 0x11f,
2826 .arl_bins = 4,
2827 .arl_buckets = 1024,
2828 .imp_port = 8,
2829 .vta_regs = B53_VTA_REGS,
2830 .duplex_reg = B53_DUPLEX_STAT_GE,
2831 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2832 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2833 .arl_ops = &b53_arl_ops_95,
2834 },
2835 {
2836 .chip_id = BCM5397_DEVICE_ID,
2837 .dev_name = "BCM5397",
2838 .vlans = 4096,
2839 .enabled_ports = 0x11f,
2840 .arl_bins = 4,
2841 .arl_buckets = 1024,
2842 .imp_port = 8,
2843 .vta_regs = B53_VTA_REGS_9798,
2844 .duplex_reg = B53_DUPLEX_STAT_GE,
2845 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2846 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2847 .arl_ops = &b53_arl_ops_89,
2848 },
2849 {
2850 .chip_id = BCM5398_DEVICE_ID,
2851 .dev_name = "BCM5398",
2852 .vlans = 4096,
2853 .enabled_ports = 0x17f,
2854 .arl_bins = 4,
2855 .arl_buckets = 1024,
2856 .imp_port = 8,
2857 .vta_regs = B53_VTA_REGS_9798,
2858 .duplex_reg = B53_DUPLEX_STAT_GE,
2859 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2860 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2861 .arl_ops = &b53_arl_ops_89,
2862 },
2863 {
2864 .chip_id = BCM53101_DEVICE_ID,
2865 .dev_name = "BCM53101",
2866 .vlans = 4096,
2867 .enabled_ports = 0x11f,
2868 .arl_bins = 4,
2869 .arl_buckets = 512,
2870 .vta_regs = B53_VTA_REGS,
2871 .imp_port = 8,
2872 .duplex_reg = B53_DUPLEX_STAT_GE,
2873 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2874 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2875 .arl_ops = &b53_arl_ops_95,
2876 },
2877 {
2878 .chip_id = BCM53115_DEVICE_ID,
2879 .dev_name = "BCM53115",
2880 .vlans = 4096,
2881 .enabled_ports = 0x11f,
2882 .arl_bins = 4,
2883 .arl_buckets = 1024,
2884 .vta_regs = B53_VTA_REGS,
2885 .imp_port = 8,
2886 .duplex_reg = B53_DUPLEX_STAT_GE,
2887 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2888 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2889 .arl_ops = &b53_arl_ops_95,
2890 },
2891 {
2892 .chip_id = BCM53125_DEVICE_ID,
2893 .dev_name = "BCM53125",
2894 .vlans = 4096,
2895 .enabled_ports = 0x1ff,
2896 .arl_bins = 4,
2897 .arl_buckets = 1024,
2898 .imp_port = 8,
2899 .vta_regs = B53_VTA_REGS,
2900 .duplex_reg = B53_DUPLEX_STAT_GE,
2901 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2902 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2903 .arl_ops = &b53_arl_ops_95,
2904 },
2905 {
2906 .chip_id = BCM53128_DEVICE_ID,
2907 .dev_name = "BCM53128",
2908 .vlans = 4096,
2909 .enabled_ports = 0x1ff,
2910 .arl_bins = 4,
2911 .arl_buckets = 1024,
2912 .imp_port = 8,
2913 .vta_regs = B53_VTA_REGS,
2914 .duplex_reg = B53_DUPLEX_STAT_GE,
2915 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2916 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2917 .arl_ops = &b53_arl_ops_95,
2918 },
2919 {
2920 .chip_id = BCM63XX_DEVICE_ID,
2921 .dev_name = "BCM63xx",
2922 .vlans = 4096,
2923 .enabled_ports = 0, /* pdata must provide them */
2924 .arl_bins = 1,
2925 .arl_buckets = 4096,
2926 .imp_port = 8,
2927 .vta_regs = B53_VTA_REGS_63XX,
2928 .duplex_reg = B53_DUPLEX_STAT_63XX,
2929 .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
2930 .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
2931 .arl_ops = &b53_arl_ops_63xx,
2932 },
2933 {
2934 .chip_id = BCM53010_DEVICE_ID,
2935 .dev_name = "BCM53010",
2936 .vlans = 4096,
2937 .enabled_ports = 0x1bf,
2938 .arl_bins = 4,
2939 .arl_buckets = 1024,
2940 .imp_port = 8,
2941 .vta_regs = B53_VTA_REGS,
2942 .duplex_reg = B53_DUPLEX_STAT_GE,
2943 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2944 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2945 .arl_ops = &b53_arl_ops_95,
2946 },
2947 {
2948 .chip_id = BCM53011_DEVICE_ID,
2949 .dev_name = "BCM53011",
2950 .vlans = 4096,
2951 .enabled_ports = 0x1bf,
2952 .arl_bins = 4,
2953 .arl_buckets = 1024,
2954 .imp_port = 8,
2955 .vta_regs = B53_VTA_REGS,
2956 .duplex_reg = B53_DUPLEX_STAT_GE,
2957 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2958 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2959 .arl_ops = &b53_arl_ops_95,
2960 },
2961 {
2962 .chip_id = BCM53012_DEVICE_ID,
2963 .dev_name = "BCM53012",
2964 .vlans = 4096,
2965 .enabled_ports = 0x1bf,
2966 .arl_bins = 4,
2967 .arl_buckets = 1024,
2968 .imp_port = 8,
2969 .vta_regs = B53_VTA_REGS,
2970 .duplex_reg = B53_DUPLEX_STAT_GE,
2971 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2972 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2973 .arl_ops = &b53_arl_ops_95,
2974 },
2975 {
2976 .chip_id = BCM53018_DEVICE_ID,
2977 .dev_name = "BCM53018",
2978 .vlans = 4096,
2979 .enabled_ports = 0x1bf,
2980 .arl_bins = 4,
2981 .arl_buckets = 1024,
2982 .imp_port = 8,
2983 .vta_regs = B53_VTA_REGS,
2984 .duplex_reg = B53_DUPLEX_STAT_GE,
2985 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2986 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2987 .arl_ops = &b53_arl_ops_95,
2988 },
2989 {
2990 .chip_id = BCM53019_DEVICE_ID,
2991 .dev_name = "BCM53019",
2992 .vlans = 4096,
2993 .enabled_ports = 0x1bf,
2994 .arl_bins = 4,
2995 .arl_buckets = 1024,
2996 .imp_port = 8,
2997 .vta_regs = B53_VTA_REGS,
2998 .duplex_reg = B53_DUPLEX_STAT_GE,
2999 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
3000 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
3001 .arl_ops = &b53_arl_ops_95,
3002 },
3003 {
3004 .chip_id = BCM58XX_DEVICE_ID,
3005 .dev_name = "BCM585xx/586xx/88312",
3006 .vlans = 4096,
3007 .enabled_ports = 0x1ff,
3008 .arl_bins = 4,
3009 .arl_buckets = 1024,
3010 .imp_port = 8,
3011 .vta_regs = B53_VTA_REGS,
3012 .duplex_reg = B53_DUPLEX_STAT_GE,
3013 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
3014 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
3015 .arl_ops = &b53_arl_ops_95,
3016 },
3017 {
3018 .chip_id = BCM583XX_DEVICE_ID,
3019 .dev_name = "BCM583xx/11360",
3020 .vlans = 4096,
3021 .enabled_ports = 0x103,
3022 .arl_bins = 4,
3023 .arl_buckets = 1024,
3024 .imp_port = 8,
3025 .vta_regs = B53_VTA_REGS,
3026 .duplex_reg = B53_DUPLEX_STAT_GE,
3027 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
3028 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
3029 .arl_ops = &b53_arl_ops_95,
3030 },
3031 /* Starfighter 2 */
3032 {
3033 .chip_id = BCM4908_DEVICE_ID,
3034 .dev_name = "BCM4908",
3035 .vlans = 4096,
3036 .enabled_ports = 0x1bf,
3037 .arl_bins = 4,
3038 .arl_buckets = 256,
3039 .imp_port = 8,
3040 .vta_regs = B53_VTA_REGS,
3041 .duplex_reg = B53_DUPLEX_STAT_GE,
3042 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
3043 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
3044 .arl_ops = &b53_arl_ops_95,
3045 },
3046 {
3047 .chip_id = BCM7445_DEVICE_ID,
3048 .dev_name = "BCM7445",
3049 .vlans = 4096,
3050 .enabled_ports = 0x1ff,
3051 .arl_bins = 4,
3052 .arl_buckets = 1024,
3053 .imp_port = 8,
3054 .vta_regs = B53_VTA_REGS,
3055 .duplex_reg = B53_DUPLEX_STAT_GE,
3056 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
3057 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
3058 .arl_ops = &b53_arl_ops_95,
3059 },
3060 {
3061 .chip_id = BCM7278_DEVICE_ID,
3062 .dev_name = "BCM7278",
3063 .vlans = 4096,
3064 .enabled_ports = 0x1ff,
3065 .arl_bins = 4,
3066 .arl_buckets = 256,
3067 .imp_port = 8,
3068 .vta_regs = B53_VTA_REGS,
3069 .duplex_reg = B53_DUPLEX_STAT_GE,
3070 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
3071 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
3072 .arl_ops = &b53_arl_ops_95,
3073 },
3074 {
3075 .chip_id = BCM53134_DEVICE_ID,
3076 .dev_name = "BCM53134",
3077 .vlans = 4096,
3078 .enabled_ports = 0x12f,
3079 .imp_port = 8,
3080 .cpu_port = B53_CPU_PORT,
3081 .vta_regs = B53_VTA_REGS,
3082 .arl_bins = 4,
3083 .arl_buckets = 1024,
3084 .duplex_reg = B53_DUPLEX_STAT_GE,
3085 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
3086 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
3087 .arl_ops = &b53_arl_ops_95,
3088 },
3089 };
3090
3091 static int b53_switch_init(struct b53_device *dev)
3092 {
3093 u32 chip_id = dev->chip_id;
3094 unsigned int i;
3095 int ret;
3096
3097 if (is63xx(dev))
3098 chip_id = BCM63XX_DEVICE_ID;
3099
3100 for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
3101 const struct b53_chip_data *chip = &b53_switch_chips[i];
3102
3103 if (chip->chip_id == chip_id) {
3104 if (!dev->enabled_ports)
3105 dev->enabled_ports = chip->enabled_ports;
3106 dev->name = chip->dev_name;
3107 dev->duplex_reg = chip->duplex_reg;
3108 dev->vta_regs[0] = chip->vta_regs[0];
3109 dev->vta_regs[1] = chip->vta_regs[1];
3110 dev->vta_regs[2] = chip->vta_regs[2];
3111 dev->jumbo_pm_reg = chip->jumbo_pm_reg;
3112 dev->imp_port = chip->imp_port;
3113 dev->num_vlans = chip->vlans;
3114 dev->num_arl_bins = chip->arl_bins;
3115 dev->num_arl_buckets = chip->arl_buckets;
3116 dev->arl_ops = chip->arl_ops;
3117 break;
3118 }
3119 }
3120
3121 /* check which BCM5325x version we have */
3122 if (is5325(dev)) {
3123 u8 vc4;
3124
3125 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
3126
3127 /* check reserved bits */
3128 switch (vc4 & 3) {
3129 case 1:
3130 /* BCM5325E */
3131 break;
3132 case 3:
3133 /* BCM5325F - do not use port 4 */
3134 dev->enabled_ports &= ~BIT(4);
3135 break;
3136 default:
3137 /* On the BCM47XX SoCs this is the supported internal switch. */
3138 #ifndef CONFIG_BCM47XX
3139 /* BCM5325M */
3140 return -EINVAL;
3141 #else
3142 break;
3143 #endif
3144 }
3145 }
3146
3147 if (is5325e(dev))
3148 dev->num_arl_buckets = 512;
3149
3150 dev->num_ports = fls(dev->enabled_ports);
3151
3152 dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS);
3153
3154 /* Include non standard CPU port built-in PHYs to be probed */
3155 if (is539x(dev) || is531x5(dev)) {
3156 for (i = 0; i < dev->num_ports; i++) {
3157 if (!(dev->ds->phys_mii_mask & BIT(i)) &&
3158 !b53_possible_cpu_port(dev->ds, i))
3159 dev->ds->phys_mii_mask |= BIT(i);
3160 }
3161 }
3162
3163 dev->ports = devm_kcalloc(dev->dev,
3164 dev->num_ports, sizeof(struct b53_port),
3165 GFP_KERNEL);
3166 if (!dev->ports)
3167 return -ENOMEM;
3168
3169 dev->vlans = devm_kcalloc(dev->dev,
3170 dev->num_vlans, sizeof(struct b53_vlan),
3171 GFP_KERNEL);
3172 if (!dev->vlans)
3173 return -ENOMEM;
3174
3175 dev->reset_gpio = b53_switch_get_reset_gpio(dev);
3176 if (dev->reset_gpio >= 0) {
3177 ret = devm_gpio_request_one(dev->dev, dev->reset_gpio,
3178 GPIOF_OUT_INIT_HIGH, "robo_reset");
3179 if (ret)
3180 return ret;
3181 }
3182
3183 return 0;
3184 }
3185
3186 struct b53_device *b53_switch_alloc(struct device *base,
3187 const struct b53_io_ops *ops,
3188 void *priv)
3189 {
3190 struct dsa_switch *ds;
3191 struct b53_device *dev;
3192
3193 ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
3194 if (!ds)
3195 return NULL;
3196
3197 ds->dev = base;
3198
3199 dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
3200 if (!dev)
3201 return NULL;
3202
3203 ds->priv = dev;
3204 dev->dev = base;
3205
3206 dev->ds = ds;
3207 dev->priv = priv;
3208 dev->ops = ops;
3209 ds->ops = &b53_switch_ops;
3210 ds->phylink_mac_ops = &b53_phylink_mac_ops;
3211 dev->vlan_enabled = true;
3212 dev->vlan_filtering = false;
3213 /* Let DSA handle the case where multiple bridges span the same switch
3214 * device and different VLAN awareness settings are requested, which
3215 * would be breaking filtering semantics for any of the other bridge
3216 * devices. (not hardware supported)
3217 */
3218 ds->vlan_filtering_is_global = true;
3219
3220 mutex_init(&dev->reg_mutex);
3221 mutex_init(&dev->stats_mutex);
3222 mutex_init(&dev->arl_mutex);
3223
3224 return dev;
3225 }
3226 EXPORT_SYMBOL(b53_switch_alloc);
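/* Sketch of how a bus-specific front end is expected to use this library;
 * the ops structure and device names here are hypothetical, only
 * b53_switch_alloc()/b53_switch_register() and struct b53_io_ops are real:
 *
 *	struct b53_device *dev;
 *
 *	dev = b53_switch_alloc(&pdev->dev, &example_io_ops, bus_priv);
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->pdata = pdev->dev.platform_data;	// optional
 *	return b53_switch_register(dev);
 */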
3227
3228 int b53_switch_detect(struct b53_device *dev)
3229 {
3230 u32 id32;
3231 u16 tmp;
3232 u8 id8;
3233 int ret;
3234
3235 ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8);
3236 if (ret)
3237 return ret;
3238
3239 switch (id8) {
3240 case 0:
3241 /* BCM5325 and BCM5365 do not have this register so reads
3242 * return 0. But the read operation did succeed, so assume this
3243 * is one of them.
3244 *
3245 * Next check if we can write to the 5325's VTA register; for
3246 * 5365 it is read only.
3247 */
3248 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
3249 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);
3250
3251 if (tmp == 0xf) {
3252 u32 phy_id;
3253 int val;
3254
3255 dev->chip_id = BCM5325_DEVICE_ID;
3256
3257 val = b53_phy_read16(dev->ds, 0, MII_PHYSID1);
3258 phy_id = (val & 0xffff) << 16;
3259 val = b53_phy_read16(dev->ds, 0, MII_PHYSID2);
3260 phy_id |= (val & 0xfff0);
3261
3262 if (phy_id == 0x00406330)
3263 dev->variant_id = B53_VARIANT_5325M;
3264 else if (phy_id == 0x0143bc30)
3265 dev->variant_id = B53_VARIANT_5325E;
3266 } else {
3267 dev->chip_id = BCM5365_DEVICE_ID;
3268 }
3269 break;
3270 case BCM5389_DEVICE_ID:
3271 case BCM5395_DEVICE_ID:
3272 case BCM5397_DEVICE_ID:
3273 case BCM5398_DEVICE_ID:
3274 dev->chip_id = id8;
3275 break;
3276 default:
3277 ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32);
3278 if (ret)
3279 return ret;
3280
3281 switch (id32) {
3282 case BCM53101_DEVICE_ID:
3283 case BCM53115_DEVICE_ID:
3284 case BCM53125_DEVICE_ID:
3285 case BCM53128_DEVICE_ID:
3286 case BCM53010_DEVICE_ID:
3287 case BCM53011_DEVICE_ID:
3288 case BCM53012_DEVICE_ID:
3289 case BCM53018_DEVICE_ID:
3290 case BCM53019_DEVICE_ID:
3291 case BCM53134_DEVICE_ID:
3292 dev->chip_id = id32;
3293 break;
3294 default:
3295 dev_err(dev->dev,
3296 "unsupported switch detected (BCM53%02x/BCM%x)\n",
3297 id8, id32);
3298 return -ENODEV;
3299 }
3300 }
3301
3302 if (dev->chip_id == BCM5325_DEVICE_ID)
3303 return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25,
3304 &dev->core_rev);
3305 else
3306 return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID,
3307 &dev->core_rev);
3308 }
3309 EXPORT_SYMBOL(b53_switch_detect);
3310
3311 int b53_switch_register(struct b53_device *dev)
3312 {
3313 int ret;
3314
3315 if (dev->pdata) {
3316 dev->chip_id = dev->pdata->chip_id;
3317 dev->enabled_ports = dev->pdata->enabled_ports;
3318 }
3319
3320 if (!dev->chip_id && b53_switch_detect(dev))
3321 return -EINVAL;
3322
3323 ret = b53_switch_init(dev);
3324 if (ret)
3325 return ret;
3326
3327 dev_info(dev->dev, "found switch: %s, rev %i\n",
3328 dev->name, dev->core_rev);
3329
3330 return dsa_register_switch(dev->ds);
3331 }
3332 EXPORT_SYMBOL(b53_switch_register);
3333
3334 MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
3335 MODULE_DESCRIPTION("B53 switch library");
3336 MODULE_LICENSE("Dual BSD/GPL");
3337