1 /* 2 * B53 switch driver main logic 3 * 4 * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org> 5 * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com> 6 * 7 * Permission to use, copy, modify, and/or distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include <linux/delay.h> 21 #include <linux/export.h> 22 #include <linux/gpio.h> 23 #include <linux/kernel.h> 24 #include <linux/math.h> 25 #include <linux/minmax.h> 26 #include <linux/module.h> 27 #include <linux/platform_data/b53.h> 28 #include <linux/phy.h> 29 #include <linux/phylink.h> 30 #include <linux/etherdevice.h> 31 #include <linux/if_bridge.h> 32 #include <linux/if_vlan.h> 33 #include <net/dsa.h> 34 35 #include "b53_regs.h" 36 #include "b53_priv.h" 37 38 struct b53_mib_desc { 39 u8 size; 40 u8 offset; 41 const char *name; 42 }; 43 44 /* BCM5365 MIB counters */ 45 static const struct b53_mib_desc b53_mibs_65[] = { 46 { 8, 0x00, "TxOctets" }, 47 { 4, 0x08, "TxDropPkts" }, 48 { 4, 0x10, "TxBroadcastPkts" }, 49 { 4, 0x14, "TxMulticastPkts" }, 50 { 4, 0x18, "TxUnicastPkts" }, 51 { 4, 0x1c, "TxCollisions" }, 52 { 4, 0x20, "TxSingleCollision" }, 53 { 4, 0x24, "TxMultipleCollision" }, 54 { 4, 0x28, "TxDeferredTransmit" }, 55 { 4, 0x2c, "TxLateCollision" }, 56 { 4, 0x30, "TxExcessiveCollision" }, 57 { 4, 0x38, "TxPausePkts" }, 58 { 8, 0x44, "RxOctets" }, 59 { 4, 0x4c, "RxUndersizePkts" }, 60 { 4, 0x50, "RxPausePkts" }, 61 { 4, 0x54, "Pkts64Octets" }, 62 { 4, 0x58, "Pkts65to127Octets" }, 63 { 4, 0x5c, "Pkts128to255Octets" }, 64 { 4, 0x60, "Pkts256to511Octets" }, 65 { 4, 0x64, "Pkts512to1023Octets" }, 66 { 4, 0x68, "Pkts1024to1522Octets" }, 67 { 4, 0x6c, "RxOversizePkts" }, 68 { 4, 0x70, "RxJabbers" }, 69 { 4, 0x74, "RxAlignmentErrors" }, 70 { 4, 0x78, "RxFCSErrors" }, 71 { 8, 0x7c, "RxGoodOctets" }, 72 { 4, 0x84, "RxDropPkts" }, 73 { 4, 0x88, "RxUnicastPkts" }, 74 { 4, 0x8c, "RxMulticastPkts" }, 75 { 4, 0x90, "RxBroadcastPkts" }, 76 { 4, 0x94, "RxSAChanges" }, 77 { 4, 0x98, "RxFragments" }, 78 }; 79 80 #define B53_MIBS_65_SIZE ARRAY_SIZE(b53_mibs_65) 81 82 /* BCM63xx MIB counters */ 83 static const struct b53_mib_desc b53_mibs_63xx[] = { 84 { 8, 0x00, "TxOctets" }, 85 { 4, 0x08, "TxDropPkts" }, 86 { 4, 0x0c, "TxQoSPkts" }, 87 { 4, 0x10, "TxBroadcastPkts" }, 88 { 4, 0x14, "TxMulticastPkts" }, 89 { 4, 0x18, "TxUnicastPkts" }, 90 { 4, 0x1c, "TxCollisions" }, 91 { 4, 0x20, "TxSingleCollision" }, 92 { 4, 0x24, "TxMultipleCollision" }, 93 { 4, 0x28, "TxDeferredTransmit" }, 94 { 4, 0x2c, "TxLateCollision" }, 95 { 4, 0x30, "TxExcessiveCollision" }, 96 { 4, 0x38, "TxPausePkts" }, 97 { 8, 0x3c, "TxQoSOctets" }, 98 { 8, 0x44, "RxOctets" }, 99 { 4, 0x4c, "RxUndersizePkts" }, 100 { 4, 0x50, "RxPausePkts" }, 101 { 4, 0x54, "Pkts64Octets" }, 102 { 4, 0x58, "Pkts65to127Octets" }, 103 { 4, 0x5c, "Pkts128to255Octets" }, 104 { 4, 0x60, "Pkts256to511Octets" }, 105 { 4, 0x64, "Pkts512to1023Octets" 
}, 106 { 4, 0x68, "Pkts1024to1522Octets" }, 107 { 4, 0x6c, "RxOversizePkts" }, 108 { 4, 0x70, "RxJabbers" }, 109 { 4, 0x74, "RxAlignmentErrors" }, 110 { 4, 0x78, "RxFCSErrors" }, 111 { 8, 0x7c, "RxGoodOctets" }, 112 { 4, 0x84, "RxDropPkts" }, 113 { 4, 0x88, "RxUnicastPkts" }, 114 { 4, 0x8c, "RxMulticastPkts" }, 115 { 4, 0x90, "RxBroadcastPkts" }, 116 { 4, 0x94, "RxSAChanges" }, 117 { 4, 0x98, "RxFragments" }, 118 { 4, 0xa0, "RxSymbolErrors" }, 119 { 4, 0xa4, "RxQoSPkts" }, 120 { 8, 0xa8, "RxQoSOctets" }, 121 { 4, 0xb0, "Pkts1523to2047Octets" }, 122 { 4, 0xb4, "Pkts2048to4095Octets" }, 123 { 4, 0xb8, "Pkts4096to8191Octets" }, 124 { 4, 0xbc, "Pkts8192to9728Octets" }, 125 { 4, 0xc0, "RxDiscarded" }, 126 }; 127 128 #define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx) 129 130 /* MIB counters */ 131 static const struct b53_mib_desc b53_mibs[] = { 132 { 8, 0x00, "TxOctets" }, 133 { 4, 0x08, "TxDropPkts" }, 134 { 4, 0x10, "TxBroadcastPkts" }, 135 { 4, 0x14, "TxMulticastPkts" }, 136 { 4, 0x18, "TxUnicastPkts" }, 137 { 4, 0x1c, "TxCollisions" }, 138 { 4, 0x20, "TxSingleCollision" }, 139 { 4, 0x24, "TxMultipleCollision" }, 140 { 4, 0x28, "TxDeferredTransmit" }, 141 { 4, 0x2c, "TxLateCollision" }, 142 { 4, 0x30, "TxExcessiveCollision" }, 143 { 4, 0x38, "TxPausePkts" }, 144 { 8, 0x50, "RxOctets" }, 145 { 4, 0x58, "RxUndersizePkts" }, 146 { 4, 0x5c, "RxPausePkts" }, 147 { 4, 0x60, "Pkts64Octets" }, 148 { 4, 0x64, "Pkts65to127Octets" }, 149 { 4, 0x68, "Pkts128to255Octets" }, 150 { 4, 0x6c, "Pkts256to511Octets" }, 151 { 4, 0x70, "Pkts512to1023Octets" }, 152 { 4, 0x74, "Pkts1024to1522Octets" }, 153 { 4, 0x78, "RxOversizePkts" }, 154 { 4, 0x7c, "RxJabbers" }, 155 { 4, 0x80, "RxAlignmentErrors" }, 156 { 4, 0x84, "RxFCSErrors" }, 157 { 8, 0x88, "RxGoodOctets" }, 158 { 4, 0x90, "RxDropPkts" }, 159 { 4, 0x94, "RxUnicastPkts" }, 160 { 4, 0x98, "RxMulticastPkts" }, 161 { 4, 0x9c, "RxBroadcastPkts" }, 162 { 4, 0xa0, "RxSAChanges" }, 163 { 4, 0xa4, "RxFragments" }, 164 { 4, 0xa8, "RxJumboPkts" }, 165 { 4, 0xac, "RxSymbolErrors" }, 166 { 4, 0xc0, "RxDiscarded" }, 167 }; 168 169 #define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs) 170 171 static const struct b53_mib_desc b53_mibs_58xx[] = { 172 { 8, 0x00, "TxOctets" }, 173 { 4, 0x08, "TxDropPkts" }, 174 { 4, 0x0c, "TxQPKTQ0" }, 175 { 4, 0x10, "TxBroadcastPkts" }, 176 { 4, 0x14, "TxMulticastPkts" }, 177 { 4, 0x18, "TxUnicastPKts" }, 178 { 4, 0x1c, "TxCollisions" }, 179 { 4, 0x20, "TxSingleCollision" }, 180 { 4, 0x24, "TxMultipleCollision" }, 181 { 4, 0x28, "TxDeferredCollision" }, 182 { 4, 0x2c, "TxLateCollision" }, 183 { 4, 0x30, "TxExcessiveCollision" }, 184 { 4, 0x34, "TxFrameInDisc" }, 185 { 4, 0x38, "TxPausePkts" }, 186 { 4, 0x3c, "TxQPKTQ1" }, 187 { 4, 0x40, "TxQPKTQ2" }, 188 { 4, 0x44, "TxQPKTQ3" }, 189 { 4, 0x48, "TxQPKTQ4" }, 190 { 4, 0x4c, "TxQPKTQ5" }, 191 { 8, 0x50, "RxOctets" }, 192 { 4, 0x58, "RxUndersizePkts" }, 193 { 4, 0x5c, "RxPausePkts" }, 194 { 4, 0x60, "RxPkts64Octets" }, 195 { 4, 0x64, "RxPkts65to127Octets" }, 196 { 4, 0x68, "RxPkts128to255Octets" }, 197 { 4, 0x6c, "RxPkts256to511Octets" }, 198 { 4, 0x70, "RxPkts512to1023Octets" }, 199 { 4, 0x74, "RxPkts1024toMaxPktsOctets" }, 200 { 4, 0x78, "RxOversizePkts" }, 201 { 4, 0x7c, "RxJabbers" }, 202 { 4, 0x80, "RxAlignmentErrors" }, 203 { 4, 0x84, "RxFCSErrors" }, 204 { 8, 0x88, "RxGoodOctets" }, 205 { 4, 0x90, "RxDropPkts" }, 206 { 4, 0x94, "RxUnicastPkts" }, 207 { 4, 0x98, "RxMulticastPkts" }, 208 { 4, 0x9c, "RxBroadcastPkts" }, 209 { 4, 0xa0, "RxSAChanges" }, 210 { 4, 0xa4, "RxFragments" }, 211 { 4, 0xa8, 
"RxJumboPkt" }, 212 { 4, 0xac, "RxSymblErr" }, 213 { 4, 0xb0, "InRangeErrCount" }, 214 { 4, 0xb4, "OutRangeErrCount" }, 215 { 4, 0xb8, "EEELpiEvent" }, 216 { 4, 0xbc, "EEELpiDuration" }, 217 { 4, 0xc0, "RxDiscard" }, 218 { 4, 0xc8, "TxQPKTQ6" }, 219 { 4, 0xcc, "TxQPKTQ7" }, 220 { 4, 0xd0, "TxPkts64Octets" }, 221 { 4, 0xd4, "TxPkts65to127Octets" }, 222 { 4, 0xd8, "TxPkts128to255Octets" }, 223 { 4, 0xdc, "TxPkts256to511Ocets" }, 224 { 4, 0xe0, "TxPkts512to1023Ocets" }, 225 { 4, 0xe4, "TxPkts1024toMaxPktOcets" }, 226 }; 227 228 #define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx) 229 230 #define B53_MAX_MTU_25 (1536 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) 231 #define B53_MAX_MTU (9720 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) 232 233 static int b53_do_vlan_op(struct b53_device *dev, u8 op) 234 { 235 unsigned int i; 236 237 b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op); 238 239 for (i = 0; i < 10; i++) { 240 u8 vta; 241 242 b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta); 243 if (!(vta & VTA_START_CMD)) 244 return 0; 245 246 usleep_range(100, 200); 247 } 248 249 return -EIO; 250 } 251 252 static void b53_set_vlan_entry(struct b53_device *dev, u16 vid, 253 struct b53_vlan *vlan) 254 { 255 if (is5325(dev)) { 256 u32 entry = 0; 257 258 if (vlan->members) { 259 entry = ((vlan->untag & VA_UNTAG_MASK_25) << 260 VA_UNTAG_S_25) | vlan->members; 261 if (dev->core_rev >= 3) 262 entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S; 263 else 264 entry |= VA_VALID_25; 265 } 266 267 b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry); 268 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid | 269 VTA_RW_STATE_WR | VTA_RW_OP_EN); 270 } else if (is5365(dev)) { 271 u16 entry = 0; 272 273 if (vlan->members) 274 entry = ((vlan->untag & VA_UNTAG_MASK_65) << 275 VA_UNTAG_S_65) | vlan->members | VA_VALID_65; 276 277 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry); 278 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid | 279 VTA_RW_STATE_WR | VTA_RW_OP_EN); 280 } else { 281 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid); 282 b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], 283 (vlan->untag << VTE_UNTAG_S) | vlan->members); 284 285 b53_do_vlan_op(dev, VTA_CMD_WRITE); 286 } 287 288 dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n", 289 vid, vlan->members, vlan->untag); 290 } 291 292 static void b53_get_vlan_entry(struct b53_device *dev, u16 vid, 293 struct b53_vlan *vlan) 294 { 295 if (is5325(dev)) { 296 u32 entry = 0; 297 298 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid | 299 VTA_RW_STATE_RD | VTA_RW_OP_EN); 300 b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry); 301 302 if (dev->core_rev >= 3) 303 vlan->valid = !!(entry & VA_VALID_25_R4); 304 else 305 vlan->valid = !!(entry & VA_VALID_25); 306 vlan->members = entry & VA_MEMBER_MASK; 307 vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25; 308 309 } else if (is5365(dev)) { 310 u16 entry = 0; 311 312 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid | 313 VTA_RW_STATE_WR | VTA_RW_OP_EN); 314 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry); 315 316 vlan->valid = !!(entry & VA_VALID_65); 317 vlan->members = entry & VA_MEMBER_MASK; 318 vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65; 319 } else { 320 u32 entry = 0; 321 322 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid); 323 b53_do_vlan_op(dev, VTA_CMD_READ); 324 b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry); 325 vlan->members = entry & VTE_MEMBERS; 326 
vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS; 327 vlan->valid = true; 328 } 329 } 330 331 static void b53_set_eap_mode(struct b53_device *dev, int port, int mode) 332 { 333 u64 eap_conf; 334 335 if (is5325(dev) || is5365(dev) || dev->chip_id == BCM5389_DEVICE_ID) 336 return; 337 338 b53_read64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), &eap_conf); 339 340 if (is63xx(dev)) { 341 eap_conf &= ~EAP_MODE_MASK_63XX; 342 eap_conf |= (u64)mode << EAP_MODE_SHIFT_63XX; 343 } else { 344 eap_conf &= ~EAP_MODE_MASK; 345 eap_conf |= (u64)mode << EAP_MODE_SHIFT; 346 } 347 348 b53_write64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), eap_conf); 349 } 350 351 static void b53_set_forwarding(struct b53_device *dev, int enable) 352 { 353 u8 mgmt; 354 355 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 356 357 if (enable) 358 mgmt |= SM_SW_FWD_EN; 359 else 360 mgmt &= ~SM_SW_FWD_EN; 361 362 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 363 364 /* Include IMP port in dumb forwarding mode 365 */ 366 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt); 367 mgmt |= B53_MII_DUMB_FWDG_EN; 368 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); 369 370 /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether 371 * frames should be flooded or not. 372 */ 373 b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); 374 mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN; 375 b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); 376 } 377 378 static void b53_enable_vlan(struct b53_device *dev, int port, bool enable, 379 bool enable_filtering) 380 { 381 u8 mgmt, vc0, vc1, vc4 = 0, vc5; 382 383 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 384 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0); 385 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1); 386 387 if (is5325(dev) || is5365(dev)) { 388 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4); 389 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5); 390 } else if (is63xx(dev)) { 391 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4); 392 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5); 393 } else { 394 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4); 395 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5); 396 } 397 398 vc1 &= ~VC1_RX_MCST_FWD_EN; 399 400 if (enable) { 401 vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; 402 vc1 |= VC1_RX_MCST_UNTAG_EN; 403 vc4 &= ~VC4_ING_VID_CHECK_MASK; 404 if (enable_filtering) { 405 vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; 406 vc5 |= VC5_DROP_VTABLE_MISS; 407 } else { 408 vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S; 409 vc5 &= ~VC5_DROP_VTABLE_MISS; 410 } 411 412 if (is5325(dev)) 413 vc0 &= ~VC0_RESERVED_1; 414 415 if (is5325(dev) || is5365(dev)) 416 vc1 |= VC1_RX_MCST_TAG_EN; 417 418 } else { 419 vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID); 420 vc1 &= ~VC1_RX_MCST_UNTAG_EN; 421 vc4 &= ~VC4_ING_VID_CHECK_MASK; 422 vc5 &= ~VC5_DROP_VTABLE_MISS; 423 424 if (is5325(dev) || is5365(dev)) 425 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; 426 else 427 vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S; 428 429 if (is5325(dev) || is5365(dev)) 430 vc1 &= ~VC1_RX_MCST_TAG_EN; 431 } 432 433 if (!is5325(dev) && !is5365(dev)) 434 vc5 &= ~VC5_VID_FFF_EN; 435 436 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0); 437 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1); 438 439 if (is5325(dev) || is5365(dev)) { 440 /* enable the high 8 bit vid check on 5325 */ 441 if (is5325(dev) && enable) 442 b53_write8(dev, 
B53_VLAN_PAGE, B53_VLAN_CTRL3, 443 VC3_HIGH_8BIT_EN); 444 else 445 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0); 446 447 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4); 448 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5); 449 } else if (is63xx(dev)) { 450 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0); 451 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4); 452 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5); 453 } else { 454 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0); 455 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4); 456 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5); 457 } 458 459 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 460 461 dev->vlan_enabled = enable; 462 463 dev_dbg(dev->dev, "Port %d VLAN enabled: %d, filtering: %d\n", 464 port, enable, enable_filtering); 465 } 466 467 static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) 468 { 469 u32 port_mask = 0; 470 u16 max_size = JMS_MIN_SIZE; 471 472 if (is5325(dev) || is5365(dev)) 473 return -EINVAL; 474 475 if (enable) { 476 port_mask = dev->enabled_ports; 477 max_size = JMS_MAX_SIZE; 478 if (allow_10_100) 479 port_mask |= JPM_10_100_JUMBO_EN; 480 } 481 482 b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask); 483 return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size); 484 } 485 486 static int b53_flush_arl(struct b53_device *dev, u8 mask) 487 { 488 unsigned int i; 489 490 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, 491 FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask); 492 493 for (i = 0; i < 10; i++) { 494 u8 fast_age_ctrl; 495 496 b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, 497 &fast_age_ctrl); 498 499 if (!(fast_age_ctrl & FAST_AGE_DONE)) 500 goto out; 501 502 msleep(1); 503 } 504 505 return -ETIMEDOUT; 506 out: 507 /* Only age dynamic entries (default behavior) */ 508 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC); 509 return 0; 510 } 511 512 static int b53_fast_age_port(struct b53_device *dev, int port) 513 { 514 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port); 515 516 return b53_flush_arl(dev, FAST_AGE_PORT); 517 } 518 519 static int b53_fast_age_vlan(struct b53_device *dev, u16 vid) 520 { 521 b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid); 522 523 return b53_flush_arl(dev, FAST_AGE_VLAN); 524 } 525 526 void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) 527 { 528 struct b53_device *dev = ds->priv; 529 unsigned int i; 530 u16 pvlan; 531 532 /* Enable the IMP port to be in the same VLAN as the other ports 533 * on a per-port basis such that we only have Port i and IMP in 534 * the same VLAN. 
535 */ 536 b53_for_each_port(dev, i) { 537 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan); 538 pvlan |= BIT(cpu_port); 539 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan); 540 } 541 } 542 EXPORT_SYMBOL(b53_imp_vlan_setup); 543 544 static void b53_port_set_ucast_flood(struct b53_device *dev, int port, 545 bool unicast) 546 { 547 u16 uc; 548 549 b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc); 550 if (unicast) 551 uc |= BIT(port); 552 else 553 uc &= ~BIT(port); 554 b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc); 555 } 556 557 static void b53_port_set_mcast_flood(struct b53_device *dev, int port, 558 bool multicast) 559 { 560 u16 mc; 561 562 b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc); 563 if (multicast) 564 mc |= BIT(port); 565 else 566 mc &= ~BIT(port); 567 b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc); 568 569 b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc); 570 if (multicast) 571 mc |= BIT(port); 572 else 573 mc &= ~BIT(port); 574 b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc); 575 } 576 577 static void b53_port_set_learning(struct b53_device *dev, int port, 578 bool learning) 579 { 580 u16 reg; 581 582 b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, ®); 583 if (learning) 584 reg &= ~BIT(port); 585 else 586 reg |= BIT(port); 587 b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg); 588 } 589 590 static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable) 591 { 592 struct b53_device *dev = ds->priv; 593 u16 reg; 594 595 b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, ®); 596 if (enable) 597 reg |= BIT(port); 598 else 599 reg &= ~BIT(port); 600 b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg); 601 } 602 603 int b53_setup_port(struct dsa_switch *ds, int port) 604 { 605 struct b53_device *dev = ds->priv; 606 607 b53_port_set_ucast_flood(dev, port, true); 608 b53_port_set_mcast_flood(dev, port, true); 609 b53_port_set_learning(dev, port, false); 610 611 /* Force all traffic to go to the CPU port to prevent the ASIC from 612 * trying to forward to bridged ports on matching FDB entries, then 613 * dropping frames because it isn't allowed to forward there. 614 */ 615 if (dsa_is_user_port(ds, port)) 616 b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED); 617 618 return 0; 619 } 620 EXPORT_SYMBOL(b53_setup_port); 621 622 int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) 623 { 624 struct b53_device *dev = ds->priv; 625 unsigned int cpu_port; 626 int ret = 0; 627 u16 pvlan; 628 629 if (!dsa_is_user_port(ds, port)) 630 return 0; 631 632 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; 633 634 if (dev->ops->irq_enable) 635 ret = dev->ops->irq_enable(dev, port); 636 if (ret) 637 return ret; 638 639 /* Clear the Rx and Tx disable bits and set to no spanning tree */ 640 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0); 641 642 /* Set this port, and only this one to be in the default VLAN, 643 * if member of a bridge, restore its membership prior to 644 * bringing down this port. 
645 */ 646 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 647 pvlan &= ~0x1ff; 648 pvlan |= BIT(port); 649 pvlan |= dev->ports[port].vlan_ctl_mask; 650 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 651 652 b53_imp_vlan_setup(ds, cpu_port); 653 654 /* If EEE was enabled, restore it */ 655 if (dev->ports[port].eee.eee_enabled) 656 b53_eee_enable_set(ds, port, true); 657 658 return 0; 659 } 660 EXPORT_SYMBOL(b53_enable_port); 661 662 void b53_disable_port(struct dsa_switch *ds, int port) 663 { 664 struct b53_device *dev = ds->priv; 665 u8 reg; 666 667 /* Disable Tx/Rx for the port */ 668 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), ®); 669 reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE; 670 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); 671 672 if (dev->ops->irq_disable) 673 dev->ops->irq_disable(dev, port); 674 } 675 EXPORT_SYMBOL(b53_disable_port); 676 677 void b53_brcm_hdr_setup(struct dsa_switch *ds, int port) 678 { 679 struct b53_device *dev = ds->priv; 680 bool tag_en = !(dev->tag_protocol == DSA_TAG_PROTO_NONE); 681 u8 hdr_ctl, val; 682 u16 reg; 683 684 /* Resolve which bit controls the Broadcom tag */ 685 switch (port) { 686 case 8: 687 val = BRCM_HDR_P8_EN; 688 break; 689 case 7: 690 val = BRCM_HDR_P7_EN; 691 break; 692 case 5: 693 val = BRCM_HDR_P5_EN; 694 break; 695 default: 696 val = 0; 697 break; 698 } 699 700 /* Enable management mode if tagging is requested */ 701 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &hdr_ctl); 702 if (tag_en) 703 hdr_ctl |= SM_SW_FWD_MODE; 704 else 705 hdr_ctl &= ~SM_SW_FWD_MODE; 706 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, hdr_ctl); 707 708 /* Configure the appropriate IMP port */ 709 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &hdr_ctl); 710 if (port == 8) 711 hdr_ctl |= GC_FRM_MGMT_PORT_MII; 712 else if (port == 5) 713 hdr_ctl |= GC_FRM_MGMT_PORT_M; 714 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl); 715 716 /* Enable Broadcom tags for IMP port */ 717 b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl); 718 if (tag_en) 719 hdr_ctl |= val; 720 else 721 hdr_ctl &= ~val; 722 b53_write8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, hdr_ctl); 723 724 /* Registers below are only accessible on newer devices */ 725 if (!is58xx(dev)) 726 return; 727 728 /* Enable reception Broadcom tag for CPU TX (switch RX) to 729 * allow us to tag outgoing frames 730 */ 731 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, ®); 732 if (tag_en) 733 reg &= ~BIT(port); 734 else 735 reg |= BIT(port); 736 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, reg); 737 738 /* Enable transmission of Broadcom tags from the switch (CPU RX) to 739 * allow delivering frames to the per-port net_devices 740 */ 741 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, ®); 742 if (tag_en) 743 reg &= ~BIT(port); 744 else 745 reg |= BIT(port); 746 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, reg); 747 } 748 EXPORT_SYMBOL(b53_brcm_hdr_setup); 749 750 static void b53_enable_cpu_port(struct b53_device *dev, int port) 751 { 752 u8 port_ctrl; 753 754 /* BCM5325 CPU port is at 8 */ 755 if ((is5325(dev) || is5365(dev)) && port == B53_CPU_PORT_25) 756 port = B53_CPU_PORT; 757 758 port_ctrl = PORT_CTRL_RX_BCST_EN | 759 PORT_CTRL_RX_MCST_EN | 760 PORT_CTRL_RX_UCST_EN; 761 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl); 762 763 b53_brcm_hdr_setup(dev->ds, port); 764 } 765 766 static void b53_enable_mib(struct b53_device *dev) 767 { 768 u8 gc; 769 770 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, 
&gc); 771 gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN); 772 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); 773 } 774 775 static void b53_enable_stp(struct b53_device *dev) 776 { 777 u8 gc; 778 779 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); 780 gc |= GC_RX_BPDU_EN; 781 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); 782 } 783 784 static u16 b53_default_pvid(struct b53_device *dev) 785 { 786 if (is5325(dev) || is5365(dev)) 787 return 1; 788 else 789 return 0; 790 } 791 792 static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port) 793 { 794 struct b53_device *dev = ds->priv; 795 796 return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port); 797 } 798 799 static bool b53_vlan_port_may_join_untagged(struct dsa_switch *ds, int port) 800 { 801 struct b53_device *dev = ds->priv; 802 struct dsa_port *dp; 803 804 if (!dev->vlan_filtering) 805 return true; 806 807 dp = dsa_to_port(ds, port); 808 809 if (dsa_port_is_cpu(dp)) 810 return true; 811 812 return dp->bridge == NULL; 813 } 814 815 int b53_configure_vlan(struct dsa_switch *ds) 816 { 817 struct b53_device *dev = ds->priv; 818 struct b53_vlan vl = { 0 }; 819 struct b53_vlan *v; 820 int i, def_vid; 821 u16 vid; 822 823 def_vid = b53_default_pvid(dev); 824 825 /* clear all vlan entries */ 826 if (is5325(dev) || is5365(dev)) { 827 for (i = def_vid; i < dev->num_vlans; i++) 828 b53_set_vlan_entry(dev, i, &vl); 829 } else { 830 b53_do_vlan_op(dev, VTA_CMD_CLEAR); 831 } 832 833 b53_enable_vlan(dev, -1, dev->vlan_enabled, dev->vlan_filtering); 834 835 /* Create an untagged VLAN entry for the default PVID in case 836 * CONFIG_VLAN_8021Q is disabled and there are no calls to 837 * dsa_user_vlan_rx_add_vid() to create the default VLAN 838 * entry. Do this only when the tagging protocol is not 839 * DSA_TAG_PROTO_NONE 840 */ 841 v = &dev->vlans[def_vid]; 842 b53_for_each_port(dev, i) { 843 if (!b53_vlan_port_may_join_untagged(ds, i)) 844 continue; 845 846 vl.members |= BIT(i); 847 if (!b53_vlan_port_needs_forced_tagged(ds, i)) 848 vl.untag = vl.members; 849 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(i), 850 def_vid); 851 } 852 b53_set_vlan_entry(dev, def_vid, &vl); 853 854 if (dev->vlan_filtering) { 855 /* Upon initial call we have not set-up any VLANs, but upon 856 * system resume, we need to restore all VLAN entries. 
857 */ 858 for (vid = def_vid + 1; vid < dev->num_vlans; vid++) { 859 v = &dev->vlans[vid]; 860 861 if (!v->members) 862 continue; 863 864 b53_set_vlan_entry(dev, vid, v); 865 b53_fast_age_vlan(dev, vid); 866 } 867 868 b53_for_each_port(dev, i) { 869 if (!dsa_is_cpu_port(ds, i)) 870 b53_write16(dev, B53_VLAN_PAGE, 871 B53_VLAN_PORT_DEF_TAG(i), 872 dev->ports[i].pvid); 873 } 874 } 875 876 return 0; 877 } 878 EXPORT_SYMBOL(b53_configure_vlan); 879 880 static void b53_switch_reset_gpio(struct b53_device *dev) 881 { 882 int gpio = dev->reset_gpio; 883 884 if (gpio < 0) 885 return; 886 887 /* Reset sequence: RESET low(50ms)->high(20ms) 888 */ 889 gpio_set_value(gpio, 0); 890 mdelay(50); 891 892 gpio_set_value(gpio, 1); 893 mdelay(20); 894 895 dev->current_page = 0xff; 896 } 897 898 static int b53_switch_reset(struct b53_device *dev) 899 { 900 unsigned int timeout = 1000; 901 u8 mgmt, reg; 902 903 b53_switch_reset_gpio(dev); 904 905 if (is539x(dev)) { 906 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83); 907 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00); 908 } 909 910 /* This is specific to 58xx devices here, do not use is58xx() which 911 * covers the larger Starfigther 2 family, including 7445/7278 which 912 * still use this driver as a library and need to perform the reset 913 * earlier. 914 */ 915 if (dev->chip_id == BCM58XX_DEVICE_ID || 916 dev->chip_id == BCM583XX_DEVICE_ID) { 917 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, ®); 918 reg |= SW_RST | EN_SW_RST | EN_CH_RST; 919 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg); 920 921 do { 922 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, ®); 923 if (!(reg & SW_RST)) 924 break; 925 926 usleep_range(1000, 2000); 927 } while (timeout-- > 0); 928 929 if (timeout == 0) { 930 dev_err(dev->dev, 931 "Timeout waiting for SW_RST to clear!\n"); 932 return -ETIMEDOUT; 933 } 934 } 935 936 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 937 938 if (!(mgmt & SM_SW_FWD_EN)) { 939 mgmt &= ~SM_SW_FWD_MODE; 940 mgmt |= SM_SW_FWD_EN; 941 942 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 943 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 944 945 if (!(mgmt & SM_SW_FWD_EN)) { 946 dev_err(dev->dev, "Failed to enable switch!\n"); 947 return -EINVAL; 948 } 949 } 950 951 b53_enable_mib(dev); 952 b53_enable_stp(dev); 953 954 return b53_flush_arl(dev, FAST_AGE_STATIC); 955 } 956 957 static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg) 958 { 959 struct b53_device *priv = ds->priv; 960 u16 value = 0; 961 int ret; 962 963 if (priv->ops->phy_read16) 964 ret = priv->ops->phy_read16(priv, addr, reg, &value); 965 else 966 ret = b53_read16(priv, B53_PORT_MII_PAGE(addr), 967 reg * 2, &value); 968 969 return ret ? 
ret : value; 970 } 971 972 static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val) 973 { 974 struct b53_device *priv = ds->priv; 975 976 if (priv->ops->phy_write16) 977 return priv->ops->phy_write16(priv, addr, reg, val); 978 979 return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val); 980 } 981 982 static int b53_reset_switch(struct b53_device *priv) 983 { 984 /* reset vlans */ 985 memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans); 986 memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports); 987 988 priv->serdes_lane = B53_INVALID_LANE; 989 990 return b53_switch_reset(priv); 991 } 992 993 static int b53_apply_config(struct b53_device *priv) 994 { 995 /* disable switching */ 996 b53_set_forwarding(priv, 0); 997 998 b53_configure_vlan(priv->ds); 999 1000 /* enable switching */ 1001 b53_set_forwarding(priv, 1); 1002 1003 return 0; 1004 } 1005 1006 static void b53_reset_mib(struct b53_device *priv) 1007 { 1008 u8 gc; 1009 1010 b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); 1011 1012 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB); 1013 msleep(1); 1014 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB); 1015 msleep(1); 1016 } 1017 1018 static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev) 1019 { 1020 if (is5365(dev)) 1021 return b53_mibs_65; 1022 else if (is63xx(dev)) 1023 return b53_mibs_63xx; 1024 else if (is58xx(dev)) 1025 return b53_mibs_58xx; 1026 else 1027 return b53_mibs; 1028 } 1029 1030 static unsigned int b53_get_mib_size(struct b53_device *dev) 1031 { 1032 if (is5365(dev)) 1033 return B53_MIBS_65_SIZE; 1034 else if (is63xx(dev)) 1035 return B53_MIBS_63XX_SIZE; 1036 else if (is58xx(dev)) 1037 return B53_MIBS_58XX_SIZE; 1038 else 1039 return B53_MIBS_SIZE; 1040 } 1041 1042 static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port) 1043 { 1044 /* These ports typically do not have built-in PHYs */ 1045 switch (port) { 1046 case B53_CPU_PORT_25: 1047 case 7: 1048 case B53_CPU_PORT: 1049 return NULL; 1050 } 1051 1052 return mdiobus_get_phy(ds->user_mii_bus, port); 1053 } 1054 1055 void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset, 1056 uint8_t *data) 1057 { 1058 struct b53_device *dev = ds->priv; 1059 const struct b53_mib_desc *mibs = b53_get_mib(dev); 1060 unsigned int mib_size = b53_get_mib_size(dev); 1061 struct phy_device *phydev; 1062 unsigned int i; 1063 1064 if (stringset == ETH_SS_STATS) { 1065 for (i = 0; i < mib_size; i++) 1066 ethtool_puts(&data, mibs[i].name); 1067 } else if (stringset == ETH_SS_PHY_STATS) { 1068 phydev = b53_get_phy_device(ds, port); 1069 if (!phydev) 1070 return; 1071 1072 phy_ethtool_get_strings(phydev, data); 1073 } 1074 } 1075 EXPORT_SYMBOL(b53_get_strings); 1076 1077 void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) 1078 { 1079 struct b53_device *dev = ds->priv; 1080 const struct b53_mib_desc *mibs = b53_get_mib(dev); 1081 unsigned int mib_size = b53_get_mib_size(dev); 1082 const struct b53_mib_desc *s; 1083 unsigned int i; 1084 u64 val = 0; 1085 1086 if (is5365(dev) && port == 5) 1087 port = 8; 1088 1089 mutex_lock(&dev->stats_mutex); 1090 1091 for (i = 0; i < mib_size; i++) { 1092 s = &mibs[i]; 1093 1094 if (s->size == 8) { 1095 b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val); 1096 } else { 1097 u32 val32; 1098 1099 b53_read32(dev, B53_MIB_PAGE(port), s->offset, 1100 &val32); 1101 val = val32; 1102 } 1103 data[i] = (u64)val; 1104 } 1105 1106 
mutex_unlock(&dev->stats_mutex); 1107 } 1108 EXPORT_SYMBOL(b53_get_ethtool_stats); 1109 1110 void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data) 1111 { 1112 struct phy_device *phydev; 1113 1114 phydev = b53_get_phy_device(ds, port); 1115 if (!phydev) 1116 return; 1117 1118 phy_ethtool_get_stats(phydev, NULL, data); 1119 } 1120 EXPORT_SYMBOL(b53_get_ethtool_phy_stats); 1121 1122 int b53_get_sset_count(struct dsa_switch *ds, int port, int sset) 1123 { 1124 struct b53_device *dev = ds->priv; 1125 struct phy_device *phydev; 1126 1127 if (sset == ETH_SS_STATS) { 1128 return b53_get_mib_size(dev); 1129 } else if (sset == ETH_SS_PHY_STATS) { 1130 phydev = b53_get_phy_device(ds, port); 1131 if (!phydev) 1132 return 0; 1133 1134 return phy_ethtool_get_sset_count(phydev); 1135 } 1136 1137 return 0; 1138 } 1139 EXPORT_SYMBOL(b53_get_sset_count); 1140 1141 enum b53_devlink_resource_id { 1142 B53_DEVLINK_PARAM_ID_VLAN_TABLE, 1143 }; 1144 1145 static u64 b53_devlink_vlan_table_get(void *priv) 1146 { 1147 struct b53_device *dev = priv; 1148 struct b53_vlan *vl; 1149 unsigned int i; 1150 u64 count = 0; 1151 1152 for (i = 0; i < dev->num_vlans; i++) { 1153 vl = &dev->vlans[i]; 1154 if (vl->members) 1155 count++; 1156 } 1157 1158 return count; 1159 } 1160 1161 int b53_setup_devlink_resources(struct dsa_switch *ds) 1162 { 1163 struct devlink_resource_size_params size_params; 1164 struct b53_device *dev = ds->priv; 1165 int err; 1166 1167 devlink_resource_size_params_init(&size_params, dev->num_vlans, 1168 dev->num_vlans, 1169 1, DEVLINK_RESOURCE_UNIT_ENTRY); 1170 1171 err = dsa_devlink_resource_register(ds, "VLAN", dev->num_vlans, 1172 B53_DEVLINK_PARAM_ID_VLAN_TABLE, 1173 DEVLINK_RESOURCE_ID_PARENT_TOP, 1174 &size_params); 1175 if (err) 1176 goto out; 1177 1178 dsa_devlink_resource_occ_get_register(ds, 1179 B53_DEVLINK_PARAM_ID_VLAN_TABLE, 1180 b53_devlink_vlan_table_get, dev); 1181 1182 return 0; 1183 out: 1184 dsa_devlink_resources_unregister(ds); 1185 return err; 1186 } 1187 EXPORT_SYMBOL(b53_setup_devlink_resources); 1188 1189 static int b53_setup(struct dsa_switch *ds) 1190 { 1191 struct b53_device *dev = ds->priv; 1192 struct b53_vlan *vl; 1193 unsigned int port; 1194 u16 pvid; 1195 int ret; 1196 1197 /* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set 1198 * which forces the CPU port to be tagged in all VLANs. 1199 */ 1200 ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE; 1201 1202 /* The switch does not tell us the original VLAN for untagged 1203 * packets, so keep the CPU port always tagged. 1204 */ 1205 ds->untag_vlan_aware_bridge_pvid = true; 1206 1207 /* Ageing time is set in seconds */ 1208 ds->ageing_time_min = 1 * 1000; 1209 ds->ageing_time_max = AGE_TIME_MAX * 1000; 1210 1211 ret = b53_reset_switch(dev); 1212 if (ret) { 1213 dev_err(ds->dev, "failed to reset switch\n"); 1214 return ret; 1215 } 1216 1217 /* setup default vlan for filtering mode */ 1218 pvid = b53_default_pvid(dev); 1219 vl = &dev->vlans[pvid]; 1220 b53_for_each_port(dev, port) { 1221 vl->members |= BIT(port); 1222 if (!b53_vlan_port_needs_forced_tagged(ds, port)) 1223 vl->untag |= BIT(port); 1224 } 1225 1226 b53_reset_mib(dev); 1227 1228 ret = b53_apply_config(dev); 1229 if (ret) { 1230 dev_err(ds->dev, "failed to apply configuration\n"); 1231 return ret; 1232 } 1233 1234 /* Configure IMP/CPU port, disable all other ports. 
Enabled 1235 * ports will be configured with .port_enable 1236 */ 1237 for (port = 0; port < dev->num_ports; port++) { 1238 if (dsa_is_cpu_port(ds, port)) 1239 b53_enable_cpu_port(dev, port); 1240 else 1241 b53_disable_port(ds, port); 1242 } 1243 1244 return b53_setup_devlink_resources(ds); 1245 } 1246 1247 static void b53_teardown(struct dsa_switch *ds) 1248 { 1249 dsa_devlink_resources_unregister(ds); 1250 } 1251 1252 static void b53_force_link(struct b53_device *dev, int port, int link) 1253 { 1254 u8 reg, val, off; 1255 1256 /* Override the port settings */ 1257 if (port == dev->imp_port) { 1258 off = B53_PORT_OVERRIDE_CTRL; 1259 val = PORT_OVERRIDE_EN; 1260 } else { 1261 off = B53_GMII_PORT_OVERRIDE_CTRL(port); 1262 val = GMII_PO_EN; 1263 } 1264 1265 b53_read8(dev, B53_CTRL_PAGE, off, ®); 1266 reg |= val; 1267 if (link) 1268 reg |= PORT_OVERRIDE_LINK; 1269 else 1270 reg &= ~PORT_OVERRIDE_LINK; 1271 b53_write8(dev, B53_CTRL_PAGE, off, reg); 1272 } 1273 1274 static void b53_force_port_config(struct b53_device *dev, int port, 1275 int speed, int duplex, 1276 bool tx_pause, bool rx_pause) 1277 { 1278 u8 reg, val, off; 1279 1280 /* Override the port settings */ 1281 if (port == dev->imp_port) { 1282 off = B53_PORT_OVERRIDE_CTRL; 1283 val = PORT_OVERRIDE_EN; 1284 } else { 1285 off = B53_GMII_PORT_OVERRIDE_CTRL(port); 1286 val = GMII_PO_EN; 1287 } 1288 1289 b53_read8(dev, B53_CTRL_PAGE, off, ®); 1290 reg |= val; 1291 if (duplex == DUPLEX_FULL) 1292 reg |= PORT_OVERRIDE_FULL_DUPLEX; 1293 else 1294 reg &= ~PORT_OVERRIDE_FULL_DUPLEX; 1295 1296 switch (speed) { 1297 case 2000: 1298 reg |= PORT_OVERRIDE_SPEED_2000M; 1299 fallthrough; 1300 case SPEED_1000: 1301 reg |= PORT_OVERRIDE_SPEED_1000M; 1302 break; 1303 case SPEED_100: 1304 reg |= PORT_OVERRIDE_SPEED_100M; 1305 break; 1306 case SPEED_10: 1307 reg |= PORT_OVERRIDE_SPEED_10M; 1308 break; 1309 default: 1310 dev_err(dev->dev, "unknown speed: %d\n", speed); 1311 return; 1312 } 1313 1314 if (rx_pause) 1315 reg |= PORT_OVERRIDE_RX_FLOW; 1316 if (tx_pause) 1317 reg |= PORT_OVERRIDE_TX_FLOW; 1318 1319 b53_write8(dev, B53_CTRL_PAGE, off, reg); 1320 } 1321 1322 static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port, 1323 phy_interface_t interface) 1324 { 1325 struct b53_device *dev = ds->priv; 1326 u8 rgmii_ctrl = 0; 1327 1328 b53_read8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), &rgmii_ctrl); 1329 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC); 1330 1331 if (is63268(dev)) 1332 rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE; 1333 1334 rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII; 1335 1336 b53_write8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), rgmii_ctrl); 1337 1338 dev_dbg(ds->dev, "Configured port %d for %s\n", port, 1339 phy_modes(interface)); 1340 } 1341 1342 static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port, 1343 phy_interface_t interface) 1344 { 1345 struct b53_device *dev = ds->priv; 1346 u8 rgmii_ctrl = 0, off; 1347 1348 if (port == dev->imp_port) 1349 off = B53_RGMII_CTRL_IMP; 1350 else 1351 off = B53_RGMII_CTRL_P(port); 1352 1353 /* Configure the port RGMII clock delay by DLL disabled and 1354 * tx_clk aligned timing (restoring to reset defaults) 1355 */ 1356 b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl); 1357 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC); 1358 1359 /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make 1360 * sure that we enable the port TX clock internal delay to 1361 * account for this internal delay that is inserted, otherwise 1362 * the switch won't be able to receive 
correctly. 1363 * 1364 * PHY_INTERFACE_MODE_RGMII means that we are not introducing 1365 * any delay neither on transmission nor reception, so the 1366 * BCM53125 must also be configured accordingly to account for 1367 * the lack of delay and introduce 1368 * 1369 * The BCM53125 switch has its RX clock and TX clock control 1370 * swapped, hence the reason why we modify the TX clock path in 1371 * the "RGMII" case 1372 */ 1373 if (interface == PHY_INTERFACE_MODE_RGMII_TXID) 1374 rgmii_ctrl |= RGMII_CTRL_DLL_TXC; 1375 if (interface == PHY_INTERFACE_MODE_RGMII) 1376 rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC; 1377 1378 if (dev->chip_id != BCM53115_DEVICE_ID) 1379 rgmii_ctrl |= RGMII_CTRL_TIMING_SEL; 1380 1381 b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl); 1382 1383 dev_info(ds->dev, "Configured port %d for %s\n", port, 1384 phy_modes(interface)); 1385 } 1386 1387 static void b53_adjust_5325_mii(struct dsa_switch *ds, int port) 1388 { 1389 struct b53_device *dev = ds->priv; 1390 u8 reg = 0; 1391 1392 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 1393 ®); 1394 1395 /* reverse mii needs to be enabled */ 1396 if (!(reg & PORT_OVERRIDE_RV_MII_25)) { 1397 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 1398 reg | PORT_OVERRIDE_RV_MII_25); 1399 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 1400 ®); 1401 1402 if (!(reg & PORT_OVERRIDE_RV_MII_25)) { 1403 dev_err(ds->dev, 1404 "Failed to enable reverse MII mode\n"); 1405 return; 1406 } 1407 } 1408 } 1409 1410 void b53_port_event(struct dsa_switch *ds, int port) 1411 { 1412 struct b53_device *dev = ds->priv; 1413 bool link; 1414 u16 sts; 1415 1416 b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts); 1417 link = !!(sts & BIT(port)); 1418 dsa_port_phylink_mac_change(ds, port, link); 1419 } 1420 EXPORT_SYMBOL(b53_port_event); 1421 1422 static void b53_phylink_get_caps(struct dsa_switch *ds, int port, 1423 struct phylink_config *config) 1424 { 1425 struct b53_device *dev = ds->priv; 1426 1427 /* Internal ports need GMII for PHYLIB */ 1428 __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces); 1429 1430 /* These switches appear to support MII and RevMII too, but beyond 1431 * this, the code gives very few clues. FIXME: We probably need more 1432 * interface modes here. 1433 * 1434 * According to b53_srab_mux_init(), ports 3..5 can support: 1435 * SGMII, MII, GMII, RGMII or INTERNAL depending on the MUX setting. 1436 * However, the interface mode read from the MUX configuration is 1437 * not passed back to DSA, so phylink uses NA. 1438 * DT can specify RGMII for ports 0, 1. 1439 * For MDIO, port 8 can be RGMII_TXID. 1440 */ 1441 __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces); 1442 __set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces); 1443 1444 /* BCM63xx RGMII ports support RGMII */ 1445 if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4)) 1446 phy_interface_set_rgmii(config->supported_interfaces); 1447 1448 config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | 1449 MAC_10 | MAC_100; 1450 1451 /* 5325/5365 are not capable of gigabit speeds, everything else is. 1452 * Note: the original code also exclulded Gigagbit for MII, RevMII 1453 * and 802.3z modes. MII and RevMII are not able to work above 100M, 1454 * so will be excluded by the generic validator implementation. 1455 * However, the exclusion of Gigabit for 802.3z just seems wrong. 
1456 */ 1457 if (!(is5325(dev) || is5365(dev))) 1458 config->mac_capabilities |= MAC_1000; 1459 1460 /* Get the implementation specific capabilities */ 1461 if (dev->ops->phylink_get_caps) 1462 dev->ops->phylink_get_caps(dev, port, config); 1463 } 1464 1465 static struct phylink_pcs *b53_phylink_mac_select_pcs(struct phylink_config *config, 1466 phy_interface_t interface) 1467 { 1468 struct dsa_port *dp = dsa_phylink_to_port(config); 1469 struct b53_device *dev = dp->ds->priv; 1470 1471 if (!dev->ops->phylink_mac_select_pcs) 1472 return NULL; 1473 1474 return dev->ops->phylink_mac_select_pcs(dev, dp->index, interface); 1475 } 1476 1477 static void b53_phylink_mac_config(struct phylink_config *config, 1478 unsigned int mode, 1479 const struct phylink_link_state *state) 1480 { 1481 struct dsa_port *dp = dsa_phylink_to_port(config); 1482 phy_interface_t interface = state->interface; 1483 struct dsa_switch *ds = dp->ds; 1484 struct b53_device *dev = ds->priv; 1485 int port = dp->index; 1486 1487 if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4)) 1488 b53_adjust_63xx_rgmii(ds, port, interface); 1489 1490 if (mode == MLO_AN_FIXED) { 1491 if (is531x5(dev) && phy_interface_mode_is_rgmii(interface)) 1492 b53_adjust_531x5_rgmii(ds, port, interface); 1493 1494 /* configure MII port if necessary */ 1495 if (is5325(dev)) 1496 b53_adjust_5325_mii(ds, port); 1497 } 1498 } 1499 1500 static void b53_phylink_mac_link_down(struct phylink_config *config, 1501 unsigned int mode, 1502 phy_interface_t interface) 1503 { 1504 struct dsa_port *dp = dsa_phylink_to_port(config); 1505 struct b53_device *dev = dp->ds->priv; 1506 int port = dp->index; 1507 1508 if (mode == MLO_AN_PHY) 1509 return; 1510 1511 if (mode == MLO_AN_FIXED) { 1512 b53_force_link(dev, port, false); 1513 return; 1514 } 1515 1516 if (phy_interface_mode_is_8023z(interface) && 1517 dev->ops->serdes_link_set) 1518 dev->ops->serdes_link_set(dev, port, mode, interface, false); 1519 } 1520 1521 static void b53_phylink_mac_link_up(struct phylink_config *config, 1522 struct phy_device *phydev, 1523 unsigned int mode, 1524 phy_interface_t interface, 1525 int speed, int duplex, 1526 bool tx_pause, bool rx_pause) 1527 { 1528 struct dsa_port *dp = dsa_phylink_to_port(config); 1529 struct dsa_switch *ds = dp->ds; 1530 struct b53_device *dev = ds->priv; 1531 struct ethtool_keee *p = &dev->ports[dp->index].eee; 1532 int port = dp->index; 1533 1534 if (mode == MLO_AN_PHY) { 1535 /* Re-negotiate EEE if it was enabled already */ 1536 p->eee_enabled = b53_eee_init(ds, port, phydev); 1537 return; 1538 } 1539 1540 if (mode == MLO_AN_FIXED) { 1541 /* Force flow control on BCM5301x's CPU port */ 1542 if (is5301x(dev) && dsa_is_cpu_port(ds, port)) 1543 tx_pause = rx_pause = true; 1544 1545 b53_force_port_config(dev, port, speed, duplex, 1546 tx_pause, rx_pause); 1547 b53_force_link(dev, port, true); 1548 return; 1549 } 1550 1551 if (phy_interface_mode_is_8023z(interface) && 1552 dev->ops->serdes_link_set) 1553 dev->ops->serdes_link_set(dev, port, mode, interface, true); 1554 } 1555 1556 int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, 1557 struct netlink_ext_ack *extack) 1558 { 1559 struct b53_device *dev = ds->priv; 1560 1561 if (dev->vlan_filtering != vlan_filtering) { 1562 dev->vlan_filtering = vlan_filtering; 1563 b53_apply_config(dev); 1564 } 1565 1566 return 0; 1567 } 1568 EXPORT_SYMBOL(b53_vlan_filtering); 1569 1570 static int b53_vlan_prepare(struct dsa_switch *ds, int port, 1571 const struct switchdev_obj_port_vlan *vlan) 1572 { 
1573 struct b53_device *dev = ds->priv; 1574 1575 if ((is5325(dev) || is5365(dev)) && vlan->vid == 0) 1576 return -EOPNOTSUPP; 1577 1578 /* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of 1579 * receiving VLAN tagged frames at all, we can still allow the port to 1580 * be configured for egress untagged. 1581 */ 1582 if (dev->chip_id == BCM7278_DEVICE_ID && port == 7 && 1583 !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) 1584 return -EINVAL; 1585 1586 if (vlan->vid >= dev->num_vlans) 1587 return -ERANGE; 1588 1589 b53_enable_vlan(dev, port, true, dev->vlan_filtering); 1590 1591 return 0; 1592 } 1593 1594 int b53_vlan_add(struct dsa_switch *ds, int port, 1595 const struct switchdev_obj_port_vlan *vlan, 1596 struct netlink_ext_ack *extack) 1597 { 1598 struct b53_device *dev = ds->priv; 1599 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 1600 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; 1601 struct b53_vlan *vl; 1602 u16 old_pvid, new_pvid; 1603 int err; 1604 1605 err = b53_vlan_prepare(ds, port, vlan); 1606 if (err) 1607 return err; 1608 1609 if (vlan->vid == 0) 1610 return 0; 1611 1612 old_pvid = dev->ports[port].pvid; 1613 if (pvid) 1614 new_pvid = vlan->vid; 1615 else if (!pvid && vlan->vid == old_pvid) 1616 new_pvid = b53_default_pvid(dev); 1617 else 1618 new_pvid = old_pvid; 1619 dev->ports[port].pvid = new_pvid; 1620 1621 vl = &dev->vlans[vlan->vid]; 1622 1623 if (dsa_is_cpu_port(ds, port)) 1624 untagged = false; 1625 1626 vl->members |= BIT(port); 1627 if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port)) 1628 vl->untag |= BIT(port); 1629 else 1630 vl->untag &= ~BIT(port); 1631 1632 if (!dev->vlan_filtering) 1633 return 0; 1634 1635 b53_set_vlan_entry(dev, vlan->vid, vl); 1636 b53_fast_age_vlan(dev, vlan->vid); 1637 1638 if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) { 1639 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), 1640 new_pvid); 1641 b53_fast_age_vlan(dev, old_pvid); 1642 } 1643 1644 return 0; 1645 } 1646 EXPORT_SYMBOL(b53_vlan_add); 1647 1648 int b53_vlan_del(struct dsa_switch *ds, int port, 1649 const struct switchdev_obj_port_vlan *vlan) 1650 { 1651 struct b53_device *dev = ds->priv; 1652 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 1653 struct b53_vlan *vl; 1654 u16 pvid; 1655 1656 if (vlan->vid == 0) 1657 return 0; 1658 1659 pvid = dev->ports[port].pvid; 1660 1661 vl = &dev->vlans[vlan->vid]; 1662 1663 vl->members &= ~BIT(port); 1664 1665 if (pvid == vlan->vid) 1666 pvid = b53_default_pvid(dev); 1667 dev->ports[port].pvid = pvid; 1668 1669 if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port)) 1670 vl->untag &= ~(BIT(port)); 1671 1672 if (!dev->vlan_filtering) 1673 return 0; 1674 1675 b53_set_vlan_entry(dev, vlan->vid, vl); 1676 b53_fast_age_vlan(dev, vlan->vid); 1677 1678 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid); 1679 b53_fast_age_vlan(dev, pvid); 1680 1681 return 0; 1682 } 1683 EXPORT_SYMBOL(b53_vlan_del); 1684 1685 /* Address Resolution Logic routines. Caller must hold &dev->arl_mutex. 
*/ 1686 static int b53_arl_op_wait(struct b53_device *dev) 1687 { 1688 unsigned int timeout = 10; 1689 u8 reg; 1690 1691 do { 1692 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); 1693 if (!(reg & ARLTBL_START_DONE)) 1694 return 0; 1695 1696 usleep_range(1000, 2000); 1697 } while (timeout--); 1698 1699 dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg); 1700 1701 return -ETIMEDOUT; 1702 } 1703 1704 static int b53_arl_rw_op(struct b53_device *dev, unsigned int op) 1705 { 1706 u8 reg; 1707 1708 if (op > ARLTBL_RW) 1709 return -EINVAL; 1710 1711 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); 1712 reg |= ARLTBL_START_DONE; 1713 if (op) 1714 reg |= ARLTBL_RW; 1715 else 1716 reg &= ~ARLTBL_RW; 1717 if (dev->vlan_enabled) 1718 reg &= ~ARLTBL_IVL_SVL_SELECT; 1719 else 1720 reg |= ARLTBL_IVL_SVL_SELECT; 1721 b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg); 1722 1723 return b53_arl_op_wait(dev); 1724 } 1725 1726 static int b53_arl_read(struct b53_device *dev, u64 mac, 1727 u16 vid, struct b53_arl_entry *ent, u8 *idx) 1728 { 1729 DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES); 1730 unsigned int i; 1731 int ret; 1732 1733 ret = b53_arl_op_wait(dev); 1734 if (ret) 1735 return ret; 1736 1737 bitmap_zero(free_bins, dev->num_arl_bins); 1738 1739 /* Read the bins */ 1740 for (i = 0; i < dev->num_arl_bins; i++) { 1741 u64 mac_vid; 1742 u32 fwd_entry; 1743 1744 b53_read64(dev, B53_ARLIO_PAGE, 1745 B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid); 1746 b53_read32(dev, B53_ARLIO_PAGE, 1747 B53_ARLTBL_DATA_ENTRY(i), &fwd_entry); 1748 b53_arl_to_entry(ent, mac_vid, fwd_entry); 1749 1750 if (!(fwd_entry & ARLTBL_VALID)) { 1751 set_bit(i, free_bins); 1752 continue; 1753 } 1754 if ((mac_vid & ARLTBL_MAC_MASK) != mac) 1755 continue; 1756 if (dev->vlan_enabled && 1757 ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid) 1758 continue; 1759 *idx = i; 1760 return 0; 1761 } 1762 1763 *idx = find_first_bit(free_bins, dev->num_arl_bins); 1764 return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT; 1765 } 1766 1767 static int b53_arl_op(struct b53_device *dev, int op, int port, 1768 const unsigned char *addr, u16 vid, bool is_valid) 1769 { 1770 struct b53_arl_entry ent; 1771 u32 fwd_entry; 1772 u64 mac, mac_vid = 0; 1773 u8 idx = 0; 1774 int ret; 1775 1776 /* Convert the array into a 64-bit MAC */ 1777 mac = ether_addr_to_u64(addr); 1778 1779 /* Perform a read for the given MAC and VID */ 1780 b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac); 1781 b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid); 1782 1783 /* Issue a read operation for this MAC */ 1784 ret = b53_arl_rw_op(dev, 1); 1785 if (ret) 1786 return ret; 1787 1788 ret = b53_arl_read(dev, mac, vid, &ent, &idx); 1789 1790 /* If this is a read, just finish now */ 1791 if (op) 1792 return ret; 1793 1794 switch (ret) { 1795 case -ETIMEDOUT: 1796 return ret; 1797 case -ENOSPC: 1798 dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n", 1799 addr, vid); 1800 return is_valid ? 
ret : 0; 1801 case -ENOENT: 1802 /* We could not find a matching MAC, so reset to a new entry */ 1803 dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n", 1804 addr, vid, idx); 1805 fwd_entry = 0; 1806 break; 1807 default: 1808 dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n", 1809 addr, vid, idx); 1810 break; 1811 } 1812 1813 /* For multicast address, the port is a bitmask and the validity 1814 * is determined by having at least one port being still active 1815 */ 1816 if (!is_multicast_ether_addr(addr)) { 1817 ent.port = port; 1818 ent.is_valid = is_valid; 1819 } else { 1820 if (is_valid) 1821 ent.port |= BIT(port); 1822 else 1823 ent.port &= ~BIT(port); 1824 1825 ent.is_valid = !!(ent.port); 1826 } 1827 1828 ent.vid = vid; 1829 ent.is_static = true; 1830 ent.is_age = false; 1831 memcpy(ent.mac, addr, ETH_ALEN); 1832 b53_arl_from_entry(&mac_vid, &fwd_entry, &ent); 1833 1834 b53_write64(dev, B53_ARLIO_PAGE, 1835 B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid); 1836 b53_write32(dev, B53_ARLIO_PAGE, 1837 B53_ARLTBL_DATA_ENTRY(idx), fwd_entry); 1838 1839 return b53_arl_rw_op(dev, 0); 1840 } 1841 1842 int b53_fdb_add(struct dsa_switch *ds, int port, 1843 const unsigned char *addr, u16 vid, 1844 struct dsa_db db) 1845 { 1846 struct b53_device *priv = ds->priv; 1847 int ret; 1848 1849 /* 5325 and 5365 require some more massaging, but could 1850 * be supported eventually 1851 */ 1852 if (is5325(priv) || is5365(priv)) 1853 return -EOPNOTSUPP; 1854 1855 mutex_lock(&priv->arl_mutex); 1856 ret = b53_arl_op(priv, 0, port, addr, vid, true); 1857 mutex_unlock(&priv->arl_mutex); 1858 1859 return ret; 1860 } 1861 EXPORT_SYMBOL(b53_fdb_add); 1862 1863 int b53_fdb_del(struct dsa_switch *ds, int port, 1864 const unsigned char *addr, u16 vid, 1865 struct dsa_db db) 1866 { 1867 struct b53_device *priv = ds->priv; 1868 int ret; 1869 1870 mutex_lock(&priv->arl_mutex); 1871 ret = b53_arl_op(priv, 0, port, addr, vid, false); 1872 mutex_unlock(&priv->arl_mutex); 1873 1874 return ret; 1875 } 1876 EXPORT_SYMBOL(b53_fdb_del); 1877 1878 static int b53_arl_search_wait(struct b53_device *dev) 1879 { 1880 unsigned int timeout = 1000; 1881 u8 reg; 1882 1883 do { 1884 b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, ®); 1885 if (!(reg & ARL_SRCH_STDN)) 1886 return 0; 1887 1888 if (reg & ARL_SRCH_VLID) 1889 return 0; 1890 1891 usleep_range(1000, 2000); 1892 } while (timeout--); 1893 1894 return -ETIMEDOUT; 1895 } 1896 1897 static void b53_arl_search_rd(struct b53_device *dev, u8 idx, 1898 struct b53_arl_entry *ent) 1899 { 1900 u64 mac_vid; 1901 u32 fwd_entry; 1902 1903 b53_read64(dev, B53_ARLIO_PAGE, 1904 B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid); 1905 b53_read32(dev, B53_ARLIO_PAGE, 1906 B53_ARL_SRCH_RSTL(idx), &fwd_entry); 1907 b53_arl_to_entry(ent, mac_vid, fwd_entry); 1908 } 1909 1910 static int b53_fdb_copy(int port, const struct b53_arl_entry *ent, 1911 dsa_fdb_dump_cb_t *cb, void *data) 1912 { 1913 if (!ent->is_valid) 1914 return 0; 1915 1916 if (port != ent->port) 1917 return 0; 1918 1919 return cb(ent->mac, ent->vid, ent->is_static, data); 1920 } 1921 1922 int b53_fdb_dump(struct dsa_switch *ds, int port, 1923 dsa_fdb_dump_cb_t *cb, void *data) 1924 { 1925 struct b53_device *priv = ds->priv; 1926 struct b53_arl_entry results[2]; 1927 unsigned int count = 0; 1928 int ret; 1929 u8 reg; 1930 1931 mutex_lock(&priv->arl_mutex); 1932 1933 /* Start search operation */ 1934 reg = ARL_SRCH_STDN; 1935 b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg); 1936 1937 do { 1938 ret = b53_arl_search_wait(priv); 1939 if (ret) 
1940 break; 1941 1942 b53_arl_search_rd(priv, 0, &results[0]); 1943 ret = b53_fdb_copy(port, &results[0], cb, data); 1944 if (ret) 1945 break; 1946 1947 if (priv->num_arl_bins > 2) { 1948 b53_arl_search_rd(priv, 1, &results[1]); 1949 ret = b53_fdb_copy(port, &results[1], cb, data); 1950 if (ret) 1951 break; 1952 1953 if (!results[0].is_valid && !results[1].is_valid) 1954 break; 1955 } 1956 1957 } while (count++ < b53_max_arl_entries(priv) / 2); 1958 1959 mutex_unlock(&priv->arl_mutex); 1960 1961 return 0; 1962 } 1963 EXPORT_SYMBOL(b53_fdb_dump); 1964 1965 int b53_mdb_add(struct dsa_switch *ds, int port, 1966 const struct switchdev_obj_port_mdb *mdb, 1967 struct dsa_db db) 1968 { 1969 struct b53_device *priv = ds->priv; 1970 int ret; 1971 1972 /* 5325 and 5365 require some more massaging, but could 1973 * be supported eventually 1974 */ 1975 if (is5325(priv) || is5365(priv)) 1976 return -EOPNOTSUPP; 1977 1978 mutex_lock(&priv->arl_mutex); 1979 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true); 1980 mutex_unlock(&priv->arl_mutex); 1981 1982 return ret; 1983 } 1984 EXPORT_SYMBOL(b53_mdb_add); 1985 1986 int b53_mdb_del(struct dsa_switch *ds, int port, 1987 const struct switchdev_obj_port_mdb *mdb, 1988 struct dsa_db db) 1989 { 1990 struct b53_device *priv = ds->priv; 1991 int ret; 1992 1993 mutex_lock(&priv->arl_mutex); 1994 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false); 1995 mutex_unlock(&priv->arl_mutex); 1996 if (ret) 1997 dev_err(ds->dev, "failed to delete MDB entry\n"); 1998 1999 return ret; 2000 } 2001 EXPORT_SYMBOL(b53_mdb_del); 2002 2003 int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, 2004 bool *tx_fwd_offload, struct netlink_ext_ack *extack) 2005 { 2006 struct b53_device *dev = ds->priv; 2007 struct b53_vlan *vl; 2008 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; 2009 u16 pvlan, reg, pvid; 2010 unsigned int i; 2011 2012 /* On 7278, port 7 which connects to the ASP should only receive 2013 * traffic from matching CFP rules. 
2014 */ 2015 if (dev->chip_id == BCM7278_DEVICE_ID && port == 7) 2016 return -EINVAL; 2017 2018 pvid = b53_default_pvid(dev); 2019 vl = &dev->vlans[pvid]; 2020 2021 if (dev->vlan_filtering) { 2022 /* Make this port leave the all VLANs join since we will have 2023 * proper VLAN entries from now on 2024 */ 2025 if (is58xx(dev)) { 2026 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, 2027 ®); 2028 reg &= ~BIT(port); 2029 if ((reg & BIT(cpu_port)) == BIT(cpu_port)) 2030 reg &= ~BIT(cpu_port); 2031 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, 2032 reg); 2033 } 2034 2035 b53_get_vlan_entry(dev, pvid, vl); 2036 vl->members &= ~BIT(port); 2037 if (vl->members == BIT(cpu_port)) 2038 vl->members &= ~BIT(cpu_port); 2039 vl->untag = vl->members; 2040 b53_set_vlan_entry(dev, pvid, vl); 2041 } 2042 2043 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 2044 2045 b53_for_each_port(dev, i) { 2046 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) 2047 continue; 2048 2049 /* Add this local port to the remote port VLAN control 2050 * membership and update the remote port bitmask 2051 */ 2052 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), ®); 2053 reg |= BIT(port); 2054 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg); 2055 dev->ports[i].vlan_ctl_mask = reg; 2056 2057 pvlan |= BIT(i); 2058 } 2059 2060 /* Disable redirection of unknown SA to the CPU port */ 2061 b53_set_eap_mode(dev, port, EAP_MODE_BASIC); 2062 2063 /* Configure the local port VLAN control membership to include 2064 * remote ports and update the local port bitmask 2065 */ 2066 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 2067 dev->ports[port].vlan_ctl_mask = pvlan; 2068 2069 return 0; 2070 } 2071 EXPORT_SYMBOL(b53_br_join); 2072 2073 void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) 2074 { 2075 struct b53_device *dev = ds->priv; 2076 struct b53_vlan *vl; 2077 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; 2078 unsigned int i; 2079 u16 pvlan, reg, pvid; 2080 2081 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 2082 2083 b53_for_each_port(dev, i) { 2084 /* Don't touch the remaining ports */ 2085 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) 2086 continue; 2087 2088 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), ®); 2089 reg &= ~BIT(port); 2090 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg); 2091 dev->ports[port].vlan_ctl_mask = reg; 2092 2093 /* Prevent self removal to preserve isolation */ 2094 if (port != i) 2095 pvlan &= ~BIT(i); 2096 } 2097 2098 /* Enable redirection of unknown SA to the CPU port */ 2099 b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED); 2100 2101 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 2102 dev->ports[port].vlan_ctl_mask = pvlan; 2103 2104 pvid = b53_default_pvid(dev); 2105 vl = &dev->vlans[pvid]; 2106 2107 if (dev->vlan_filtering) { 2108 /* Make this port join all VLANs without VLAN entries */ 2109 if (is58xx(dev)) { 2110 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, ®); 2111 reg |= BIT(port); 2112 if (!(reg & BIT(cpu_port))) 2113 reg |= BIT(cpu_port); 2114 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); 2115 } 2116 2117 b53_get_vlan_entry(dev, pvid, vl); 2118 vl->members |= BIT(port) | BIT(cpu_port); 2119 vl->untag |= BIT(port) | BIT(cpu_port); 2120 b53_set_vlan_entry(dev, pvid, vl); 2121 } 2122 } 2123 EXPORT_SYMBOL(b53_br_leave); 2124 2125 void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 
void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
{
	struct b53_device *dev = ds->priv;
	u8 hw_state;
	u8 reg;

	switch (state) {
	case BR_STATE_DISABLED:
		hw_state = PORT_CTRL_DIS_STATE;
		break;
	case BR_STATE_LISTENING:
		hw_state = PORT_CTRL_LISTEN_STATE;
		break;
	case BR_STATE_LEARNING:
		hw_state = PORT_CTRL_LEARN_STATE;
		break;
	case BR_STATE_FORWARDING:
		hw_state = PORT_CTRL_FWD_STATE;
		break;
	case BR_STATE_BLOCKING:
		hw_state = PORT_CTRL_BLOCK_STATE;
		break;
	default:
		dev_err(ds->dev, "invalid STP state: %d\n", state);
		return;
	}

	b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
	reg &= ~PORT_CTRL_STP_STATE_MASK;
	reg |= hw_state;
	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
}
EXPORT_SYMBOL(b53_br_set_stp_state);

void b53_br_fast_age(struct dsa_switch *ds, int port)
{
	struct b53_device *dev = ds->priv;

	if (b53_fast_age_port(dev, port))
		dev_err(ds->dev, "fast ageing failed\n");
}
EXPORT_SYMBOL(b53_br_fast_age);

int b53_br_flags_pre(struct dsa_switch *ds, int port,
		     struct switchdev_brport_flags flags,
		     struct netlink_ext_ack *extack)
{
	if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_LEARNING))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(b53_br_flags_pre);

int b53_br_flags(struct dsa_switch *ds, int port,
		 struct switchdev_brport_flags flags,
		 struct netlink_ext_ack *extack)
{
	if (flags.mask & BR_FLOOD)
		b53_port_set_ucast_flood(ds->priv, port,
					 !!(flags.val & BR_FLOOD));
	if (flags.mask & BR_MCAST_FLOOD)
		b53_port_set_mcast_flood(ds->priv, port,
					 !!(flags.val & BR_MCAST_FLOOD));
	if (flags.mask & BR_LEARNING)
		b53_port_set_learning(ds->priv, port,
				      !!(flags.val & BR_LEARNING));

	return 0;
}
EXPORT_SYMBOL(b53_br_flags);

static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
{
	/* Broadcom switches will accept enabling Broadcom tags on the
	 * following ports: 5, 7 and 8; any other port is not supported
	 */
	switch (port) {
	case B53_CPU_PORT_25:
	case 7:
	case B53_CPU_PORT:
		return true;
	}

	return false;
}

static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port,
				     enum dsa_tag_protocol tag_protocol)
{
	bool ret = b53_possible_cpu_port(ds, port);

	if (!ret) {
		dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n",
			 port);
		return ret;
	}

	switch (tag_protocol) {
	case DSA_TAG_PROTO_BRCM:
	case DSA_TAG_PROTO_BRCM_PREPEND:
		dev_warn(ds->dev,
			 "Port %d is stacked to Broadcom tag switch\n", port);
		ret = false;
		break;
	default:
		ret = true;
		break;
	}

	return ret;
}
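
/* Select the tagging protocol for this CPU port: no tagging when the port
 * cannot carry Broadcom tags (or is stacked to another tagging switch),
 * the legacy 6 byte tag on 5325/5365/63xx, the prepended tag on the
 * BCM58xx CPU port, and the regular Broadcom tag otherwise.
 */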
enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
					   enum dsa_tag_protocol mprot)
{
	struct b53_device *dev = ds->priv;

	if (!b53_can_enable_brcm_tags(ds, port, mprot)) {
		dev->tag_protocol = DSA_TAG_PROTO_NONE;
		goto out;
	}

	/* Older models require a different 6 byte tag */
	if (is5325(dev) || is5365(dev) || is63xx(dev)) {
		dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY;
		goto out;
	}

	/* Broadcom BCM58xx chips have a flow accelerator on Port 8
	 * which requires us to use the prepended Broadcom tag type
	 */
	if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT) {
		dev->tag_protocol = DSA_TAG_PROTO_BRCM_PREPEND;
		goto out;
	}

	dev->tag_protocol = DSA_TAG_PROTO_BRCM;
out:
	return dev->tag_protocol;
}
EXPORT_SYMBOL(b53_get_tag_protocol);

int b53_mirror_add(struct dsa_switch *ds, int port,
		   struct dsa_mall_mirror_tc_entry *mirror, bool ingress,
		   struct netlink_ext_ack *extack)
{
	struct b53_device *dev = ds->priv;
	u16 reg, loc;

	if (ingress)
		loc = B53_IG_MIR_CTL;
	else
		loc = B53_EG_MIR_CTL;

	b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
	reg |= BIT(port);
	b53_write16(dev, B53_MGMT_PAGE, loc, reg);

	b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
	reg &= ~CAP_PORT_MASK;
	reg |= mirror->to_local_port;
	reg |= MIRROR_EN;
	b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);

	return 0;
}
EXPORT_SYMBOL(b53_mirror_add);

void b53_mirror_del(struct dsa_switch *ds, int port,
		    struct dsa_mall_mirror_tc_entry *mirror)
{
	struct b53_device *dev = ds->priv;
	bool loc_disable = false, other_loc_disable = false;
	u16 reg, loc;

	if (mirror->ingress)
		loc = B53_IG_MIR_CTL;
	else
		loc = B53_EG_MIR_CTL;

	/* Update the desired ingress/egress register */
	b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
	reg &= ~BIT(port);
	if (!(reg & MIRROR_MASK))
		loc_disable = true;
	b53_write16(dev, B53_MGMT_PAGE, loc, reg);

	/* Now look at the other one to know if we can disable mirroring
	 * entirely
	 */
	if (mirror->ingress)
		b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, &reg);
	else
		b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, &reg);
	if (!(reg & MIRROR_MASK))
		other_loc_disable = true;

	b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
	/* Both no longer have ports, let's disable mirroring */
	if (loc_disable && other_loc_disable) {
		reg &= ~MIRROR_EN;
		reg &= ~mirror->to_local_port;
	}
	b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
}
EXPORT_SYMBOL(b53_mirror_del);

/* Returns 0 if EEE was not enabled, or 1 otherwise
 */
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
{
	int ret;

	if (!b53_support_eee(ds, port))
		return 0;

	ret = phy_init_eee(phy, false);
	if (ret)
		return 0;

	b53_eee_enable_set(ds, port, true);

	return 1;
}
EXPORT_SYMBOL(b53_eee_init);

bool b53_support_eee(struct dsa_switch *ds, int port)
{
	struct b53_device *dev = ds->priv;

	return !is5325(dev) && !is5365(dev) && !is63xx(dev);
}
EXPORT_SYMBOL(b53_support_eee);

int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
{
	struct b53_device *dev = ds->priv;
	struct ethtool_keee *p = &dev->ports[port].eee;

	p->eee_enabled = e->eee_enabled;
	b53_eee_enable_set(ds, port, e->eee_enabled);

	return 0;
}
EXPORT_SYMBOL(b53_set_mac_eee);
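
/* Jumbo frames are only configured on the CPU port: any MTU above the
 * standard Ethernet payload size enables the jumbo register, while 5325
 * and 5365 (which have no jumbo registers in the chip data below) are
 * left untouched.
 */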
static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu)
{
	struct b53_device *dev = ds->priv;
	bool enable_jumbo;
	bool allow_10_100;

	if (is5325(dev) || is5365(dev))
		return 0;

	if (!dsa_is_cpu_port(ds, port))
		return 0;

	enable_jumbo = (mtu > ETH_DATA_LEN);
	allow_10_100 = !is63xx(dev);

	return b53_set_jumbo(dev, enable_jumbo, allow_10_100);
}

static int b53_get_max_mtu(struct dsa_switch *ds, int port)
{
	struct b53_device *dev = ds->priv;

	if (is5325(dev) || is5365(dev))
		return B53_MAX_MTU_25;

	return B53_MAX_MTU;
}

int b53_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
	struct b53_device *dev = ds->priv;
	u32 atc;
	int reg;

	if (is63xx(dev))
		reg = B53_AGING_TIME_CONTROL_63XX;
	else
		reg = B53_AGING_TIME_CONTROL;

	atc = DIV_ROUND_CLOSEST(msecs, 1000);

	if (!is5325(dev) && !is5365(dev))
		atc |= AGE_CHANGE;

	b53_write32(dev, B53_MGMT_PAGE, reg, atc);

	return 0;
}
EXPORT_SYMBOL_GPL(b53_set_ageing_time);

static const struct phylink_mac_ops b53_phylink_mac_ops = {
	.mac_select_pcs = b53_phylink_mac_select_pcs,
	.mac_config = b53_phylink_mac_config,
	.mac_link_down = b53_phylink_mac_link_down,
	.mac_link_up = b53_phylink_mac_link_up,
};

static const struct dsa_switch_ops b53_switch_ops = {
	.get_tag_protocol = b53_get_tag_protocol,
	.setup = b53_setup,
	.teardown = b53_teardown,
	.get_strings = b53_get_strings,
	.get_ethtool_stats = b53_get_ethtool_stats,
	.get_sset_count = b53_get_sset_count,
	.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
	.phy_read = b53_phy_read16,
	.phy_write = b53_phy_write16,
	.phylink_get_caps = b53_phylink_get_caps,
	.port_setup = b53_setup_port,
	.port_enable = b53_enable_port,
	.port_disable = b53_disable_port,
	.support_eee = b53_support_eee,
	.set_mac_eee = b53_set_mac_eee,
	.set_ageing_time = b53_set_ageing_time,
	.port_bridge_join = b53_br_join,
	.port_bridge_leave = b53_br_leave,
	.port_pre_bridge_flags = b53_br_flags_pre,
	.port_bridge_flags = b53_br_flags,
	.port_stp_state_set = b53_br_set_stp_state,
	.port_fast_age = b53_br_fast_age,
	.port_vlan_filtering = b53_vlan_filtering,
	.port_vlan_add = b53_vlan_add,
	.port_vlan_del = b53_vlan_del,
	.port_fdb_dump = b53_fdb_dump,
	.port_fdb_add = b53_fdb_add,
	.port_fdb_del = b53_fdb_del,
	.port_mirror_add = b53_mirror_add,
	.port_mirror_del = b53_mirror_del,
	.port_mdb_add = b53_mdb_add,
	.port_mdb_del = b53_mdb_del,
	.port_max_mtu = b53_get_max_mtu,
	.port_change_mtu = b53_change_mtu,
};

struct b53_chip_data {
	u32 chip_id;
	const char *dev_name;
	u16 vlans;
	u16 enabled_ports;
	u8 imp_port;
	u8 cpu_port;
	u8 vta_regs[3];
	u8 arl_bins;
	u16 arl_buckets;
	u8 duplex_reg;
	u8 jumbo_pm_reg;
	u8 jumbo_size_reg;
};

#define B53_VTA_REGS \
	{ B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY }
#define B53_VTA_REGS_9798 \
	{ B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 }
#define B53_VTA_REGS_63XX \
	{ B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX }
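
/* Per-chip configuration: which ports exist, where the IMP port sits, how
 * large the VLAN and ARL tables are, and which register layout is used for
 * duplex status, VLAN table access and jumbo frame control.
 */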
static const struct b53_chip_data b53_switch_chips[] = {
	{
		.chip_id = BCM5325_DEVICE_ID,
		.dev_name = "BCM5325",
		.vlans = 16,
		.enabled_ports = 0x3f,
		.arl_bins = 2,
		.arl_buckets = 1024,
		.imp_port = 5,
		.duplex_reg = B53_DUPLEX_STAT_FE,
	},
	{
		.chip_id = BCM5365_DEVICE_ID,
		.dev_name = "BCM5365",
		.vlans = 256,
		.enabled_ports = 0x3f,
		.arl_bins = 2,
		.arl_buckets = 1024,
		.imp_port = 5,
		.duplex_reg = B53_DUPLEX_STAT_FE,
	},
	{
		.chip_id = BCM5389_DEVICE_ID,
		.dev_name = "BCM5389",
		.vlans = 4096,
		.enabled_ports = 0x11f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM5395_DEVICE_ID,
		.dev_name = "BCM5395",
		.vlans = 4096,
		.enabled_ports = 0x11f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM5397_DEVICE_ID,
		.dev_name = "BCM5397",
		.vlans = 4096,
		.enabled_ports = 0x11f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS_9798,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM5398_DEVICE_ID,
		.dev_name = "BCM5398",
		.vlans = 4096,
		.enabled_ports = 0x17f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS_9798,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53101_DEVICE_ID,
		.dev_name = "BCM53101",
		.vlans = 4096,
		.enabled_ports = 0x11f,
		.arl_bins = 4,
		.arl_buckets = 512,
		.vta_regs = B53_VTA_REGS,
		.imp_port = 8,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53115_DEVICE_ID,
		.dev_name = "BCM53115",
		.vlans = 4096,
		.enabled_ports = 0x11f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.vta_regs = B53_VTA_REGS,
		.imp_port = 8,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53125_DEVICE_ID,
		.dev_name = "BCM53125",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53128_DEVICE_ID,
		.dev_name = "BCM53128",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM63XX_DEVICE_ID,
		.dev_name = "BCM63xx",
		.vlans = 4096,
		.enabled_ports = 0, /* pdata must provide them */
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS_63XX,
		.duplex_reg = B53_DUPLEX_STAT_63XX,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
	},
	{
		.chip_id = BCM63268_DEVICE_ID,
		.dev_name = "BCM63268",
		.vlans = 4096,
		.enabled_ports = 0, /* pdata must provide them */
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS_63XX,
		.duplex_reg = B53_DUPLEX_STAT_63XX,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
	},
	{
		.chip_id = BCM53010_DEVICE_ID,
		.dev_name = "BCM53010",
		.vlans = 4096,
		.enabled_ports = 0x1bf,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53011_DEVICE_ID,
		.dev_name = "BCM53011",
		.vlans = 4096,
		.enabled_ports = 0x1bf,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53012_DEVICE_ID,
		.dev_name = "BCM53012",
		.vlans = 4096,
		.enabled_ports = 0x1bf,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53018_DEVICE_ID,
		.dev_name = "BCM53018",
		.vlans = 4096,
		.enabled_ports = 0x1bf,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53019_DEVICE_ID,
		.dev_name = "BCM53019",
		.vlans = 4096,
		.enabled_ports = 0x1bf,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM58XX_DEVICE_ID,
		.dev_name = "BCM585xx/586xx/88312",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM583XX_DEVICE_ID,
		.dev_name = "BCM583xx/11360",
		.vlans = 4096,
		.enabled_ports = 0x103,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	/* Starfighter 2 */
	{
		.chip_id = BCM4908_DEVICE_ID,
		.dev_name = "BCM4908",
		.vlans = 4096,
		.enabled_ports = 0x1bf,
		.arl_bins = 4,
		.arl_buckets = 256,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM7445_DEVICE_ID,
		.dev_name = "BCM7445",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM7278_DEVICE_ID,
		.dev_name = "BCM7278",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_bins = 4,
		.arl_buckets = 256,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53134_DEVICE_ID,
		.dev_name = "BCM53134",
		.vlans = 4096,
		.enabled_ports = 0x12f,
		.imp_port = 8,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
};

static int b53_switch_init(struct b53_device *dev)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
		const struct b53_chip_data *chip = &b53_switch_chips[i];

		if (chip->chip_id == dev->chip_id) {
			if (!dev->enabled_ports)
				dev->enabled_ports = chip->enabled_ports;
			dev->name = chip->dev_name;
			dev->duplex_reg = chip->duplex_reg;
			dev->vta_regs[0] = chip->vta_regs[0];
			dev->vta_regs[1] = chip->vta_regs[1];
			dev->vta_regs[2] = chip->vta_regs[2];
			dev->jumbo_pm_reg = chip->jumbo_pm_reg;
			dev->imp_port = chip->imp_port;
			dev->num_vlans = chip->vlans;
			dev->num_arl_bins = chip->arl_bins;
			dev->num_arl_buckets = chip->arl_buckets;
			break;
		}
	}

	/* check which BCM5325x version we have */
	if (is5325(dev)) {
		u8 vc4;

		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);

		/* check reserved bits */
		switch (vc4 & 3) {
		case 1:
			/* BCM5325E */
			break;
		case 3:
			/* BCM5325F - do not use port 4 */
			dev->enabled_ports &= ~BIT(4);
			break;
		default:
			/* On the BCM47XX SoCs this is the supported internal switch. */
#ifndef CONFIG_BCM47XX
			/* BCM5325M */
			return -EINVAL;
#else
			break;
#endif
		}
	}

	dev->num_ports = fls(dev->enabled_ports);

	dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS);

	/* Include non-standard CPU port built-in PHYs to be probed */
	if (is539x(dev) || is531x5(dev)) {
		for (i = 0; i < dev->num_ports; i++) {
			if (!(dev->ds->phys_mii_mask & BIT(i)) &&
			    !b53_possible_cpu_port(dev->ds, i))
				dev->ds->phys_mii_mask |= BIT(i);
		}
	}

	dev->ports = devm_kcalloc(dev->dev,
				  dev->num_ports, sizeof(struct b53_port),
				  GFP_KERNEL);
	if (!dev->ports)
		return -ENOMEM;

	dev->vlans = devm_kcalloc(dev->dev,
				  dev->num_vlans, sizeof(struct b53_vlan),
				  GFP_KERNEL);
	if (!dev->vlans)
		return -ENOMEM;

	dev->reset_gpio = b53_switch_get_reset_gpio(dev);
	if (dev->reset_gpio >= 0) {
		ret = devm_gpio_request_one(dev->dev, dev->reset_gpio,
					    GPIOF_OUT_INIT_HIGH, "robo_reset");
		if (ret)
			return ret;
	}

	return 0;
}
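
/* Usage sketch for bus glue drivers (illustrative only; the "foo" names
 * below are hypothetical): allocate the switch with b53_switch_alloc(),
 * optionally fill in dev->pdata, then hand it to b53_switch_register(),
 * which detects the chip if no chip_id was provided:
 *
 *	dev = b53_switch_alloc(&pdev->dev, &foo_io_ops, foo_priv);
 *	if (!dev)
 *		return -ENOMEM;
 *	return b53_switch_register(dev);
 */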
struct b53_device *b53_switch_alloc(struct device *base,
				    const struct b53_io_ops *ops,
				    void *priv)
{
	struct dsa_switch *ds;
	struct b53_device *dev;

	ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	ds->dev = base;

	dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	ds->priv = dev;
	dev->dev = base;

	dev->ds = ds;
	dev->priv = priv;
	dev->ops = ops;
	ds->ops = &b53_switch_ops;
	ds->phylink_mac_ops = &b53_phylink_mac_ops;
	dev->vlan_enabled = true;
	dev->vlan_filtering = false;
	/* Let DSA handle the case where multiple bridges span the same switch
	 * device and different VLAN awareness settings are requested, which
	 * would break filtering semantics for any of the other bridge
	 * devices (not supported by the hardware).
	 */
	ds->vlan_filtering_is_global = true;

	mutex_init(&dev->reg_mutex);
	mutex_init(&dev->stats_mutex);
	mutex_init(&dev->arl_mutex);

	return dev;
}
EXPORT_SYMBOL(b53_switch_alloc);

int b53_switch_detect(struct b53_device *dev)
{
	u32 id32;
	u16 tmp;
	u8 id8;
	int ret;

	ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8);
	if (ret)
		return ret;

	switch (id8) {
	case 0:
		/* BCM5325 and BCM5365 do not have this register so reads
		 * return 0. But the read operation did succeed, so assume
		 * this is one of them.
		 *
		 * Next check if we can write to the 5325's VTA register; for
		 * 5365 it is read only.
		 */
		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
		b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);

		if (tmp == 0xf)
			dev->chip_id = BCM5325_DEVICE_ID;
		else
			dev->chip_id = BCM5365_DEVICE_ID;
		break;
	case BCM5389_DEVICE_ID:
	case BCM5395_DEVICE_ID:
	case BCM5397_DEVICE_ID:
	case BCM5398_DEVICE_ID:
		dev->chip_id = id8;
		break;
	default:
		ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32);
		if (ret)
			return ret;

		switch (id32) {
		case BCM53101_DEVICE_ID:
		case BCM53115_DEVICE_ID:
		case BCM53125_DEVICE_ID:
		case BCM53128_DEVICE_ID:
		case BCM53010_DEVICE_ID:
		case BCM53011_DEVICE_ID:
		case BCM53012_DEVICE_ID:
		case BCM53018_DEVICE_ID:
		case BCM53019_DEVICE_ID:
		case BCM53134_DEVICE_ID:
			dev->chip_id = id32;
			break;
		default:
			dev_err(dev->dev,
				"unsupported switch detected (BCM53%02x/BCM%x)\n",
				id8, id32);
			return -ENODEV;
		}
	}

	if (dev->chip_id == BCM5325_DEVICE_ID)
		return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25,
				 &dev->core_rev);
	else
		return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID,
				 &dev->core_rev);
}
EXPORT_SYMBOL(b53_switch_detect);

int b53_switch_register(struct b53_device *dev)
{
	int ret;

	if (dev->pdata) {
		dev->chip_id = dev->pdata->chip_id;
		dev->enabled_ports = dev->pdata->enabled_ports;
	}

	if (!dev->chip_id && b53_switch_detect(dev))
		return -EINVAL;

	ret = b53_switch_init(dev);
	if (ret)
		return ret;

	dev_info(dev->dev, "found switch: %s, rev %i\n",
		 dev->name, dev->core_rev);

	return dsa_register_switch(dev->ds);
}
EXPORT_SYMBOL(b53_switch_register);

MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
MODULE_DESCRIPTION("B53 switch library");
MODULE_LICENSE("Dual BSD/GPL");