1 /* 2 * B53 switch driver main logic 3 * 4 * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org> 5 * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com> 6 * 7 * Permission to use, copy, modify, and/or distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include <linux/delay.h> 21 #include <linux/export.h> 22 #include <linux/gpio.h> 23 #include <linux/kernel.h> 24 #include <linux/module.h> 25 #include <linux/platform_data/b53.h> 26 #include <linux/phy.h> 27 #include <linux/phylink.h> 28 #include <linux/etherdevice.h> 29 #include <linux/if_bridge.h> 30 #include <linux/if_vlan.h> 31 #include <net/dsa.h> 32 33 #include "b53_regs.h" 34 #include "b53_priv.h" 35 36 struct b53_mib_desc { 37 u8 size; 38 u8 offset; 39 const char *name; 40 }; 41 42 /* BCM5365 MIB counters */ 43 static const struct b53_mib_desc b53_mibs_65[] = { 44 { 8, 0x00, "TxOctets" }, 45 { 4, 0x08, "TxDropPkts" }, 46 { 4, 0x10, "TxBroadcastPkts" }, 47 { 4, 0x14, "TxMulticastPkts" }, 48 { 4, 0x18, "TxUnicastPkts" }, 49 { 4, 0x1c, "TxCollisions" }, 50 { 4, 0x20, "TxSingleCollision" }, 51 { 4, 0x24, "TxMultipleCollision" }, 52 { 4, 0x28, "TxDeferredTransmit" }, 53 { 4, 0x2c, "TxLateCollision" }, 54 { 4, 0x30, "TxExcessiveCollision" }, 55 { 4, 0x38, "TxPausePkts" }, 56 { 8, 0x44, "RxOctets" }, 57 { 4, 0x4c, "RxUndersizePkts" }, 58 { 4, 0x50, "RxPausePkts" }, 59 { 4, 0x54, "Pkts64Octets" }, 60 { 4, 0x58, "Pkts65to127Octets" }, 61 { 4, 0x5c, "Pkts128to255Octets" }, 62 { 4, 0x60, "Pkts256to511Octets" }, 63 { 4, 0x64, "Pkts512to1023Octets" }, 64 { 4, 0x68, "Pkts1024to1522Octets" }, 65 { 4, 0x6c, "RxOversizePkts" }, 66 { 4, 0x70, "RxJabbers" }, 67 { 4, 0x74, "RxAlignmentErrors" }, 68 { 4, 0x78, "RxFCSErrors" }, 69 { 8, 0x7c, "RxGoodOctets" }, 70 { 4, 0x84, "RxDropPkts" }, 71 { 4, 0x88, "RxUnicastPkts" }, 72 { 4, 0x8c, "RxMulticastPkts" }, 73 { 4, 0x90, "RxBroadcastPkts" }, 74 { 4, 0x94, "RxSAChanges" }, 75 { 4, 0x98, "RxFragments" }, 76 }; 77 78 #define B53_MIBS_65_SIZE ARRAY_SIZE(b53_mibs_65) 79 80 /* BCM63xx MIB counters */ 81 static const struct b53_mib_desc b53_mibs_63xx[] = { 82 { 8, 0x00, "TxOctets" }, 83 { 4, 0x08, "TxDropPkts" }, 84 { 4, 0x0c, "TxQoSPkts" }, 85 { 4, 0x10, "TxBroadcastPkts" }, 86 { 4, 0x14, "TxMulticastPkts" }, 87 { 4, 0x18, "TxUnicastPkts" }, 88 { 4, 0x1c, "TxCollisions" }, 89 { 4, 0x20, "TxSingleCollision" }, 90 { 4, 0x24, "TxMultipleCollision" }, 91 { 4, 0x28, "TxDeferredTransmit" }, 92 { 4, 0x2c, "TxLateCollision" }, 93 { 4, 0x30, "TxExcessiveCollision" }, 94 { 4, 0x38, "TxPausePkts" }, 95 { 8, 0x3c, "TxQoSOctets" }, 96 { 8, 0x44, "RxOctets" }, 97 { 4, 0x4c, "RxUndersizePkts" }, 98 { 4, 0x50, "RxPausePkts" }, 99 { 4, 0x54, "Pkts64Octets" }, 100 { 4, 0x58, "Pkts65to127Octets" }, 101 { 4, 0x5c, "Pkts128to255Octets" }, 102 { 4, 0x60, "Pkts256to511Octets" }, 103 { 4, 0x64, "Pkts512to1023Octets" }, 104 { 4, 0x68, "Pkts1024to1522Octets" }, 105 { 4, 0x6c, "RxOversizePkts" }, 106 { 4, 0x70, "RxJabbers" }, 107 { 4, 0x74, "RxAlignmentErrors" }, 108 { 4, 0x78, "RxFCSErrors" }, 109 { 8, 0x7c, "RxGoodOctets" }, 110 { 4, 0x84, "RxDropPkts" }, 111 { 4, 0x88, "RxUnicastPkts" }, 112 { 4, 0x8c, "RxMulticastPkts" }, 113 { 4, 0x90, "RxBroadcastPkts" }, 114 { 4, 0x94, "RxSAChanges" }, 115 { 4, 0x98, "RxFragments" }, 116 { 4, 0xa0, "RxSymbolErrors" }, 117 { 4, 0xa4, "RxQoSPkts" }, 118 { 8, 0xa8, "RxQoSOctets" }, 119 { 4, 0xb0, "Pkts1523to2047Octets" }, 120 { 4, 0xb4, "Pkts2048to4095Octets" }, 121 { 4, 0xb8, "Pkts4096to8191Octets" }, 122 { 4, 0xbc, "Pkts8192to9728Octets" }, 123 { 4, 0xc0, "RxDiscarded" }, 124 }; 125 126 #define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx) 127 128 /* MIB counters */ 129 static const struct b53_mib_desc b53_mibs[] = { 130 { 8, 0x00, "TxOctets" }, 131 { 4, 0x08, "TxDropPkts" }, 132 { 4, 0x10, "TxBroadcastPkts" }, 133 { 4, 0x14, "TxMulticastPkts" }, 134 { 4, 0x18, "TxUnicastPkts" }, 135 { 4, 0x1c, "TxCollisions" }, 136 { 4, 0x20, "TxSingleCollision" }, 137 { 4, 0x24, "TxMultipleCollision" }, 138 { 4, 0x28, "TxDeferredTransmit" }, 139 { 4, 0x2c, "TxLateCollision" }, 140 { 4, 0x30, "TxExcessiveCollision" }, 141 { 4, 0x38, "TxPausePkts" }, 142 { 8, 0x50, "RxOctets" }, 143 { 4, 0x58, "RxUndersizePkts" }, 144 { 4, 0x5c, "RxPausePkts" }, 145 { 4, 0x60, "Pkts64Octets" }, 146 { 4, 0x64, "Pkts65to127Octets" }, 147 { 4, 0x68, "Pkts128to255Octets" }, 148 { 4, 0x6c, "Pkts256to511Octets" }, 149 { 4, 0x70, "Pkts512to1023Octets" }, 150 { 4, 0x74, "Pkts1024to1522Octets" }, 151 { 4, 0x78, "RxOversizePkts" }, 152 { 4, 0x7c, "RxJabbers" }, 153 { 4, 0x80, "RxAlignmentErrors" }, 154 { 4, 0x84, "RxFCSErrors" }, 155 { 8, 0x88, "RxGoodOctets" }, 156 { 4, 0x90, "RxDropPkts" }, 157 { 4, 0x94, "RxUnicastPkts" }, 158 { 4, 0x98, "RxMulticastPkts" }, 159 { 4, 0x9c, "RxBroadcastPkts" }, 160 { 4, 0xa0, "RxSAChanges" }, 161 { 4, 0xa4, "RxFragments" }, 162 { 4, 0xa8, "RxJumboPkts" }, 163 { 4, 0xac, "RxSymbolErrors" }, 164 { 4, 0xc0, "RxDiscarded" }, 165 }; 166 167 #define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs) 168 169 static const struct b53_mib_desc b53_mibs_58xx[] = { 170 { 8, 0x00, "TxOctets" }, 171 { 4, 0x08, "TxDropPkts" }, 172 { 4, 0x0c, "TxQPKTQ0" }, 173 { 4, 0x10, "TxBroadcastPkts" }, 174 { 4, 0x14, "TxMulticastPkts" }, 175 { 4, 0x18, "TxUnicastPKts" }, 176 { 4, 0x1c, "TxCollisions" }, 177 { 4, 0x20, "TxSingleCollision" }, 178 { 4, 0x24, "TxMultipleCollision" }, 179 { 4, 0x28, "TxDeferredCollision" }, 180 { 4, 0x2c, "TxLateCollision" }, 181 { 4, 0x30, "TxExcessiveCollision" }, 182 { 4, 0x34, "TxFrameInDisc" }, 183 { 4, 0x38, "TxPausePkts" }, 184 { 4, 0x3c, "TxQPKTQ1" }, 185 { 4, 0x40, "TxQPKTQ2" }, 186 { 4, 0x44, "TxQPKTQ3" }, 187 { 4, 0x48, "TxQPKTQ4" }, 188 { 4, 0x4c, "TxQPKTQ5" }, 189 { 8, 0x50, "RxOctets" }, 190 { 4, 0x58, "RxUndersizePkts" }, 191 { 4, 0x5c, "RxPausePkts" }, 192 { 4, 0x60, "RxPkts64Octets" }, 193 { 4, 0x64, "RxPkts65to127Octets" }, 194 { 4, 0x68, "RxPkts128to255Octets" }, 195 { 4, 0x6c, "RxPkts256to511Octets" }, 196 { 4, 0x70, "RxPkts512to1023Octets" }, 197 { 4, 0x74, "RxPkts1024toMaxPktsOctets" }, 198 { 4, 0x78, "RxOversizePkts" }, 199 { 4, 0x7c, "RxJabbers" }, 200 { 4, 0x80, "RxAlignmentErrors" }, 201 { 4, 0x84, "RxFCSErrors" }, 202 { 8, 0x88, "RxGoodOctets" }, 203 { 4, 0x90, "RxDropPkts" }, 204 { 4, 0x94, "RxUnicastPkts" }, 205 { 4, 0x98, "RxMulticastPkts" }, 206 { 4, 0x9c, "RxBroadcastPkts" }, 207 { 4, 0xa0, "RxSAChanges" }, 208 { 4, 0xa4, "RxFragments" }, 209 { 4, 0xa8, "RxJumboPkt" }, 210 { 4, 0xac, "RxSymblErr" }, 211 { 4, 0xb0, "InRangeErrCount" }, 212 { 4, 0xb4, "OutRangeErrCount" }, 213 { 4, 0xb8, "EEELpiEvent" }, 214 { 4, 0xbc, "EEELpiDuration" }, 215 { 4, 0xc0, "RxDiscard" }, 216 { 4, 0xc8, "TxQPKTQ6" }, 217 { 4, 0xcc, "TxQPKTQ7" }, 218 { 4, 0xd0, "TxPkts64Octets" }, 219 { 4, 0xd4, "TxPkts65to127Octets" }, 220 { 4, 0xd8, "TxPkts128to255Octets" }, 221 { 4, 0xdc, "TxPkts256to511Ocets" }, 222 { 4, 0xe0, "TxPkts512to1023Ocets" }, 223 { 4, 0xe4, "TxPkts1024toMaxPktOcets" }, 224 }; 225 226 #define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx) 227 228 #define B53_MAX_MTU_25 (1536 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) 229 #define B53_MAX_MTU (9720 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) 230 231 static int b53_do_vlan_op(struct b53_device *dev, u8 op) 232 { 233 unsigned int i; 234 235 b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op); 236 237 for (i = 0; i < 10; i++) { 238 u8 vta; 239 240 b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta); 241 if (!(vta & VTA_START_CMD)) 242 return 0; 243 244 usleep_range(100, 200); 245 } 246 247 return -EIO; 248 } 249 250 static void b53_set_vlan_entry(struct b53_device *dev, u16 vid, 251 struct b53_vlan *vlan) 252 { 253 if (is5325(dev)) { 254 u32 entry = 0; 255 256 if (vlan->members) { 257 entry = ((vlan->untag & VA_UNTAG_MASK_25) << 258 VA_UNTAG_S_25) | vlan->members; 259 if (dev->core_rev >= 3) 260 entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S; 261 else 262 entry |= VA_VALID_25; 263 } 264 265 b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry); 266 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid | 267 VTA_RW_STATE_WR | VTA_RW_OP_EN); 268 } else if (is5365(dev)) { 269 u16 entry = 0; 270 271 if (vlan->members) 272 entry = ((vlan->untag & VA_UNTAG_MASK_65) << 273 VA_UNTAG_S_65) | vlan->members | VA_VALID_65; 274 275 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry); 276 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid | 277 VTA_RW_STATE_WR | VTA_RW_OP_EN); 278 } else { 279 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid); 280 b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], 281 (vlan->untag << VTE_UNTAG_S) | vlan->members); 282 283 b53_do_vlan_op(dev, VTA_CMD_WRITE); 284 } 285 286 dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n", 287 vid, vlan->members, vlan->untag); 288 } 289 290 static void b53_get_vlan_entry(struct b53_device *dev, u16 vid, 291 struct b53_vlan *vlan) 292 { 293 if (is5325(dev)) { 294 u32 entry = 0; 295 296 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid | 297 VTA_RW_STATE_RD | VTA_RW_OP_EN); 298 b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry); 299 300 if (dev->core_rev >= 3) 301 vlan->valid = !!(entry & VA_VALID_25_R4); 302 else 303 vlan->valid = !!(entry & VA_VALID_25); 304 vlan->members = entry & VA_MEMBER_MASK; 305 vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25; 306 307 } else if (is5365(dev)) { 308 u16 entry = 0; 309 310 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid | 311 VTA_RW_STATE_WR | VTA_RW_OP_EN); 312 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry); 313 314 vlan->valid = !!(entry & VA_VALID_65); 315 vlan->members = entry & VA_MEMBER_MASK; 316 vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65; 317 } else { 318 u32 entry = 0; 319 320 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid); 321 b53_do_vlan_op(dev, VTA_CMD_READ); 322 b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry); 323 vlan->members = entry & VTE_MEMBERS; 324 vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS; 325 vlan->valid = true; 326 } 327 } 328 329 static void b53_set_eap_mode(struct b53_device *dev, int port, int mode) 330 { 331 u64 eap_conf; 332 333 if (is5325(dev) || is5365(dev) || dev->chip_id == BCM5389_DEVICE_ID) 334 return; 335 336 b53_read64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), &eap_conf); 337 338 if (is63xx(dev)) { 339 eap_conf &= ~EAP_MODE_MASK_63XX; 340 eap_conf |= (u64)mode << EAP_MODE_SHIFT_63XX; 341 } else { 342 eap_conf &= ~EAP_MODE_MASK; 343 eap_conf |= (u64)mode << EAP_MODE_SHIFT; 344 } 345 346 b53_write64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), eap_conf); 347 } 348 349 static void b53_set_forwarding(struct b53_device *dev, int enable) 350 { 351 u8 mgmt; 352 353 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 354 355 if (enable) 356 mgmt |= SM_SW_FWD_EN; 357 else 358 mgmt &= ~SM_SW_FWD_EN; 359 360 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 361 362 /* Include IMP port in dumb forwarding mode 363 */ 364 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt); 365 mgmt |= B53_MII_DUMB_FWDG_EN; 366 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); 367 368 /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether 369 * frames should be flooded or not. 370 */ 371 b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); 372 mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN; 373 b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); 374 } 375 376 static void b53_enable_vlan(struct b53_device *dev, int port, bool enable, 377 bool enable_filtering) 378 { 379 u8 mgmt, vc0, vc1, vc4 = 0, vc5; 380 381 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 382 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0); 383 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1); 384 385 if (is5325(dev) || is5365(dev)) { 386 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4); 387 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5); 388 } else if (is63xx(dev)) { 389 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4); 390 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5); 391 } else { 392 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4); 393 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5); 394 } 395 396 vc1 &= ~VC1_RX_MCST_FWD_EN; 397 398 if (enable) { 399 vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; 400 vc1 |= VC1_RX_MCST_UNTAG_EN; 401 vc4 &= ~VC4_ING_VID_CHECK_MASK; 402 if (enable_filtering) { 403 vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; 404 vc5 |= VC5_DROP_VTABLE_MISS; 405 } else { 406 vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S; 407 vc5 &= ~VC5_DROP_VTABLE_MISS; 408 } 409 410 if (is5325(dev)) 411 vc0 &= ~VC0_RESERVED_1; 412 413 if (is5325(dev) || is5365(dev)) 414 vc1 |= VC1_RX_MCST_TAG_EN; 415 416 } else { 417 vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID); 418 vc1 &= ~VC1_RX_MCST_UNTAG_EN; 419 vc4 &= ~VC4_ING_VID_CHECK_MASK; 420 vc5 &= ~VC5_DROP_VTABLE_MISS; 421 422 if (is5325(dev) || is5365(dev)) 423 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; 424 else 425 vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S; 426 427 if (is5325(dev) || is5365(dev)) 428 vc1 &= ~VC1_RX_MCST_TAG_EN; 429 } 430 431 if (!is5325(dev) && !is5365(dev)) 432 vc5 &= ~VC5_VID_FFF_EN; 433 434 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0); 435 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1); 436 437 if (is5325(dev) || is5365(dev)) { 438 /* enable the high 8 bit vid check on 5325 */ 439 if (is5325(dev) && enable) 440 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 441 VC3_HIGH_8BIT_EN); 442 else 443 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0); 444 445 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4); 446 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5); 447 } else if (is63xx(dev)) { 448 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0); 449 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4); 450 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5); 451 } else { 452 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0); 453 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4); 454 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5); 455 } 456 457 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 458 459 dev->vlan_enabled = enable; 460 461 dev_dbg(dev->dev, "Port %d VLAN enabled: %d, filtering: %d\n", 462 port, enable, enable_filtering); 463 } 464 465 static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) 466 { 467 u32 port_mask = 0; 468 u16 max_size = JMS_MIN_SIZE; 469 470 if (is5325(dev) || is5365(dev)) 471 return -EINVAL; 472 473 if (enable) { 474 port_mask = dev->enabled_ports; 475 max_size = JMS_MAX_SIZE; 476 if (allow_10_100) 477 port_mask |= JPM_10_100_JUMBO_EN; 478 } 479 480 b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask); 481 return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size); 482 } 483 484 static int b53_flush_arl(struct b53_device *dev, u8 mask) 485 { 486 unsigned int i; 487 488 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, 489 FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask); 490 491 for (i = 0; i < 10; i++) { 492 u8 fast_age_ctrl; 493 494 b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, 495 &fast_age_ctrl); 496 497 if (!(fast_age_ctrl & FAST_AGE_DONE)) 498 goto out; 499 500 msleep(1); 501 } 502 503 return -ETIMEDOUT; 504 out: 505 /* Only age dynamic entries (default behavior) */ 506 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC); 507 return 0; 508 } 509 510 static int b53_fast_age_port(struct b53_device *dev, int port) 511 { 512 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port); 513 514 return b53_flush_arl(dev, FAST_AGE_PORT); 515 } 516 517 static int b53_fast_age_vlan(struct b53_device *dev, u16 vid) 518 { 519 b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid); 520 521 return b53_flush_arl(dev, FAST_AGE_VLAN); 522 } 523 524 void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) 525 { 526 struct b53_device *dev = ds->priv; 527 unsigned int i; 528 u16 pvlan; 529 530 /* Enable the IMP port to be in the same VLAN as the other ports 531 * on a per-port basis such that we only have Port i and IMP in 532 * the same VLAN. 533 */ 534 b53_for_each_port(dev, i) { 535 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan); 536 pvlan |= BIT(cpu_port); 537 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan); 538 } 539 } 540 EXPORT_SYMBOL(b53_imp_vlan_setup); 541 542 static void b53_port_set_ucast_flood(struct b53_device *dev, int port, 543 bool unicast) 544 { 545 u16 uc; 546 547 b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc); 548 if (unicast) 549 uc |= BIT(port); 550 else 551 uc &= ~BIT(port); 552 b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc); 553 } 554 555 static void b53_port_set_mcast_flood(struct b53_device *dev, int port, 556 bool multicast) 557 { 558 u16 mc; 559 560 b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc); 561 if (multicast) 562 mc |= BIT(port); 563 else 564 mc &= ~BIT(port); 565 b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc); 566 567 b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc); 568 if (multicast) 569 mc |= BIT(port); 570 else 571 mc &= ~BIT(port); 572 b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc); 573 } 574 575 static void b53_port_set_learning(struct b53_device *dev, int port, 576 bool learning) 577 { 578 u16 reg; 579 580 b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, ®); 581 if (learning) 582 reg &= ~BIT(port); 583 else 584 reg |= BIT(port); 585 b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg); 586 } 587 588 static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable) 589 { 590 struct b53_device *dev = ds->priv; 591 u16 reg; 592 593 b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, ®); 594 if (enable) 595 reg |= BIT(port); 596 else 597 reg &= ~BIT(port); 598 b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg); 599 } 600 601 int b53_setup_port(struct dsa_switch *ds, int port) 602 { 603 struct b53_device *dev = ds->priv; 604 605 b53_port_set_ucast_flood(dev, port, true); 606 b53_port_set_mcast_flood(dev, port, true); 607 b53_port_set_learning(dev, port, false); 608 609 /* Force all traffic to go to the CPU port to prevent the ASIC from 610 * trying to forward to bridged ports on matching FDB entries, then 611 * dropping frames because it isn't allowed to forward there. 612 */ 613 if (dsa_is_user_port(ds, port)) 614 b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED); 615 616 return 0; 617 } 618 EXPORT_SYMBOL(b53_setup_port); 619 620 int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) 621 { 622 struct b53_device *dev = ds->priv; 623 unsigned int cpu_port; 624 int ret = 0; 625 u16 pvlan; 626 627 if (!dsa_is_user_port(ds, port)) 628 return 0; 629 630 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; 631 632 if (dev->ops->irq_enable) 633 ret = dev->ops->irq_enable(dev, port); 634 if (ret) 635 return ret; 636 637 /* Clear the Rx and Tx disable bits and set to no spanning tree */ 638 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0); 639 640 /* Set this port, and only this one to be in the default VLAN, 641 * if member of a bridge, restore its membership prior to 642 * bringing down this port. 643 */ 644 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 645 pvlan &= ~0x1ff; 646 pvlan |= BIT(port); 647 pvlan |= dev->ports[port].vlan_ctl_mask; 648 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 649 650 b53_imp_vlan_setup(ds, cpu_port); 651 652 /* If EEE was enabled, restore it */ 653 if (dev->ports[port].eee.eee_enabled) 654 b53_eee_enable_set(ds, port, true); 655 656 return 0; 657 } 658 EXPORT_SYMBOL(b53_enable_port); 659 660 void b53_disable_port(struct dsa_switch *ds, int port) 661 { 662 struct b53_device *dev = ds->priv; 663 u8 reg; 664 665 /* Disable Tx/Rx for the port */ 666 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), ®); 667 reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE; 668 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); 669 670 if (dev->ops->irq_disable) 671 dev->ops->irq_disable(dev, port); 672 } 673 EXPORT_SYMBOL(b53_disable_port); 674 675 void b53_brcm_hdr_setup(struct dsa_switch *ds, int port) 676 { 677 struct b53_device *dev = ds->priv; 678 bool tag_en = !(dev->tag_protocol == DSA_TAG_PROTO_NONE); 679 u8 hdr_ctl, val; 680 u16 reg; 681 682 /* Resolve which bit controls the Broadcom tag */ 683 switch (port) { 684 case 8: 685 val = BRCM_HDR_P8_EN; 686 break; 687 case 7: 688 val = BRCM_HDR_P7_EN; 689 break; 690 case 5: 691 val = BRCM_HDR_P5_EN; 692 break; 693 default: 694 val = 0; 695 break; 696 } 697 698 /* Enable management mode if tagging is requested */ 699 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &hdr_ctl); 700 if (tag_en) 701 hdr_ctl |= SM_SW_FWD_MODE; 702 else 703 hdr_ctl &= ~SM_SW_FWD_MODE; 704 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, hdr_ctl); 705 706 /* Configure the appropriate IMP port */ 707 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &hdr_ctl); 708 if (port == 8) 709 hdr_ctl |= GC_FRM_MGMT_PORT_MII; 710 else if (port == 5) 711 hdr_ctl |= GC_FRM_MGMT_PORT_M; 712 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl); 713 714 /* Enable Broadcom tags for IMP port */ 715 b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl); 716 if (tag_en) 717 hdr_ctl |= val; 718 else 719 hdr_ctl &= ~val; 720 b53_write8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, hdr_ctl); 721 722 /* Registers below are only accessible on newer devices */ 723 if (!is58xx(dev)) 724 return; 725 726 /* Enable reception Broadcom tag for CPU TX (switch RX) to 727 * allow us to tag outgoing frames 728 */ 729 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, ®); 730 if (tag_en) 731 reg &= ~BIT(port); 732 else 733 reg |= BIT(port); 734 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, reg); 735 736 /* Enable transmission of Broadcom tags from the switch (CPU RX) to 737 * allow delivering frames to the per-port net_devices 738 */ 739 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, ®); 740 if (tag_en) 741 reg &= ~BIT(port); 742 else 743 reg |= BIT(port); 744 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, reg); 745 } 746 EXPORT_SYMBOL(b53_brcm_hdr_setup); 747 748 static void b53_enable_cpu_port(struct b53_device *dev, int port) 749 { 750 u8 port_ctrl; 751 752 /* BCM5325 CPU port is at 8 */ 753 if ((is5325(dev) || is5365(dev)) && port == B53_CPU_PORT_25) 754 port = B53_CPU_PORT; 755 756 port_ctrl = PORT_CTRL_RX_BCST_EN | 757 PORT_CTRL_RX_MCST_EN | 758 PORT_CTRL_RX_UCST_EN; 759 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl); 760 761 b53_brcm_hdr_setup(dev->ds, port); 762 } 763 764 static void b53_enable_mib(struct b53_device *dev) 765 { 766 u8 gc; 767 768 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); 769 gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN); 770 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); 771 } 772 773 static void b53_enable_stp(struct b53_device *dev) 774 { 775 u8 gc; 776 777 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); 778 gc |= GC_RX_BPDU_EN; 779 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); 780 } 781 782 static u16 b53_default_pvid(struct b53_device *dev) 783 { 784 if (is5325(dev) || is5365(dev)) 785 return 1; 786 else 787 return 0; 788 } 789 790 static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port) 791 { 792 struct b53_device *dev = ds->priv; 793 794 return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port); 795 } 796 797 static bool b53_vlan_port_may_join_untagged(struct dsa_switch *ds, int port) 798 { 799 struct b53_device *dev = ds->priv; 800 struct dsa_port *dp; 801 802 if (!dev->vlan_filtering) 803 return true; 804 805 dp = dsa_to_port(ds, port); 806 807 if (dsa_port_is_cpu(dp)) 808 return true; 809 810 return dp->bridge == NULL; 811 } 812 813 int b53_configure_vlan(struct dsa_switch *ds) 814 { 815 struct b53_device *dev = ds->priv; 816 struct b53_vlan vl = { 0 }; 817 struct b53_vlan *v; 818 int i, def_vid; 819 u16 vid; 820 821 def_vid = b53_default_pvid(dev); 822 823 /* clear all vlan entries */ 824 if (is5325(dev) || is5365(dev)) { 825 for (i = def_vid; i < dev->num_vlans; i++) 826 b53_set_vlan_entry(dev, i, &vl); 827 } else { 828 b53_do_vlan_op(dev, VTA_CMD_CLEAR); 829 } 830 831 b53_enable_vlan(dev, -1, dev->vlan_enabled, dev->vlan_filtering); 832 833 /* Create an untagged VLAN entry for the default PVID in case 834 * CONFIG_VLAN_8021Q is disabled and there are no calls to 835 * dsa_user_vlan_rx_add_vid() to create the default VLAN 836 * entry. Do this only when the tagging protocol is not 837 * DSA_TAG_PROTO_NONE 838 */ 839 v = &dev->vlans[def_vid]; 840 b53_for_each_port(dev, i) { 841 if (!b53_vlan_port_may_join_untagged(ds, i)) 842 continue; 843 844 vl.members |= BIT(i); 845 if (!b53_vlan_port_needs_forced_tagged(ds, i)) 846 vl.untag = vl.members; 847 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(i), 848 def_vid); 849 } 850 b53_set_vlan_entry(dev, def_vid, &vl); 851 852 if (dev->vlan_filtering) { 853 /* Upon initial call we have not set-up any VLANs, but upon 854 * system resume, we need to restore all VLAN entries. 855 */ 856 for (vid = def_vid + 1; vid < dev->num_vlans; vid++) { 857 v = &dev->vlans[vid]; 858 859 if (!v->members) 860 continue; 861 862 b53_set_vlan_entry(dev, vid, v); 863 b53_fast_age_vlan(dev, vid); 864 } 865 866 b53_for_each_port(dev, i) { 867 if (!dsa_is_cpu_port(ds, i)) 868 b53_write16(dev, B53_VLAN_PAGE, 869 B53_VLAN_PORT_DEF_TAG(i), 870 dev->ports[i].pvid); 871 } 872 } 873 874 return 0; 875 } 876 EXPORT_SYMBOL(b53_configure_vlan); 877 878 static void b53_switch_reset_gpio(struct b53_device *dev) 879 { 880 int gpio = dev->reset_gpio; 881 882 if (gpio < 0) 883 return; 884 885 /* Reset sequence: RESET low(50ms)->high(20ms) 886 */ 887 gpio_set_value(gpio, 0); 888 mdelay(50); 889 890 gpio_set_value(gpio, 1); 891 mdelay(20); 892 893 dev->current_page = 0xff; 894 } 895 896 static int b53_switch_reset(struct b53_device *dev) 897 { 898 unsigned int timeout = 1000; 899 u8 mgmt, reg; 900 901 b53_switch_reset_gpio(dev); 902 903 if (is539x(dev)) { 904 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83); 905 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00); 906 } 907 908 /* This is specific to 58xx devices here, do not use is58xx() which 909 * covers the larger Starfigther 2 family, including 7445/7278 which 910 * still use this driver as a library and need to perform the reset 911 * earlier. 912 */ 913 if (dev->chip_id == BCM58XX_DEVICE_ID || 914 dev->chip_id == BCM583XX_DEVICE_ID) { 915 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, ®); 916 reg |= SW_RST | EN_SW_RST | EN_CH_RST; 917 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg); 918 919 do { 920 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, ®); 921 if (!(reg & SW_RST)) 922 break; 923 924 usleep_range(1000, 2000); 925 } while (timeout-- > 0); 926 927 if (timeout == 0) { 928 dev_err(dev->dev, 929 "Timeout waiting for SW_RST to clear!\n"); 930 return -ETIMEDOUT; 931 } 932 } 933 934 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 935 936 if (!(mgmt & SM_SW_FWD_EN)) { 937 mgmt &= ~SM_SW_FWD_MODE; 938 mgmt |= SM_SW_FWD_EN; 939 940 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 941 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 942 943 if (!(mgmt & SM_SW_FWD_EN)) { 944 dev_err(dev->dev, "Failed to enable switch!\n"); 945 return -EINVAL; 946 } 947 } 948 949 b53_enable_mib(dev); 950 b53_enable_stp(dev); 951 952 return b53_flush_arl(dev, FAST_AGE_STATIC); 953 } 954 955 static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg) 956 { 957 struct b53_device *priv = ds->priv; 958 u16 value = 0; 959 int ret; 960 961 if (priv->ops->phy_read16) 962 ret = priv->ops->phy_read16(priv, addr, reg, &value); 963 else 964 ret = b53_read16(priv, B53_PORT_MII_PAGE(addr), 965 reg * 2, &value); 966 967 return ret ? ret : value; 968 } 969 970 static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val) 971 { 972 struct b53_device *priv = ds->priv; 973 974 if (priv->ops->phy_write16) 975 return priv->ops->phy_write16(priv, addr, reg, val); 976 977 return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val); 978 } 979 980 static int b53_reset_switch(struct b53_device *priv) 981 { 982 /* reset vlans */ 983 memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans); 984 memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports); 985 986 priv->serdes_lane = B53_INVALID_LANE; 987 988 return b53_switch_reset(priv); 989 } 990 991 static int b53_apply_config(struct b53_device *priv) 992 { 993 /* disable switching */ 994 b53_set_forwarding(priv, 0); 995 996 b53_configure_vlan(priv->ds); 997 998 /* enable switching */ 999 b53_set_forwarding(priv, 1); 1000 1001 return 0; 1002 } 1003 1004 static void b53_reset_mib(struct b53_device *priv) 1005 { 1006 u8 gc; 1007 1008 b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); 1009 1010 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB); 1011 msleep(1); 1012 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB); 1013 msleep(1); 1014 } 1015 1016 static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev) 1017 { 1018 if (is5365(dev)) 1019 return b53_mibs_65; 1020 else if (is63xx(dev)) 1021 return b53_mibs_63xx; 1022 else if (is58xx(dev)) 1023 return b53_mibs_58xx; 1024 else 1025 return b53_mibs; 1026 } 1027 1028 static unsigned int b53_get_mib_size(struct b53_device *dev) 1029 { 1030 if (is5365(dev)) 1031 return B53_MIBS_65_SIZE; 1032 else if (is63xx(dev)) 1033 return B53_MIBS_63XX_SIZE; 1034 else if (is58xx(dev)) 1035 return B53_MIBS_58XX_SIZE; 1036 else 1037 return B53_MIBS_SIZE; 1038 } 1039 1040 static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port) 1041 { 1042 /* These ports typically do not have built-in PHYs */ 1043 switch (port) { 1044 case B53_CPU_PORT_25: 1045 case 7: 1046 case B53_CPU_PORT: 1047 return NULL; 1048 } 1049 1050 return mdiobus_get_phy(ds->user_mii_bus, port); 1051 } 1052 1053 void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset, 1054 uint8_t *data) 1055 { 1056 struct b53_device *dev = ds->priv; 1057 const struct b53_mib_desc *mibs = b53_get_mib(dev); 1058 unsigned int mib_size = b53_get_mib_size(dev); 1059 struct phy_device *phydev; 1060 unsigned int i; 1061 1062 if (stringset == ETH_SS_STATS) { 1063 for (i = 0; i < mib_size; i++) 1064 ethtool_puts(&data, mibs[i].name); 1065 } else if (stringset == ETH_SS_PHY_STATS) { 1066 phydev = b53_get_phy_device(ds, port); 1067 if (!phydev) 1068 return; 1069 1070 phy_ethtool_get_strings(phydev, data); 1071 } 1072 } 1073 EXPORT_SYMBOL(b53_get_strings); 1074 1075 void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) 1076 { 1077 struct b53_device *dev = ds->priv; 1078 const struct b53_mib_desc *mibs = b53_get_mib(dev); 1079 unsigned int mib_size = b53_get_mib_size(dev); 1080 const struct b53_mib_desc *s; 1081 unsigned int i; 1082 u64 val = 0; 1083 1084 if (is5365(dev) && port == 5) 1085 port = 8; 1086 1087 mutex_lock(&dev->stats_mutex); 1088 1089 for (i = 0; i < mib_size; i++) { 1090 s = &mibs[i]; 1091 1092 if (s->size == 8) { 1093 b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val); 1094 } else { 1095 u32 val32; 1096 1097 b53_read32(dev, B53_MIB_PAGE(port), s->offset, 1098 &val32); 1099 val = val32; 1100 } 1101 data[i] = (u64)val; 1102 } 1103 1104 mutex_unlock(&dev->stats_mutex); 1105 } 1106 EXPORT_SYMBOL(b53_get_ethtool_stats); 1107 1108 void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data) 1109 { 1110 struct phy_device *phydev; 1111 1112 phydev = b53_get_phy_device(ds, port); 1113 if (!phydev) 1114 return; 1115 1116 phy_ethtool_get_stats(phydev, NULL, data); 1117 } 1118 EXPORT_SYMBOL(b53_get_ethtool_phy_stats); 1119 1120 int b53_get_sset_count(struct dsa_switch *ds, int port, int sset) 1121 { 1122 struct b53_device *dev = ds->priv; 1123 struct phy_device *phydev; 1124 1125 if (sset == ETH_SS_STATS) { 1126 return b53_get_mib_size(dev); 1127 } else if (sset == ETH_SS_PHY_STATS) { 1128 phydev = b53_get_phy_device(ds, port); 1129 if (!phydev) 1130 return 0; 1131 1132 return phy_ethtool_get_sset_count(phydev); 1133 } 1134 1135 return 0; 1136 } 1137 EXPORT_SYMBOL(b53_get_sset_count); 1138 1139 enum b53_devlink_resource_id { 1140 B53_DEVLINK_PARAM_ID_VLAN_TABLE, 1141 }; 1142 1143 static u64 b53_devlink_vlan_table_get(void *priv) 1144 { 1145 struct b53_device *dev = priv; 1146 struct b53_vlan *vl; 1147 unsigned int i; 1148 u64 count = 0; 1149 1150 for (i = 0; i < dev->num_vlans; i++) { 1151 vl = &dev->vlans[i]; 1152 if (vl->members) 1153 count++; 1154 } 1155 1156 return count; 1157 } 1158 1159 int b53_setup_devlink_resources(struct dsa_switch *ds) 1160 { 1161 struct devlink_resource_size_params size_params; 1162 struct b53_device *dev = ds->priv; 1163 int err; 1164 1165 devlink_resource_size_params_init(&size_params, dev->num_vlans, 1166 dev->num_vlans, 1167 1, DEVLINK_RESOURCE_UNIT_ENTRY); 1168 1169 err = dsa_devlink_resource_register(ds, "VLAN", dev->num_vlans, 1170 B53_DEVLINK_PARAM_ID_VLAN_TABLE, 1171 DEVLINK_RESOURCE_ID_PARENT_TOP, 1172 &size_params); 1173 if (err) 1174 goto out; 1175 1176 dsa_devlink_resource_occ_get_register(ds, 1177 B53_DEVLINK_PARAM_ID_VLAN_TABLE, 1178 b53_devlink_vlan_table_get, dev); 1179 1180 return 0; 1181 out: 1182 dsa_devlink_resources_unregister(ds); 1183 return err; 1184 } 1185 EXPORT_SYMBOL(b53_setup_devlink_resources); 1186 1187 static int b53_setup(struct dsa_switch *ds) 1188 { 1189 struct b53_device *dev = ds->priv; 1190 struct b53_vlan *vl; 1191 unsigned int port; 1192 u16 pvid; 1193 int ret; 1194 1195 /* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set 1196 * which forces the CPU port to be tagged in all VLANs. 1197 */ 1198 ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE; 1199 1200 /* The switch does not tell us the original VLAN for untagged 1201 * packets, so keep the CPU port always tagged. 1202 */ 1203 ds->untag_vlan_aware_bridge_pvid = true; 1204 1205 ret = b53_reset_switch(dev); 1206 if (ret) { 1207 dev_err(ds->dev, "failed to reset switch\n"); 1208 return ret; 1209 } 1210 1211 /* setup default vlan for filtering mode */ 1212 pvid = b53_default_pvid(dev); 1213 vl = &dev->vlans[pvid]; 1214 b53_for_each_port(dev, port) { 1215 vl->members |= BIT(port); 1216 if (!b53_vlan_port_needs_forced_tagged(ds, port)) 1217 vl->untag |= BIT(port); 1218 } 1219 1220 b53_reset_mib(dev); 1221 1222 ret = b53_apply_config(dev); 1223 if (ret) { 1224 dev_err(ds->dev, "failed to apply configuration\n"); 1225 return ret; 1226 } 1227 1228 /* Configure IMP/CPU port, disable all other ports. Enabled 1229 * ports will be configured with .port_enable 1230 */ 1231 for (port = 0; port < dev->num_ports; port++) { 1232 if (dsa_is_cpu_port(ds, port)) 1233 b53_enable_cpu_port(dev, port); 1234 else 1235 b53_disable_port(ds, port); 1236 } 1237 1238 return b53_setup_devlink_resources(ds); 1239 } 1240 1241 static void b53_teardown(struct dsa_switch *ds) 1242 { 1243 dsa_devlink_resources_unregister(ds); 1244 } 1245 1246 static void b53_force_link(struct b53_device *dev, int port, int link) 1247 { 1248 u8 reg, val, off; 1249 1250 /* Override the port settings */ 1251 if (port == dev->imp_port) { 1252 off = B53_PORT_OVERRIDE_CTRL; 1253 val = PORT_OVERRIDE_EN; 1254 } else { 1255 off = B53_GMII_PORT_OVERRIDE_CTRL(port); 1256 val = GMII_PO_EN; 1257 } 1258 1259 b53_read8(dev, B53_CTRL_PAGE, off, ®); 1260 reg |= val; 1261 if (link) 1262 reg |= PORT_OVERRIDE_LINK; 1263 else 1264 reg &= ~PORT_OVERRIDE_LINK; 1265 b53_write8(dev, B53_CTRL_PAGE, off, reg); 1266 } 1267 1268 static void b53_force_port_config(struct b53_device *dev, int port, 1269 int speed, int duplex, 1270 bool tx_pause, bool rx_pause) 1271 { 1272 u8 reg, val, off; 1273 1274 /* Override the port settings */ 1275 if (port == dev->imp_port) { 1276 off = B53_PORT_OVERRIDE_CTRL; 1277 val = PORT_OVERRIDE_EN; 1278 } else { 1279 off = B53_GMII_PORT_OVERRIDE_CTRL(port); 1280 val = GMII_PO_EN; 1281 } 1282 1283 b53_read8(dev, B53_CTRL_PAGE, off, ®); 1284 reg |= val; 1285 if (duplex == DUPLEX_FULL) 1286 reg |= PORT_OVERRIDE_FULL_DUPLEX; 1287 else 1288 reg &= ~PORT_OVERRIDE_FULL_DUPLEX; 1289 1290 switch (speed) { 1291 case 2000: 1292 reg |= PORT_OVERRIDE_SPEED_2000M; 1293 fallthrough; 1294 case SPEED_1000: 1295 reg |= PORT_OVERRIDE_SPEED_1000M; 1296 break; 1297 case SPEED_100: 1298 reg |= PORT_OVERRIDE_SPEED_100M; 1299 break; 1300 case SPEED_10: 1301 reg |= PORT_OVERRIDE_SPEED_10M; 1302 break; 1303 default: 1304 dev_err(dev->dev, "unknown speed: %d\n", speed); 1305 return; 1306 } 1307 1308 if (rx_pause) 1309 reg |= PORT_OVERRIDE_RX_FLOW; 1310 if (tx_pause) 1311 reg |= PORT_OVERRIDE_TX_FLOW; 1312 1313 b53_write8(dev, B53_CTRL_PAGE, off, reg); 1314 } 1315 1316 static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port, 1317 phy_interface_t interface) 1318 { 1319 struct b53_device *dev = ds->priv; 1320 u8 rgmii_ctrl = 0, off; 1321 1322 if (port == dev->imp_port) 1323 off = B53_RGMII_CTRL_IMP; 1324 else 1325 off = B53_RGMII_CTRL_P(port); 1326 1327 b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl); 1328 1329 switch (interface) { 1330 case PHY_INTERFACE_MODE_RGMII_ID: 1331 rgmii_ctrl |= (RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC); 1332 break; 1333 case PHY_INTERFACE_MODE_RGMII_RXID: 1334 rgmii_ctrl &= ~(RGMII_CTRL_DLL_TXC); 1335 rgmii_ctrl |= RGMII_CTRL_DLL_RXC; 1336 break; 1337 case PHY_INTERFACE_MODE_RGMII_TXID: 1338 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC); 1339 rgmii_ctrl |= RGMII_CTRL_DLL_TXC; 1340 break; 1341 case PHY_INTERFACE_MODE_RGMII: 1342 default: 1343 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC); 1344 break; 1345 } 1346 1347 if (port != dev->imp_port) { 1348 if (is63268(dev)) 1349 rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE; 1350 1351 rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII; 1352 } 1353 1354 b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl); 1355 1356 dev_dbg(ds->dev, "Configured port %d for %s\n", port, 1357 phy_modes(interface)); 1358 } 1359 1360 static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port, 1361 phy_interface_t interface) 1362 { 1363 struct b53_device *dev = ds->priv; 1364 u8 rgmii_ctrl = 0, off; 1365 1366 if (port == dev->imp_port) 1367 off = B53_RGMII_CTRL_IMP; 1368 else 1369 off = B53_RGMII_CTRL_P(port); 1370 1371 /* Configure the port RGMII clock delay by DLL disabled and 1372 * tx_clk aligned timing (restoring to reset defaults) 1373 */ 1374 b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl); 1375 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC | 1376 RGMII_CTRL_TIMING_SEL); 1377 1378 /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make 1379 * sure that we enable the port TX clock internal delay to 1380 * account for this internal delay that is inserted, otherwise 1381 * the switch won't be able to receive correctly. 1382 * 1383 * PHY_INTERFACE_MODE_RGMII means that we are not introducing 1384 * any delay neither on transmission nor reception, so the 1385 * BCM53125 must also be configured accordingly to account for 1386 * the lack of delay and introduce 1387 * 1388 * The BCM53125 switch has its RX clock and TX clock control 1389 * swapped, hence the reason why we modify the TX clock path in 1390 * the "RGMII" case 1391 */ 1392 if (interface == PHY_INTERFACE_MODE_RGMII_TXID) 1393 rgmii_ctrl |= RGMII_CTRL_DLL_TXC; 1394 if (interface == PHY_INTERFACE_MODE_RGMII) 1395 rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC; 1396 rgmii_ctrl |= RGMII_CTRL_TIMING_SEL; 1397 b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl); 1398 1399 dev_info(ds->dev, "Configured port %d for %s\n", port, 1400 phy_modes(interface)); 1401 } 1402 1403 static void b53_adjust_5325_mii(struct dsa_switch *ds, int port) 1404 { 1405 struct b53_device *dev = ds->priv; 1406 u8 reg = 0; 1407 1408 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 1409 ®); 1410 1411 /* reverse mii needs to be enabled */ 1412 if (!(reg & PORT_OVERRIDE_RV_MII_25)) { 1413 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 1414 reg | PORT_OVERRIDE_RV_MII_25); 1415 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 1416 ®); 1417 1418 if (!(reg & PORT_OVERRIDE_RV_MII_25)) { 1419 dev_err(ds->dev, 1420 "Failed to enable reverse MII mode\n"); 1421 return; 1422 } 1423 } 1424 } 1425 1426 void b53_port_event(struct dsa_switch *ds, int port) 1427 { 1428 struct b53_device *dev = ds->priv; 1429 bool link; 1430 u16 sts; 1431 1432 b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts); 1433 link = !!(sts & BIT(port)); 1434 dsa_port_phylink_mac_change(ds, port, link); 1435 } 1436 EXPORT_SYMBOL(b53_port_event); 1437 1438 static void b53_phylink_get_caps(struct dsa_switch *ds, int port, 1439 struct phylink_config *config) 1440 { 1441 struct b53_device *dev = ds->priv; 1442 1443 /* Internal ports need GMII for PHYLIB */ 1444 __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces); 1445 1446 /* These switches appear to support MII and RevMII too, but beyond 1447 * this, the code gives very few clues. FIXME: We probably need more 1448 * interface modes here. 1449 * 1450 * According to b53_srab_mux_init(), ports 3..5 can support: 1451 * SGMII, MII, GMII, RGMII or INTERNAL depending on the MUX setting. 1452 * However, the interface mode read from the MUX configuration is 1453 * not passed back to DSA, so phylink uses NA. 1454 * DT can specify RGMII for ports 0, 1. 1455 * For MDIO, port 8 can be RGMII_TXID. 1456 */ 1457 __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces); 1458 __set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces); 1459 1460 config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | 1461 MAC_10 | MAC_100; 1462 1463 /* 5325/5365 are not capable of gigabit speeds, everything else is. 1464 * Note: the original code also exclulded Gigagbit for MII, RevMII 1465 * and 802.3z modes. MII and RevMII are not able to work above 100M, 1466 * so will be excluded by the generic validator implementation. 1467 * However, the exclusion of Gigabit for 802.3z just seems wrong. 1468 */ 1469 if (!(is5325(dev) || is5365(dev))) 1470 config->mac_capabilities |= MAC_1000; 1471 1472 /* Get the implementation specific capabilities */ 1473 if (dev->ops->phylink_get_caps) 1474 dev->ops->phylink_get_caps(dev, port, config); 1475 } 1476 1477 static struct phylink_pcs *b53_phylink_mac_select_pcs(struct phylink_config *config, 1478 phy_interface_t interface) 1479 { 1480 struct dsa_port *dp = dsa_phylink_to_port(config); 1481 struct b53_device *dev = dp->ds->priv; 1482 1483 if (!dev->ops->phylink_mac_select_pcs) 1484 return NULL; 1485 1486 return dev->ops->phylink_mac_select_pcs(dev, dp->index, interface); 1487 } 1488 1489 static void b53_phylink_mac_config(struct phylink_config *config, 1490 unsigned int mode, 1491 const struct phylink_link_state *state) 1492 { 1493 struct dsa_port *dp = dsa_phylink_to_port(config); 1494 phy_interface_t interface = state->interface; 1495 struct dsa_switch *ds = dp->ds; 1496 struct b53_device *dev = ds->priv; 1497 int port = dp->index; 1498 1499 if (is63xx(dev) && port >= B53_63XX_RGMII0) 1500 b53_adjust_63xx_rgmii(ds, port, interface); 1501 1502 if (mode == MLO_AN_FIXED) { 1503 if (is531x5(dev) && phy_interface_mode_is_rgmii(interface)) 1504 b53_adjust_531x5_rgmii(ds, port, interface); 1505 1506 /* configure MII port if necessary */ 1507 if (is5325(dev)) 1508 b53_adjust_5325_mii(ds, port); 1509 } 1510 } 1511 1512 static void b53_phylink_mac_link_down(struct phylink_config *config, 1513 unsigned int mode, 1514 phy_interface_t interface) 1515 { 1516 struct dsa_port *dp = dsa_phylink_to_port(config); 1517 struct b53_device *dev = dp->ds->priv; 1518 int port = dp->index; 1519 1520 if (mode == MLO_AN_PHY) 1521 return; 1522 1523 if (mode == MLO_AN_FIXED) { 1524 b53_force_link(dev, port, false); 1525 return; 1526 } 1527 1528 if (phy_interface_mode_is_8023z(interface) && 1529 dev->ops->serdes_link_set) 1530 dev->ops->serdes_link_set(dev, port, mode, interface, false); 1531 } 1532 1533 static void b53_phylink_mac_link_up(struct phylink_config *config, 1534 struct phy_device *phydev, 1535 unsigned int mode, 1536 phy_interface_t interface, 1537 int speed, int duplex, 1538 bool tx_pause, bool rx_pause) 1539 { 1540 struct dsa_port *dp = dsa_phylink_to_port(config); 1541 struct dsa_switch *ds = dp->ds; 1542 struct b53_device *dev = ds->priv; 1543 struct ethtool_keee *p = &dev->ports[dp->index].eee; 1544 int port = dp->index; 1545 1546 if (mode == MLO_AN_PHY) { 1547 /* Re-negotiate EEE if it was enabled already */ 1548 p->eee_enabled = b53_eee_init(ds, port, phydev); 1549 return; 1550 } 1551 1552 if (mode == MLO_AN_FIXED) { 1553 /* Force flow control on BCM5301x's CPU port */ 1554 if (is5301x(dev) && dsa_is_cpu_port(ds, port)) 1555 tx_pause = rx_pause = true; 1556 1557 b53_force_port_config(dev, port, speed, duplex, 1558 tx_pause, rx_pause); 1559 b53_force_link(dev, port, true); 1560 return; 1561 } 1562 1563 if (phy_interface_mode_is_8023z(interface) && 1564 dev->ops->serdes_link_set) 1565 dev->ops->serdes_link_set(dev, port, mode, interface, true); 1566 } 1567 1568 int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, 1569 struct netlink_ext_ack *extack) 1570 { 1571 struct b53_device *dev = ds->priv; 1572 1573 if (dev->vlan_filtering != vlan_filtering) { 1574 dev->vlan_filtering = vlan_filtering; 1575 b53_apply_config(dev); 1576 } 1577 1578 return 0; 1579 } 1580 EXPORT_SYMBOL(b53_vlan_filtering); 1581 1582 static int b53_vlan_prepare(struct dsa_switch *ds, int port, 1583 const struct switchdev_obj_port_vlan *vlan) 1584 { 1585 struct b53_device *dev = ds->priv; 1586 1587 if ((is5325(dev) || is5365(dev)) && vlan->vid == 0) 1588 return -EOPNOTSUPP; 1589 1590 /* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of 1591 * receiving VLAN tagged frames at all, we can still allow the port to 1592 * be configured for egress untagged. 1593 */ 1594 if (dev->chip_id == BCM7278_DEVICE_ID && port == 7 && 1595 !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) 1596 return -EINVAL; 1597 1598 if (vlan->vid >= dev->num_vlans) 1599 return -ERANGE; 1600 1601 b53_enable_vlan(dev, port, true, dev->vlan_filtering); 1602 1603 return 0; 1604 } 1605 1606 int b53_vlan_add(struct dsa_switch *ds, int port, 1607 const struct switchdev_obj_port_vlan *vlan, 1608 struct netlink_ext_ack *extack) 1609 { 1610 struct b53_device *dev = ds->priv; 1611 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 1612 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; 1613 struct b53_vlan *vl; 1614 u16 old_pvid, new_pvid; 1615 int err; 1616 1617 err = b53_vlan_prepare(ds, port, vlan); 1618 if (err) 1619 return err; 1620 1621 if (vlan->vid == 0) 1622 return 0; 1623 1624 old_pvid = dev->ports[port].pvid; 1625 if (pvid) 1626 new_pvid = vlan->vid; 1627 else if (!pvid && vlan->vid == old_pvid) 1628 new_pvid = b53_default_pvid(dev); 1629 else 1630 new_pvid = old_pvid; 1631 dev->ports[port].pvid = new_pvid; 1632 1633 vl = &dev->vlans[vlan->vid]; 1634 1635 if (dsa_is_cpu_port(ds, port)) 1636 untagged = false; 1637 1638 vl->members |= BIT(port); 1639 if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port)) 1640 vl->untag |= BIT(port); 1641 else 1642 vl->untag &= ~BIT(port); 1643 1644 if (!dev->vlan_filtering) 1645 return 0; 1646 1647 b53_set_vlan_entry(dev, vlan->vid, vl); 1648 b53_fast_age_vlan(dev, vlan->vid); 1649 1650 if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) { 1651 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), 1652 new_pvid); 1653 b53_fast_age_vlan(dev, old_pvid); 1654 } 1655 1656 return 0; 1657 } 1658 EXPORT_SYMBOL(b53_vlan_add); 1659 1660 int b53_vlan_del(struct dsa_switch *ds, int port, 1661 const struct switchdev_obj_port_vlan *vlan) 1662 { 1663 struct b53_device *dev = ds->priv; 1664 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 1665 struct b53_vlan *vl; 1666 u16 pvid; 1667 1668 if (vlan->vid == 0) 1669 return 0; 1670 1671 pvid = dev->ports[port].pvid; 1672 1673 vl = &dev->vlans[vlan->vid]; 1674 1675 vl->members &= ~BIT(port); 1676 1677 if (pvid == vlan->vid) 1678 pvid = b53_default_pvid(dev); 1679 dev->ports[port].pvid = pvid; 1680 1681 if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port)) 1682 vl->untag &= ~(BIT(port)); 1683 1684 if (!dev->vlan_filtering) 1685 return 0; 1686 1687 b53_set_vlan_entry(dev, vlan->vid, vl); 1688 b53_fast_age_vlan(dev, vlan->vid); 1689 1690 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid); 1691 b53_fast_age_vlan(dev, pvid); 1692 1693 return 0; 1694 } 1695 EXPORT_SYMBOL(b53_vlan_del); 1696 1697 /* Address Resolution Logic routines. Caller must hold &dev->arl_mutex. */ 1698 static int b53_arl_op_wait(struct b53_device *dev) 1699 { 1700 unsigned int timeout = 10; 1701 u8 reg; 1702 1703 do { 1704 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); 1705 if (!(reg & ARLTBL_START_DONE)) 1706 return 0; 1707 1708 usleep_range(1000, 2000); 1709 } while (timeout--); 1710 1711 dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg); 1712 1713 return -ETIMEDOUT; 1714 } 1715 1716 static int b53_arl_rw_op(struct b53_device *dev, unsigned int op) 1717 { 1718 u8 reg; 1719 1720 if (op > ARLTBL_RW) 1721 return -EINVAL; 1722 1723 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); 1724 reg |= ARLTBL_START_DONE; 1725 if (op) 1726 reg |= ARLTBL_RW; 1727 else 1728 reg &= ~ARLTBL_RW; 1729 if (dev->vlan_enabled) 1730 reg &= ~ARLTBL_IVL_SVL_SELECT; 1731 else 1732 reg |= ARLTBL_IVL_SVL_SELECT; 1733 b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg); 1734 1735 return b53_arl_op_wait(dev); 1736 } 1737 1738 static int b53_arl_read(struct b53_device *dev, u64 mac, 1739 u16 vid, struct b53_arl_entry *ent, u8 *idx) 1740 { 1741 DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES); 1742 unsigned int i; 1743 int ret; 1744 1745 ret = b53_arl_op_wait(dev); 1746 if (ret) 1747 return ret; 1748 1749 bitmap_zero(free_bins, dev->num_arl_bins); 1750 1751 /* Read the bins */ 1752 for (i = 0; i < dev->num_arl_bins; i++) { 1753 u64 mac_vid; 1754 u32 fwd_entry; 1755 1756 b53_read64(dev, B53_ARLIO_PAGE, 1757 B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid); 1758 b53_read32(dev, B53_ARLIO_PAGE, 1759 B53_ARLTBL_DATA_ENTRY(i), &fwd_entry); 1760 b53_arl_to_entry(ent, mac_vid, fwd_entry); 1761 1762 if (!(fwd_entry & ARLTBL_VALID)) { 1763 set_bit(i, free_bins); 1764 continue; 1765 } 1766 if ((mac_vid & ARLTBL_MAC_MASK) != mac) 1767 continue; 1768 if (dev->vlan_enabled && 1769 ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid) 1770 continue; 1771 *idx = i; 1772 return 0; 1773 } 1774 1775 *idx = find_first_bit(free_bins, dev->num_arl_bins); 1776 return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT; 1777 } 1778 1779 static int b53_arl_op(struct b53_device *dev, int op, int port, 1780 const unsigned char *addr, u16 vid, bool is_valid) 1781 { 1782 struct b53_arl_entry ent; 1783 u32 fwd_entry; 1784 u64 mac, mac_vid = 0; 1785 u8 idx = 0; 1786 int ret; 1787 1788 /* Convert the array into a 64-bit MAC */ 1789 mac = ether_addr_to_u64(addr); 1790 1791 /* Perform a read for the given MAC and VID */ 1792 b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac); 1793 b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid); 1794 1795 /* Issue a read operation for this MAC */ 1796 ret = b53_arl_rw_op(dev, 1); 1797 if (ret) 1798 return ret; 1799 1800 ret = b53_arl_read(dev, mac, vid, &ent, &idx); 1801 1802 /* If this is a read, just finish now */ 1803 if (op) 1804 return ret; 1805 1806 switch (ret) { 1807 case -ETIMEDOUT: 1808 return ret; 1809 case -ENOSPC: 1810 dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n", 1811 addr, vid); 1812 return is_valid ? ret : 0; 1813 case -ENOENT: 1814 /* We could not find a matching MAC, so reset to a new entry */ 1815 dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n", 1816 addr, vid, idx); 1817 fwd_entry = 0; 1818 break; 1819 default: 1820 dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n", 1821 addr, vid, idx); 1822 break; 1823 } 1824 1825 /* For multicast address, the port is a bitmask and the validity 1826 * is determined by having at least one port being still active 1827 */ 1828 if (!is_multicast_ether_addr(addr)) { 1829 ent.port = port; 1830 ent.is_valid = is_valid; 1831 } else { 1832 if (is_valid) 1833 ent.port |= BIT(port); 1834 else 1835 ent.port &= ~BIT(port); 1836 1837 ent.is_valid = !!(ent.port); 1838 } 1839 1840 ent.vid = vid; 1841 ent.is_static = true; 1842 ent.is_age = false; 1843 memcpy(ent.mac, addr, ETH_ALEN); 1844 b53_arl_from_entry(&mac_vid, &fwd_entry, &ent); 1845 1846 b53_write64(dev, B53_ARLIO_PAGE, 1847 B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid); 1848 b53_write32(dev, B53_ARLIO_PAGE, 1849 B53_ARLTBL_DATA_ENTRY(idx), fwd_entry); 1850 1851 return b53_arl_rw_op(dev, 0); 1852 } 1853 1854 int b53_fdb_add(struct dsa_switch *ds, int port, 1855 const unsigned char *addr, u16 vid, 1856 struct dsa_db db) 1857 { 1858 struct b53_device *priv = ds->priv; 1859 int ret; 1860 1861 /* 5325 and 5365 require some more massaging, but could 1862 * be supported eventually 1863 */ 1864 if (is5325(priv) || is5365(priv)) 1865 return -EOPNOTSUPP; 1866 1867 mutex_lock(&priv->arl_mutex); 1868 ret = b53_arl_op(priv, 0, port, addr, vid, true); 1869 mutex_unlock(&priv->arl_mutex); 1870 1871 return ret; 1872 } 1873 EXPORT_SYMBOL(b53_fdb_add); 1874 1875 int b53_fdb_del(struct dsa_switch *ds, int port, 1876 const unsigned char *addr, u16 vid, 1877 struct dsa_db db) 1878 { 1879 struct b53_device *priv = ds->priv; 1880 int ret; 1881 1882 mutex_lock(&priv->arl_mutex); 1883 ret = b53_arl_op(priv, 0, port, addr, vid, false); 1884 mutex_unlock(&priv->arl_mutex); 1885 1886 return ret; 1887 } 1888 EXPORT_SYMBOL(b53_fdb_del); 1889 1890 static int b53_arl_search_wait(struct b53_device *dev) 1891 { 1892 unsigned int timeout = 1000; 1893 u8 reg; 1894 1895 do { 1896 b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, ®); 1897 if (!(reg & ARL_SRCH_STDN)) 1898 return 0; 1899 1900 if (reg & ARL_SRCH_VLID) 1901 return 0; 1902 1903 usleep_range(1000, 2000); 1904 } while (timeout--); 1905 1906 return -ETIMEDOUT; 1907 } 1908 1909 static void b53_arl_search_rd(struct b53_device *dev, u8 idx, 1910 struct b53_arl_entry *ent) 1911 { 1912 u64 mac_vid; 1913 u32 fwd_entry; 1914 1915 b53_read64(dev, B53_ARLIO_PAGE, 1916 B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid); 1917 b53_read32(dev, B53_ARLIO_PAGE, 1918 B53_ARL_SRCH_RSTL(idx), &fwd_entry); 1919 b53_arl_to_entry(ent, mac_vid, fwd_entry); 1920 } 1921 1922 static int b53_fdb_copy(int port, const struct b53_arl_entry *ent, 1923 dsa_fdb_dump_cb_t *cb, void *data) 1924 { 1925 if (!ent->is_valid) 1926 return 0; 1927 1928 if (port != ent->port) 1929 return 0; 1930 1931 return cb(ent->mac, ent->vid, ent->is_static, data); 1932 } 1933 1934 int b53_fdb_dump(struct dsa_switch *ds, int port, 1935 dsa_fdb_dump_cb_t *cb, void *data) 1936 { 1937 struct b53_device *priv = ds->priv; 1938 struct b53_arl_entry results[2]; 1939 unsigned int count = 0; 1940 int ret; 1941 u8 reg; 1942 1943 mutex_lock(&priv->arl_mutex); 1944 1945 /* Start search operation */ 1946 reg = ARL_SRCH_STDN; 1947 b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg); 1948 1949 do { 1950 ret = b53_arl_search_wait(priv); 1951 if (ret) 1952 break; 1953 1954 b53_arl_search_rd(priv, 0, &results[0]); 1955 ret = b53_fdb_copy(port, &results[0], cb, data); 1956 if (ret) 1957 break; 1958 1959 if (priv->num_arl_bins > 2) { 1960 b53_arl_search_rd(priv, 1, &results[1]); 1961 ret = b53_fdb_copy(port, &results[1], cb, data); 1962 if (ret) 1963 break; 1964 1965 if (!results[0].is_valid && !results[1].is_valid) 1966 break; 1967 } 1968 1969 } while (count++ < b53_max_arl_entries(priv) / 2); 1970 1971 mutex_unlock(&priv->arl_mutex); 1972 1973 return 0; 1974 } 1975 EXPORT_SYMBOL(b53_fdb_dump); 1976 1977 int b53_mdb_add(struct dsa_switch *ds, int port, 1978 const struct switchdev_obj_port_mdb *mdb, 1979 struct dsa_db db) 1980 { 1981 struct b53_device *priv = ds->priv; 1982 int ret; 1983 1984 /* 5325 and 5365 require some more massaging, but could 1985 * be supported eventually 1986 */ 1987 if (is5325(priv) || is5365(priv)) 1988 return -EOPNOTSUPP; 1989 1990 mutex_lock(&priv->arl_mutex); 1991 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true); 1992 mutex_unlock(&priv->arl_mutex); 1993 1994 return ret; 1995 } 1996 EXPORT_SYMBOL(b53_mdb_add); 1997 1998 int b53_mdb_del(struct dsa_switch *ds, int port, 1999 const struct switchdev_obj_port_mdb *mdb, 2000 struct dsa_db db) 2001 { 2002 struct b53_device *priv = ds->priv; 2003 int ret; 2004 2005 mutex_lock(&priv->arl_mutex); 2006 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false); 2007 mutex_unlock(&priv->arl_mutex); 2008 if (ret) 2009 dev_err(ds->dev, "failed to delete MDB entry\n"); 2010 2011 return ret; 2012 } 2013 EXPORT_SYMBOL(b53_mdb_del); 2014 2015 int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, 2016 bool *tx_fwd_offload, struct netlink_ext_ack *extack) 2017 { 2018 struct b53_device *dev = ds->priv; 2019 struct b53_vlan *vl; 2020 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; 2021 u16 pvlan, reg, pvid; 2022 unsigned int i; 2023 2024 /* On 7278, port 7 which connects to the ASP should only receive 2025 * traffic from matching CFP rules. 2026 */ 2027 if (dev->chip_id == BCM7278_DEVICE_ID && port == 7) 2028 return -EINVAL; 2029 2030 pvid = b53_default_pvid(dev); 2031 vl = &dev->vlans[pvid]; 2032 2033 if (dev->vlan_filtering) { 2034 /* Make this port leave the all VLANs join since we will have 2035 * proper VLAN entries from now on 2036 */ 2037 if (is58xx(dev)) { 2038 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, 2039 ®); 2040 reg &= ~BIT(port); 2041 if ((reg & BIT(cpu_port)) == BIT(cpu_port)) 2042 reg &= ~BIT(cpu_port); 2043 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, 2044 reg); 2045 } 2046 2047 b53_get_vlan_entry(dev, pvid, vl); 2048 vl->members &= ~BIT(port); 2049 if (vl->members == BIT(cpu_port)) 2050 vl->members &= ~BIT(cpu_port); 2051 vl->untag = vl->members; 2052 b53_set_vlan_entry(dev, pvid, vl); 2053 } 2054 2055 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 2056 2057 b53_for_each_port(dev, i) { 2058 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) 2059 continue; 2060 2061 /* Add this local port to the remote port VLAN control 2062 * membership and update the remote port bitmask 2063 */ 2064 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), ®); 2065 reg |= BIT(port); 2066 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg); 2067 dev->ports[i].vlan_ctl_mask = reg; 2068 2069 pvlan |= BIT(i); 2070 } 2071 2072 /* Disable redirection of unknown SA to the CPU port */ 2073 b53_set_eap_mode(dev, port, EAP_MODE_BASIC); 2074 2075 /* Configure the local port VLAN control membership to include 2076 * remote ports and update the local port bitmask 2077 */ 2078 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 2079 dev->ports[port].vlan_ctl_mask = pvlan; 2080 2081 return 0; 2082 } 2083 EXPORT_SYMBOL(b53_br_join); 2084 2085 void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) 2086 { 2087 struct b53_device *dev = ds->priv; 2088 struct b53_vlan *vl; 2089 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; 2090 unsigned int i; 2091 u16 pvlan, reg, pvid; 2092 2093 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 2094 2095 b53_for_each_port(dev, i) { 2096 /* Don't touch the remaining ports */ 2097 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) 2098 continue; 2099 2100 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), ®); 2101 reg &= ~BIT(port); 2102 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg); 2103 dev->ports[port].vlan_ctl_mask = reg; 2104 2105 /* Prevent self removal to preserve isolation */ 2106 if (port != i) 2107 pvlan &= ~BIT(i); 2108 } 2109 2110 /* Enable redirection of unknown SA to the CPU port */ 2111 b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED); 2112 2113 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 2114 dev->ports[port].vlan_ctl_mask = pvlan; 2115 2116 pvid = b53_default_pvid(dev); 2117 vl = &dev->vlans[pvid]; 2118 2119 if (dev->vlan_filtering) { 2120 /* Make this port join all VLANs without VLAN entries */ 2121 if (is58xx(dev)) { 2122 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, ®); 2123 reg |= BIT(port); 2124 if (!(reg & BIT(cpu_port))) 2125 reg |= BIT(cpu_port); 2126 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); 2127 } 2128 2129 b53_get_vlan_entry(dev, pvid, vl); 2130 vl->members |= BIT(port) | BIT(cpu_port); 2131 vl->untag |= BIT(port) | BIT(cpu_port); 2132 b53_set_vlan_entry(dev, pvid, vl); 2133 } 2134 } 2135 EXPORT_SYMBOL(b53_br_leave); 2136 2137 void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state) 2138 { 2139 struct b53_device *dev = ds->priv; 2140 u8 hw_state; 2141 u8 reg; 2142 2143 switch (state) { 2144 case BR_STATE_DISABLED: 2145 hw_state = PORT_CTRL_DIS_STATE; 2146 break; 2147 case BR_STATE_LISTENING: 2148 hw_state = PORT_CTRL_LISTEN_STATE; 2149 break; 2150 case BR_STATE_LEARNING: 2151 hw_state = PORT_CTRL_LEARN_STATE; 2152 break; 2153 case BR_STATE_FORWARDING: 2154 hw_state = PORT_CTRL_FWD_STATE; 2155 break; 2156 case BR_STATE_BLOCKING: 2157 hw_state = PORT_CTRL_BLOCK_STATE; 2158 break; 2159 default: 2160 dev_err(ds->dev, "invalid STP state: %d\n", state); 2161 return; 2162 } 2163 2164 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), ®); 2165 reg &= ~PORT_CTRL_STP_STATE_MASK; 2166 reg |= hw_state; 2167 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); 2168 } 2169 EXPORT_SYMBOL(b53_br_set_stp_state); 2170 2171 void b53_br_fast_age(struct dsa_switch *ds, int port) 2172 { 2173 struct b53_device *dev = ds->priv; 2174 2175 if (b53_fast_age_port(dev, port)) 2176 dev_err(ds->dev, "fast ageing failed\n"); 2177 } 2178 EXPORT_SYMBOL(b53_br_fast_age); 2179 2180 int b53_br_flags_pre(struct dsa_switch *ds, int port, 2181 struct switchdev_brport_flags flags, 2182 struct netlink_ext_ack *extack) 2183 { 2184 if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_LEARNING)) 2185 return -EINVAL; 2186 2187 return 0; 2188 } 2189 EXPORT_SYMBOL(b53_br_flags_pre); 2190 2191 int b53_br_flags(struct dsa_switch *ds, int port, 2192 struct switchdev_brport_flags flags, 2193 struct netlink_ext_ack *extack) 2194 { 2195 if (flags.mask & BR_FLOOD) 2196 b53_port_set_ucast_flood(ds->priv, port, 2197 !!(flags.val & BR_FLOOD)); 2198 if (flags.mask & BR_MCAST_FLOOD) 2199 b53_port_set_mcast_flood(ds->priv, port, 2200 !!(flags.val & BR_MCAST_FLOOD)); 2201 if (flags.mask & BR_LEARNING) 2202 b53_port_set_learning(ds->priv, port, 2203 !!(flags.val & BR_LEARNING)); 2204 2205 return 0; 2206 } 2207 EXPORT_SYMBOL(b53_br_flags); 2208 2209 static bool b53_possible_cpu_port(struct dsa_switch *ds, int port) 2210 { 2211 /* Broadcom switches will accept enabling Broadcom tags on the 2212 * following ports: 5, 7 and 8, any other port is not supported 2213 */ 2214 switch (port) { 2215 case B53_CPU_PORT_25: 2216 case 7: 2217 case B53_CPU_PORT: 2218 return true; 2219 } 2220 2221 return false; 2222 } 2223 2224 static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port, 2225 enum dsa_tag_protocol tag_protocol) 2226 { 2227 bool ret = b53_possible_cpu_port(ds, port); 2228 2229 if (!ret) { 2230 dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n", 2231 port); 2232 return ret; 2233 } 2234 2235 switch (tag_protocol) { 2236 case DSA_TAG_PROTO_BRCM: 2237 case DSA_TAG_PROTO_BRCM_PREPEND: 2238 dev_warn(ds->dev, 2239 "Port %d is stacked to Broadcom tag switch\n", port); 2240 ret = false; 2241 break; 2242 default: 2243 ret = true; 2244 break; 2245 } 2246 2247 return ret; 2248 } 2249 2250 enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port, 2251 enum dsa_tag_protocol mprot) 2252 { 2253 struct b53_device *dev = ds->priv; 2254 2255 if (!b53_can_enable_brcm_tags(ds, port, mprot)) { 2256 dev->tag_protocol = DSA_TAG_PROTO_NONE; 2257 goto out; 2258 } 2259 2260 /* Older models require a different 6 byte tag */ 2261 if (is5325(dev) || is5365(dev) || is63xx(dev)) { 2262 dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY; 2263 goto out; 2264 } 2265 2266 /* Broadcom BCM58xx chips have a flow accelerator on Port 8 2267 * which requires us to use the prepended Broadcom tag type 2268 */ 2269 if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT) { 2270 dev->tag_protocol = DSA_TAG_PROTO_BRCM_PREPEND; 2271 goto out; 2272 } 2273 2274 dev->tag_protocol = DSA_TAG_PROTO_BRCM; 2275 out: 2276 return dev->tag_protocol; 2277 } 2278 EXPORT_SYMBOL(b53_get_tag_protocol); 2279 2280 int b53_mirror_add(struct dsa_switch *ds, int port, 2281 struct dsa_mall_mirror_tc_entry *mirror, bool ingress, 2282 struct netlink_ext_ack *extack) 2283 { 2284 struct b53_device *dev = ds->priv; 2285 u16 reg, loc; 2286 2287 if (ingress) 2288 loc = B53_IG_MIR_CTL; 2289 else 2290 loc = B53_EG_MIR_CTL; 2291 2292 b53_read16(dev, B53_MGMT_PAGE, loc, ®); 2293 reg |= BIT(port); 2294 b53_write16(dev, B53_MGMT_PAGE, loc, reg); 2295 2296 b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, ®); 2297 reg &= ~CAP_PORT_MASK; 2298 reg |= mirror->to_local_port; 2299 reg |= MIRROR_EN; 2300 b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg); 2301 2302 return 0; 2303 } 2304 EXPORT_SYMBOL(b53_mirror_add); 2305 2306 void b53_mirror_del(struct dsa_switch *ds, int port, 2307 struct dsa_mall_mirror_tc_entry *mirror) 2308 { 2309 struct b53_device *dev = ds->priv; 2310 bool loc_disable = false, other_loc_disable = false; 2311 u16 reg, loc; 2312 2313 if (mirror->ingress) 2314 loc = B53_IG_MIR_CTL; 2315 else 2316 loc = B53_EG_MIR_CTL; 2317 2318 /* Update the desired ingress/egress register */ 2319 b53_read16(dev, B53_MGMT_PAGE, loc, ®); 2320 reg &= ~BIT(port); 2321 if (!(reg & MIRROR_MASK)) 2322 loc_disable = true; 2323 b53_write16(dev, B53_MGMT_PAGE, loc, reg); 2324 2325 /* Now look at the other one to know if we can disable mirroring 2326 * entirely 2327 */ 2328 if (mirror->ingress) 2329 b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, ®); 2330 else 2331 b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, ®); 2332 if (!(reg & MIRROR_MASK)) 2333 other_loc_disable = true; 2334 2335 b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, ®); 2336 /* Both no longer have ports, let's disable mirroring */ 2337 if (loc_disable && other_loc_disable) { 2338 reg &= ~MIRROR_EN; 2339 reg &= ~mirror->to_local_port; 2340 } 2341 b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg); 2342 } 2343 EXPORT_SYMBOL(b53_mirror_del); 2344 2345 /* Returns 0 if EEE was not enabled, or 1 otherwise 2346 */ 2347 int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy) 2348 { 2349 int ret; 2350 2351 ret = phy_init_eee(phy, false); 2352 if (ret) 2353 return 0; 2354 2355 b53_eee_enable_set(ds, port, true); 2356 2357 return 1; 2358 } 2359 EXPORT_SYMBOL(b53_eee_init); 2360 2361 bool b53_support_eee(struct dsa_switch *ds, int port) 2362 { 2363 struct b53_device *dev = ds->priv; 2364 2365 return !is5325(dev) && !is5365(dev); 2366 } 2367 EXPORT_SYMBOL(b53_support_eee); 2368 2369 int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e) 2370 { 2371 struct b53_device *dev = ds->priv; 2372 struct ethtool_keee *p = &dev->ports[port].eee; 2373 2374 p->eee_enabled = e->eee_enabled; 2375 b53_eee_enable_set(ds, port, e->eee_enabled); 2376 2377 return 0; 2378 } 2379 EXPORT_SYMBOL(b53_set_mac_eee); 2380 2381 static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu) 2382 { 2383 struct b53_device *dev = ds->priv; 2384 bool enable_jumbo; 2385 bool allow_10_100; 2386 2387 if (is5325(dev) || is5365(dev)) 2388 return 0; 2389 2390 if (!dsa_is_cpu_port(ds, port)) 2391 return 0; 2392 2393 enable_jumbo = (mtu > ETH_DATA_LEN); 2394 allow_10_100 = !is63xx(dev); 2395 2396 return b53_set_jumbo(dev, enable_jumbo, allow_10_100); 2397 } 2398 2399 static int b53_get_max_mtu(struct dsa_switch *ds, int port) 2400 { 2401 struct b53_device *dev = ds->priv; 2402 2403 if (is5325(dev) || is5365(dev)) 2404 return B53_MAX_MTU_25; 2405 2406 return B53_MAX_MTU; 2407 } 2408 2409 static const struct phylink_mac_ops b53_phylink_mac_ops = { 2410 .mac_select_pcs = b53_phylink_mac_select_pcs, 2411 .mac_config = b53_phylink_mac_config, 2412 .mac_link_down = b53_phylink_mac_link_down, 2413 .mac_link_up = b53_phylink_mac_link_up, 2414 }; 2415 2416 static const struct dsa_switch_ops b53_switch_ops = { 2417 .get_tag_protocol = b53_get_tag_protocol, 2418 .setup = b53_setup, 2419 .teardown = b53_teardown, 2420 .get_strings = b53_get_strings, 2421 .get_ethtool_stats = b53_get_ethtool_stats, 2422 .get_sset_count = b53_get_sset_count, 2423 .get_ethtool_phy_stats = b53_get_ethtool_phy_stats, 2424 .phy_read = b53_phy_read16, 2425 .phy_write = b53_phy_write16, 2426 .phylink_get_caps = b53_phylink_get_caps, 2427 .port_setup = b53_setup_port, 2428 .port_enable = b53_enable_port, 2429 .port_disable = b53_disable_port, 2430 .support_eee = b53_support_eee, 2431 .set_mac_eee = b53_set_mac_eee, 2432 .port_bridge_join = b53_br_join, 2433 .port_bridge_leave = b53_br_leave, 2434 .port_pre_bridge_flags = b53_br_flags_pre, 2435 .port_bridge_flags = b53_br_flags, 2436 .port_stp_state_set = b53_br_set_stp_state, 2437 .port_fast_age = b53_br_fast_age, 2438 .port_vlan_filtering = b53_vlan_filtering, 2439 .port_vlan_add = b53_vlan_add, 2440 .port_vlan_del = b53_vlan_del, 2441 .port_fdb_dump = b53_fdb_dump, 2442 .port_fdb_add = b53_fdb_add, 2443 .port_fdb_del = b53_fdb_del, 2444 .port_mirror_add = b53_mirror_add, 2445 .port_mirror_del = b53_mirror_del, 2446 .port_mdb_add = b53_mdb_add, 2447 .port_mdb_del = b53_mdb_del, 2448 .port_max_mtu = b53_get_max_mtu, 2449 .port_change_mtu = b53_change_mtu, 2450 }; 2451 2452 struct b53_chip_data { 2453 u32 chip_id; 2454 const char *dev_name; 2455 u16 vlans; 2456 u16 enabled_ports; 2457 u8 imp_port; 2458 u8 cpu_port; 2459 u8 vta_regs[3]; 2460 u8 arl_bins; 2461 u16 arl_buckets; 2462 u8 duplex_reg; 2463 u8 jumbo_pm_reg; 2464 u8 jumbo_size_reg; 2465 }; 2466 2467 #define B53_VTA_REGS \ 2468 { B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY } 2469 #define B53_VTA_REGS_9798 \ 2470 { B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 } 2471 #define B53_VTA_REGS_63XX \ 2472 { B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX } 2473 2474 static const struct b53_chip_data b53_switch_chips[] = { 2475 { 2476 .chip_id = BCM5325_DEVICE_ID, 2477 .dev_name = "BCM5325", 2478 .vlans = 16, 2479 .enabled_ports = 0x3f, 2480 .arl_bins = 2, 2481 .arl_buckets = 1024, 2482 .imp_port = 5, 2483 .duplex_reg = B53_DUPLEX_STAT_FE, 2484 }, 2485 { 2486 .chip_id = BCM5365_DEVICE_ID, 2487 .dev_name = "BCM5365", 2488 .vlans = 256, 2489 .enabled_ports = 0x3f, 2490 .arl_bins = 2, 2491 .arl_buckets = 1024, 2492 .imp_port = 5, 2493 .duplex_reg = B53_DUPLEX_STAT_FE, 2494 }, 2495 { 2496 .chip_id = BCM5389_DEVICE_ID, 2497 .dev_name = "BCM5389", 2498 .vlans = 4096, 2499 .enabled_ports = 0x11f, 2500 .arl_bins = 4, 2501 .arl_buckets = 1024, 2502 .imp_port = 8, 2503 .vta_regs = B53_VTA_REGS, 2504 .duplex_reg = B53_DUPLEX_STAT_GE, 2505 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2506 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2507 }, 2508 { 2509 .chip_id = BCM5395_DEVICE_ID, 2510 .dev_name = "BCM5395", 2511 .vlans = 4096, 2512 .enabled_ports = 0x11f, 2513 .arl_bins = 4, 2514 .arl_buckets = 1024, 2515 .imp_port = 8, 2516 .vta_regs = B53_VTA_REGS, 2517 .duplex_reg = B53_DUPLEX_STAT_GE, 2518 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2519 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2520 }, 2521 { 2522 .chip_id = BCM5397_DEVICE_ID, 2523 .dev_name = "BCM5397", 2524 .vlans = 4096, 2525 .enabled_ports = 0x11f, 2526 .arl_bins = 4, 2527 .arl_buckets = 1024, 2528 .imp_port = 8, 2529 .vta_regs = B53_VTA_REGS_9798, 2530 .duplex_reg = B53_DUPLEX_STAT_GE, 2531 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2532 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2533 }, 2534 { 2535 .chip_id = BCM5398_DEVICE_ID, 2536 .dev_name = "BCM5398", 2537 .vlans = 4096, 2538 .enabled_ports = 0x17f, 2539 .arl_bins = 4, 2540 .arl_buckets = 1024, 2541 .imp_port = 8, 2542 .vta_regs = B53_VTA_REGS_9798, 2543 .duplex_reg = B53_DUPLEX_STAT_GE, 2544 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2545 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2546 }, 2547 { 2548 .chip_id = BCM53101_DEVICE_ID, 2549 .dev_name = "BCM53101", 2550 .vlans = 4096, 2551 .enabled_ports = 0x11f, 2552 .arl_bins = 4, 2553 .arl_buckets = 512, 2554 .vta_regs = B53_VTA_REGS, 2555 .imp_port = 8, 2556 .duplex_reg = B53_DUPLEX_STAT_GE, 2557 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2558 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2559 }, 2560 { 2561 .chip_id = BCM53115_DEVICE_ID, 2562 .dev_name = "BCM53115", 2563 .vlans = 4096, 2564 .enabled_ports = 0x11f, 2565 .arl_bins = 4, 2566 .arl_buckets = 1024, 2567 .vta_regs = B53_VTA_REGS, 2568 .imp_port = 8, 2569 .duplex_reg = B53_DUPLEX_STAT_GE, 2570 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2571 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2572 }, 2573 { 2574 .chip_id = BCM53125_DEVICE_ID, 2575 .dev_name = "BCM53125", 2576 .vlans = 4096, 2577 .enabled_ports = 0x1ff, 2578 .arl_bins = 4, 2579 .arl_buckets = 1024, 2580 .imp_port = 8, 2581 .vta_regs = B53_VTA_REGS, 2582 .duplex_reg = B53_DUPLEX_STAT_GE, 2583 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2584 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2585 }, 2586 { 2587 .chip_id = BCM53128_DEVICE_ID, 2588 .dev_name = "BCM53128", 2589 .vlans = 4096, 2590 .enabled_ports = 0x1ff, 2591 .arl_bins = 4, 2592 .arl_buckets = 1024, 2593 .imp_port = 8, 2594 .vta_regs = B53_VTA_REGS, 2595 .duplex_reg = B53_DUPLEX_STAT_GE, 2596 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2597 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2598 }, 2599 { 2600 .chip_id = BCM63XX_DEVICE_ID, 2601 .dev_name = "BCM63xx", 2602 .vlans = 4096, 2603 .enabled_ports = 0, /* pdata must provide them */ 2604 .arl_bins = 4, 2605 .arl_buckets = 1024, 2606 .imp_port = 8, 2607 .vta_regs = B53_VTA_REGS_63XX, 2608 .duplex_reg = B53_DUPLEX_STAT_63XX, 2609 .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX, 2610 .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX, 2611 }, 2612 { 2613 .chip_id = BCM63268_DEVICE_ID, 2614 .dev_name = "BCM63268", 2615 .vlans = 4096, 2616 .enabled_ports = 0, /* pdata must provide them */ 2617 .arl_bins = 4, 2618 .arl_buckets = 1024, 2619 .imp_port = 8, 2620 .vta_regs = B53_VTA_REGS_63XX, 2621 .duplex_reg = B53_DUPLEX_STAT_63XX, 2622 .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX, 2623 .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX, 2624 }, 2625 { 2626 .chip_id = BCM53010_DEVICE_ID, 2627 .dev_name = "BCM53010", 2628 .vlans = 4096, 2629 .enabled_ports = 0x1bf, 2630 .arl_bins = 4, 2631 .arl_buckets = 1024, 2632 .imp_port = 8, 2633 .vta_regs = B53_VTA_REGS, 2634 .duplex_reg = B53_DUPLEX_STAT_GE, 2635 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2636 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2637 }, 2638 { 2639 .chip_id = BCM53011_DEVICE_ID, 2640 .dev_name = "BCM53011", 2641 .vlans = 4096, 2642 .enabled_ports = 0x1bf, 2643 .arl_bins = 4, 2644 .arl_buckets = 1024, 2645 .imp_port = 8, 2646 .vta_regs = B53_VTA_REGS, 2647 .duplex_reg = B53_DUPLEX_STAT_GE, 2648 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2649 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2650 }, 2651 { 2652 .chip_id = BCM53012_DEVICE_ID, 2653 .dev_name = "BCM53012", 2654 .vlans = 4096, 2655 .enabled_ports = 0x1bf, 2656 .arl_bins = 4, 2657 .arl_buckets = 1024, 2658 .imp_port = 8, 2659 .vta_regs = B53_VTA_REGS, 2660 .duplex_reg = B53_DUPLEX_STAT_GE, 2661 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2662 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2663 }, 2664 { 2665 .chip_id = BCM53018_DEVICE_ID, 2666 .dev_name = "BCM53018", 2667 .vlans = 4096, 2668 .enabled_ports = 0x1bf, 2669 .arl_bins = 4, 2670 .arl_buckets = 1024, 2671 .imp_port = 8, 2672 .vta_regs = B53_VTA_REGS, 2673 .duplex_reg = B53_DUPLEX_STAT_GE, 2674 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2675 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2676 }, 2677 { 2678 .chip_id = BCM53019_DEVICE_ID, 2679 .dev_name = "BCM53019", 2680 .vlans = 4096, 2681 .enabled_ports = 0x1bf, 2682 .arl_bins = 4, 2683 .arl_buckets = 1024, 2684 .imp_port = 8, 2685 .vta_regs = B53_VTA_REGS, 2686 .duplex_reg = B53_DUPLEX_STAT_GE, 2687 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2688 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2689 }, 2690 { 2691 .chip_id = BCM58XX_DEVICE_ID, 2692 .dev_name = "BCM585xx/586xx/88312", 2693 .vlans = 4096, 2694 .enabled_ports = 0x1ff, 2695 .arl_bins = 4, 2696 .arl_buckets = 1024, 2697 .imp_port = 8, 2698 .vta_regs = B53_VTA_REGS, 2699 .duplex_reg = B53_DUPLEX_STAT_GE, 2700 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2701 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2702 }, 2703 { 2704 .chip_id = BCM583XX_DEVICE_ID, 2705 .dev_name = "BCM583xx/11360", 2706 .vlans = 4096, 2707 .enabled_ports = 0x103, 2708 .arl_bins = 4, 2709 .arl_buckets = 1024, 2710 .imp_port = 8, 2711 .vta_regs = B53_VTA_REGS, 2712 .duplex_reg = B53_DUPLEX_STAT_GE, 2713 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2714 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2715 }, 2716 /* Starfighter 2 */ 2717 { 2718 .chip_id = BCM4908_DEVICE_ID, 2719 .dev_name = "BCM4908", 2720 .vlans = 4096, 2721 .enabled_ports = 0x1bf, 2722 .arl_bins = 4, 2723 .arl_buckets = 256, 2724 .imp_port = 8, 2725 .vta_regs = B53_VTA_REGS, 2726 .duplex_reg = B53_DUPLEX_STAT_GE, 2727 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2728 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2729 }, 2730 { 2731 .chip_id = BCM7445_DEVICE_ID, 2732 .dev_name = "BCM7445", 2733 .vlans = 4096, 2734 .enabled_ports = 0x1ff, 2735 .arl_bins = 4, 2736 .arl_buckets = 1024, 2737 .imp_port = 8, 2738 .vta_regs = B53_VTA_REGS, 2739 .duplex_reg = B53_DUPLEX_STAT_GE, 2740 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2741 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2742 }, 2743 { 2744 .chip_id = BCM7278_DEVICE_ID, 2745 .dev_name = "BCM7278", 2746 .vlans = 4096, 2747 .enabled_ports = 0x1ff, 2748 .arl_bins = 4, 2749 .arl_buckets = 256, 2750 .imp_port = 8, 2751 .vta_regs = B53_VTA_REGS, 2752 .duplex_reg = B53_DUPLEX_STAT_GE, 2753 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2754 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2755 }, 2756 { 2757 .chip_id = BCM53134_DEVICE_ID, 2758 .dev_name = "BCM53134", 2759 .vlans = 4096, 2760 .enabled_ports = 0x12f, 2761 .imp_port = 8, 2762 .cpu_port = B53_CPU_PORT, 2763 .vta_regs = B53_VTA_REGS, 2764 .arl_bins = 4, 2765 .arl_buckets = 1024, 2766 .duplex_reg = B53_DUPLEX_STAT_GE, 2767 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2768 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2769 }, 2770 }; 2771 2772 static int b53_switch_init(struct b53_device *dev) 2773 { 2774 unsigned int i; 2775 int ret; 2776 2777 for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) { 2778 const struct b53_chip_data *chip = &b53_switch_chips[i]; 2779 2780 if (chip->chip_id == dev->chip_id) { 2781 if (!dev->enabled_ports) 2782 dev->enabled_ports = chip->enabled_ports; 2783 dev->name = chip->dev_name; 2784 dev->duplex_reg = chip->duplex_reg; 2785 dev->vta_regs[0] = chip->vta_regs[0]; 2786 dev->vta_regs[1] = chip->vta_regs[1]; 2787 dev->vta_regs[2] = chip->vta_regs[2]; 2788 dev->jumbo_pm_reg = chip->jumbo_pm_reg; 2789 dev->imp_port = chip->imp_port; 2790 dev->num_vlans = chip->vlans; 2791 dev->num_arl_bins = chip->arl_bins; 2792 dev->num_arl_buckets = chip->arl_buckets; 2793 break; 2794 } 2795 } 2796 2797 /* check which BCM5325x version we have */ 2798 if (is5325(dev)) { 2799 u8 vc4; 2800 2801 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4); 2802 2803 /* check reserved bits */ 2804 switch (vc4 & 3) { 2805 case 1: 2806 /* BCM5325E */ 2807 break; 2808 case 3: 2809 /* BCM5325F - do not use port 4 */ 2810 dev->enabled_ports &= ~BIT(4); 2811 break; 2812 default: 2813 /* On the BCM47XX SoCs this is the supported internal switch.*/ 2814 #ifndef CONFIG_BCM47XX 2815 /* BCM5325M */ 2816 return -EINVAL; 2817 #else 2818 break; 2819 #endif 2820 } 2821 } 2822 2823 dev->num_ports = fls(dev->enabled_ports); 2824 2825 dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS); 2826 2827 /* Include non standard CPU port built-in PHYs to be probed */ 2828 if (is539x(dev) || is531x5(dev)) { 2829 for (i = 0; i < dev->num_ports; i++) { 2830 if (!(dev->ds->phys_mii_mask & BIT(i)) && 2831 !b53_possible_cpu_port(dev->ds, i)) 2832 dev->ds->phys_mii_mask |= BIT(i); 2833 } 2834 } 2835 2836 dev->ports = devm_kcalloc(dev->dev, 2837 dev->num_ports, sizeof(struct b53_port), 2838 GFP_KERNEL); 2839 if (!dev->ports) 2840 return -ENOMEM; 2841 2842 dev->vlans = devm_kcalloc(dev->dev, 2843 dev->num_vlans, sizeof(struct b53_vlan), 2844 GFP_KERNEL); 2845 if (!dev->vlans) 2846 return -ENOMEM; 2847 2848 dev->reset_gpio = b53_switch_get_reset_gpio(dev); 2849 if (dev->reset_gpio >= 0) { 2850 ret = devm_gpio_request_one(dev->dev, dev->reset_gpio, 2851 GPIOF_OUT_INIT_HIGH, "robo_reset"); 2852 if (ret) 2853 return ret; 2854 } 2855 2856 return 0; 2857 } 2858 2859 struct b53_device *b53_switch_alloc(struct device *base, 2860 const struct b53_io_ops *ops, 2861 void *priv) 2862 { 2863 struct dsa_switch *ds; 2864 struct b53_device *dev; 2865 2866 ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL); 2867 if (!ds) 2868 return NULL; 2869 2870 ds->dev = base; 2871 2872 dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL); 2873 if (!dev) 2874 return NULL; 2875 2876 ds->priv = dev; 2877 dev->dev = base; 2878 2879 dev->ds = ds; 2880 dev->priv = priv; 2881 dev->ops = ops; 2882 ds->ops = &b53_switch_ops; 2883 ds->phylink_mac_ops = &b53_phylink_mac_ops; 2884 dev->vlan_enabled = true; 2885 dev->vlan_filtering = false; 2886 /* Let DSA handle the case were multiple bridges span the same switch 2887 * device and different VLAN awareness settings are requested, which 2888 * would be breaking filtering semantics for any of the other bridge 2889 * devices. (not hardware supported) 2890 */ 2891 ds->vlan_filtering_is_global = true; 2892 2893 mutex_init(&dev->reg_mutex); 2894 mutex_init(&dev->stats_mutex); 2895 mutex_init(&dev->arl_mutex); 2896 2897 return dev; 2898 } 2899 EXPORT_SYMBOL(b53_switch_alloc); 2900 2901 int b53_switch_detect(struct b53_device *dev) 2902 { 2903 u32 id32; 2904 u16 tmp; 2905 u8 id8; 2906 int ret; 2907 2908 ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8); 2909 if (ret) 2910 return ret; 2911 2912 switch (id8) { 2913 case 0: 2914 /* BCM5325 and BCM5365 do not have this register so reads 2915 * return 0. But the read operation did succeed, so assume this 2916 * is one of them. 2917 * 2918 * Next check if we can write to the 5325's VTA register; for 2919 * 5365 it is read only. 2920 */ 2921 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf); 2922 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp); 2923 2924 if (tmp == 0xf) 2925 dev->chip_id = BCM5325_DEVICE_ID; 2926 else 2927 dev->chip_id = BCM5365_DEVICE_ID; 2928 break; 2929 case BCM5389_DEVICE_ID: 2930 case BCM5395_DEVICE_ID: 2931 case BCM5397_DEVICE_ID: 2932 case BCM5398_DEVICE_ID: 2933 dev->chip_id = id8; 2934 break; 2935 default: 2936 ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32); 2937 if (ret) 2938 return ret; 2939 2940 switch (id32) { 2941 case BCM53101_DEVICE_ID: 2942 case BCM53115_DEVICE_ID: 2943 case BCM53125_DEVICE_ID: 2944 case BCM53128_DEVICE_ID: 2945 case BCM53010_DEVICE_ID: 2946 case BCM53011_DEVICE_ID: 2947 case BCM53012_DEVICE_ID: 2948 case BCM53018_DEVICE_ID: 2949 case BCM53019_DEVICE_ID: 2950 case BCM53134_DEVICE_ID: 2951 dev->chip_id = id32; 2952 break; 2953 default: 2954 dev_err(dev->dev, 2955 "unsupported switch detected (BCM53%02x/BCM%x)\n", 2956 id8, id32); 2957 return -ENODEV; 2958 } 2959 } 2960 2961 if (dev->chip_id == BCM5325_DEVICE_ID) 2962 return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25, 2963 &dev->core_rev); 2964 else 2965 return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID, 2966 &dev->core_rev); 2967 } 2968 EXPORT_SYMBOL(b53_switch_detect); 2969 2970 int b53_switch_register(struct b53_device *dev) 2971 { 2972 int ret; 2973 2974 if (dev->pdata) { 2975 dev->chip_id = dev->pdata->chip_id; 2976 dev->enabled_ports = dev->pdata->enabled_ports; 2977 } 2978 2979 if (!dev->chip_id && b53_switch_detect(dev)) 2980 return -EINVAL; 2981 2982 ret = b53_switch_init(dev); 2983 if (ret) 2984 return ret; 2985 2986 dev_info(dev->dev, "found switch: %s, rev %i\n", 2987 dev->name, dev->core_rev); 2988 2989 return dsa_register_switch(dev->ds); 2990 } 2991 EXPORT_SYMBOL(b53_switch_register); 2992 2993 MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>"); 2994 MODULE_DESCRIPTION("B53 switch library"); 2995 MODULE_LICENSE("Dual BSD/GPL"); 2996