1 /* 2 * B53 switch driver main logic 3 * 4 * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org> 5 * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com> 6 * 7 * Permission to use, copy, modify, and/or distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include <linux/delay.h> 21 #include <linux/export.h> 22 #include <linux/gpio.h> 23 #include <linux/kernel.h> 24 #include <linux/module.h> 25 #include <linux/platform_data/b53.h> 26 #include <linux/phy.h> 27 #include <linux/phylink.h> 28 #include <linux/etherdevice.h> 29 #include <linux/if_bridge.h> 30 #include <linux/if_vlan.h> 31 #include <net/dsa.h> 32 33 #include "b53_regs.h" 34 #include "b53_priv.h" 35 36 struct b53_mib_desc { 37 u8 size; 38 u8 offset; 39 const char *name; 40 }; 41 42 /* BCM5365 MIB counters */ 43 static const struct b53_mib_desc b53_mibs_65[] = { 44 { 8, 0x00, "TxOctets" }, 45 { 4, 0x08, "TxDropPkts" }, 46 { 4, 0x10, "TxBroadcastPkts" }, 47 { 4, 0x14, "TxMulticastPkts" }, 48 { 4, 0x18, "TxUnicastPkts" }, 49 { 4, 0x1c, "TxCollisions" }, 50 { 4, 0x20, "TxSingleCollision" }, 51 { 4, 0x24, "TxMultipleCollision" }, 52 { 4, 0x28, "TxDeferredTransmit" }, 53 { 4, 0x2c, "TxLateCollision" }, 54 { 4, 0x30, "TxExcessiveCollision" }, 55 { 4, 0x38, "TxPausePkts" }, 56 { 8, 0x44, "RxOctets" }, 57 { 4, 0x4c, "RxUndersizePkts" }, 58 { 4, 0x50, "RxPausePkts" }, 59 { 4, 0x54, "Pkts64Octets" }, 60 { 4, 0x58, "Pkts65to127Octets" }, 61 { 4, 0x5c, "Pkts128to255Octets" }, 62 { 4, 0x60, "Pkts256to511Octets" }, 63 { 4, 0x64, "Pkts512to1023Octets" }, 64 { 4, 0x68, "Pkts1024to1522Octets" }, 65 { 4, 0x6c, "RxOversizePkts" }, 66 { 4, 0x70, "RxJabbers" }, 67 { 4, 0x74, "RxAlignmentErrors" }, 68 { 4, 0x78, "RxFCSErrors" }, 69 { 8, 0x7c, "RxGoodOctets" }, 70 { 4, 0x84, "RxDropPkts" }, 71 { 4, 0x88, "RxUnicastPkts" }, 72 { 4, 0x8c, "RxMulticastPkts" }, 73 { 4, 0x90, "RxBroadcastPkts" }, 74 { 4, 0x94, "RxSAChanges" }, 75 { 4, 0x98, "RxFragments" }, 76 }; 77 78 #define B53_MIBS_65_SIZE ARRAY_SIZE(b53_mibs_65) 79 80 /* BCM63xx MIB counters */ 81 static const struct b53_mib_desc b53_mibs_63xx[] = { 82 { 8, 0x00, "TxOctets" }, 83 { 4, 0x08, "TxDropPkts" }, 84 { 4, 0x0c, "TxQoSPkts" }, 85 { 4, 0x10, "TxBroadcastPkts" }, 86 { 4, 0x14, "TxMulticastPkts" }, 87 { 4, 0x18, "TxUnicastPkts" }, 88 { 4, 0x1c, "TxCollisions" }, 89 { 4, 0x20, "TxSingleCollision" }, 90 { 4, 0x24, "TxMultipleCollision" }, 91 { 4, 0x28, "TxDeferredTransmit" }, 92 { 4, 0x2c, "TxLateCollision" }, 93 { 4, 0x30, "TxExcessiveCollision" }, 94 { 4, 0x38, "TxPausePkts" }, 95 { 8, 0x3c, "TxQoSOctets" }, 96 { 8, 0x44, "RxOctets" }, 97 { 4, 0x4c, "RxUndersizePkts" }, 98 { 4, 0x50, "RxPausePkts" }, 99 { 4, 0x54, "Pkts64Octets" }, 100 { 4, 0x58, "Pkts65to127Octets" }, 101 { 4, 0x5c, "Pkts128to255Octets" }, 102 { 4, 0x60, "Pkts256to511Octets" }, 103 { 4, 0x64, "Pkts512to1023Octets" }, 104 { 4, 0x68, "Pkts1024to1522Octets" }, 105 { 4, 0x6c, 
"RxOversizePkts" }, 106 { 4, 0x70, "RxJabbers" }, 107 { 4, 0x74, "RxAlignmentErrors" }, 108 { 4, 0x78, "RxFCSErrors" }, 109 { 8, 0x7c, "RxGoodOctets" }, 110 { 4, 0x84, "RxDropPkts" }, 111 { 4, 0x88, "RxUnicastPkts" }, 112 { 4, 0x8c, "RxMulticastPkts" }, 113 { 4, 0x90, "RxBroadcastPkts" }, 114 { 4, 0x94, "RxSAChanges" }, 115 { 4, 0x98, "RxFragments" }, 116 { 4, 0xa0, "RxSymbolErrors" }, 117 { 4, 0xa4, "RxQoSPkts" }, 118 { 8, 0xa8, "RxQoSOctets" }, 119 { 4, 0xb0, "Pkts1523to2047Octets" }, 120 { 4, 0xb4, "Pkts2048to4095Octets" }, 121 { 4, 0xb8, "Pkts4096to8191Octets" }, 122 { 4, 0xbc, "Pkts8192to9728Octets" }, 123 { 4, 0xc0, "RxDiscarded" }, 124 }; 125 126 #define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx) 127 128 /* MIB counters */ 129 static const struct b53_mib_desc b53_mibs[] = { 130 { 8, 0x00, "TxOctets" }, 131 { 4, 0x08, "TxDropPkts" }, 132 { 4, 0x10, "TxBroadcastPkts" }, 133 { 4, 0x14, "TxMulticastPkts" }, 134 { 4, 0x18, "TxUnicastPkts" }, 135 { 4, 0x1c, "TxCollisions" }, 136 { 4, 0x20, "TxSingleCollision" }, 137 { 4, 0x24, "TxMultipleCollision" }, 138 { 4, 0x28, "TxDeferredTransmit" }, 139 { 4, 0x2c, "TxLateCollision" }, 140 { 4, 0x30, "TxExcessiveCollision" }, 141 { 4, 0x38, "TxPausePkts" }, 142 { 8, 0x50, "RxOctets" }, 143 { 4, 0x58, "RxUndersizePkts" }, 144 { 4, 0x5c, "RxPausePkts" }, 145 { 4, 0x60, "Pkts64Octets" }, 146 { 4, 0x64, "Pkts65to127Octets" }, 147 { 4, 0x68, "Pkts128to255Octets" }, 148 { 4, 0x6c, "Pkts256to511Octets" }, 149 { 4, 0x70, "Pkts512to1023Octets" }, 150 { 4, 0x74, "Pkts1024to1522Octets" }, 151 { 4, 0x78, "RxOversizePkts" }, 152 { 4, 0x7c, "RxJabbers" }, 153 { 4, 0x80, "RxAlignmentErrors" }, 154 { 4, 0x84, "RxFCSErrors" }, 155 { 8, 0x88, "RxGoodOctets" }, 156 { 4, 0x90, "RxDropPkts" }, 157 { 4, 0x94, "RxUnicastPkts" }, 158 { 4, 0x98, "RxMulticastPkts" }, 159 { 4, 0x9c, "RxBroadcastPkts" }, 160 { 4, 0xa0, "RxSAChanges" }, 161 { 4, 0xa4, "RxFragments" }, 162 { 4, 0xa8, "RxJumboPkts" }, 163 { 4, 0xac, "RxSymbolErrors" }, 164 { 4, 0xc0, "RxDiscarded" }, 165 }; 166 167 #define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs) 168 169 static const struct b53_mib_desc b53_mibs_58xx[] = { 170 { 8, 0x00, "TxOctets" }, 171 { 4, 0x08, "TxDropPkts" }, 172 { 4, 0x0c, "TxQPKTQ0" }, 173 { 4, 0x10, "TxBroadcastPkts" }, 174 { 4, 0x14, "TxMulticastPkts" }, 175 { 4, 0x18, "TxUnicastPKts" }, 176 { 4, 0x1c, "TxCollisions" }, 177 { 4, 0x20, "TxSingleCollision" }, 178 { 4, 0x24, "TxMultipleCollision" }, 179 { 4, 0x28, "TxDeferredCollision" }, 180 { 4, 0x2c, "TxLateCollision" }, 181 { 4, 0x30, "TxExcessiveCollision" }, 182 { 4, 0x34, "TxFrameInDisc" }, 183 { 4, 0x38, "TxPausePkts" }, 184 { 4, 0x3c, "TxQPKTQ1" }, 185 { 4, 0x40, "TxQPKTQ2" }, 186 { 4, 0x44, "TxQPKTQ3" }, 187 { 4, 0x48, "TxQPKTQ4" }, 188 { 4, 0x4c, "TxQPKTQ5" }, 189 { 8, 0x50, "RxOctets" }, 190 { 4, 0x58, "RxUndersizePkts" }, 191 { 4, 0x5c, "RxPausePkts" }, 192 { 4, 0x60, "RxPkts64Octets" }, 193 { 4, 0x64, "RxPkts65to127Octets" }, 194 { 4, 0x68, "RxPkts128to255Octets" }, 195 { 4, 0x6c, "RxPkts256to511Octets" }, 196 { 4, 0x70, "RxPkts512to1023Octets" }, 197 { 4, 0x74, "RxPkts1024toMaxPktsOctets" }, 198 { 4, 0x78, "RxOversizePkts" }, 199 { 4, 0x7c, "RxJabbers" }, 200 { 4, 0x80, "RxAlignmentErrors" }, 201 { 4, 0x84, "RxFCSErrors" }, 202 { 8, 0x88, "RxGoodOctets" }, 203 { 4, 0x90, "RxDropPkts" }, 204 { 4, 0x94, "RxUnicastPkts" }, 205 { 4, 0x98, "RxMulticastPkts" }, 206 { 4, 0x9c, "RxBroadcastPkts" }, 207 { 4, 0xa0, "RxSAChanges" }, 208 { 4, 0xa4, "RxFragments" }, 209 { 4, 0xa8, "RxJumboPkt" }, 210 { 4, 0xac, "RxSymblErr" }, 211 { 4, 0xb0, 
"InRangeErrCount" }, 212 { 4, 0xb4, "OutRangeErrCount" }, 213 { 4, 0xb8, "EEELpiEvent" }, 214 { 4, 0xbc, "EEELpiDuration" }, 215 { 4, 0xc0, "RxDiscard" }, 216 { 4, 0xc8, "TxQPKTQ6" }, 217 { 4, 0xcc, "TxQPKTQ7" }, 218 { 4, 0xd0, "TxPkts64Octets" }, 219 { 4, 0xd4, "TxPkts65to127Octets" }, 220 { 4, 0xd8, "TxPkts128to255Octets" }, 221 { 4, 0xdc, "TxPkts256to511Ocets" }, 222 { 4, 0xe0, "TxPkts512to1023Ocets" }, 223 { 4, 0xe4, "TxPkts1024toMaxPktOcets" }, 224 }; 225 226 #define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx) 227 228 #define B53_MAX_MTU_25 (1536 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) 229 #define B53_MAX_MTU (9720 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) 230 231 static int b53_do_vlan_op(struct b53_device *dev, u8 op) 232 { 233 unsigned int i; 234 235 b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op); 236 237 for (i = 0; i < 10; i++) { 238 u8 vta; 239 240 b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta); 241 if (!(vta & VTA_START_CMD)) 242 return 0; 243 244 usleep_range(100, 200); 245 } 246 247 return -EIO; 248 } 249 250 static void b53_set_vlan_entry(struct b53_device *dev, u16 vid, 251 struct b53_vlan *vlan) 252 { 253 if (is5325(dev)) { 254 u32 entry = 0; 255 256 if (vlan->members) { 257 entry = ((vlan->untag & VA_UNTAG_MASK_25) << 258 VA_UNTAG_S_25) | vlan->members; 259 if (dev->core_rev >= 3) 260 entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S; 261 else 262 entry |= VA_VALID_25; 263 } 264 265 b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry); 266 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid | 267 VTA_RW_STATE_WR | VTA_RW_OP_EN); 268 } else if (is5365(dev)) { 269 u16 entry = 0; 270 271 if (vlan->members) 272 entry = ((vlan->untag & VA_UNTAG_MASK_65) << 273 VA_UNTAG_S_65) | vlan->members | VA_VALID_65; 274 275 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry); 276 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid | 277 VTA_RW_STATE_WR | VTA_RW_OP_EN); 278 } else { 279 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid); 280 b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], 281 (vlan->untag << VTE_UNTAG_S) | vlan->members); 282 283 b53_do_vlan_op(dev, VTA_CMD_WRITE); 284 } 285 286 dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n", 287 vid, vlan->members, vlan->untag); 288 } 289 290 static void b53_get_vlan_entry(struct b53_device *dev, u16 vid, 291 struct b53_vlan *vlan) 292 { 293 if (is5325(dev)) { 294 u32 entry = 0; 295 296 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid | 297 VTA_RW_STATE_RD | VTA_RW_OP_EN); 298 b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry); 299 300 if (dev->core_rev >= 3) 301 vlan->valid = !!(entry & VA_VALID_25_R4); 302 else 303 vlan->valid = !!(entry & VA_VALID_25); 304 vlan->members = entry & VA_MEMBER_MASK; 305 vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25; 306 307 } else if (is5365(dev)) { 308 u16 entry = 0; 309 310 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid | 311 VTA_RW_STATE_WR | VTA_RW_OP_EN); 312 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry); 313 314 vlan->valid = !!(entry & VA_VALID_65); 315 vlan->members = entry & VA_MEMBER_MASK; 316 vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65; 317 } else { 318 u32 entry = 0; 319 320 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid); 321 b53_do_vlan_op(dev, VTA_CMD_READ); 322 b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry); 323 vlan->members = entry & VTE_MEMBERS; 324 vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS; 325 vlan->valid 
= true; 326 } 327 } 328 329 static void b53_set_forwarding(struct b53_device *dev, int enable) 330 { 331 u8 mgmt; 332 333 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 334 335 if (enable) 336 mgmt |= SM_SW_FWD_EN; 337 else 338 mgmt &= ~SM_SW_FWD_EN; 339 340 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 341 342 /* Include IMP port in dumb forwarding mode 343 */ 344 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt); 345 mgmt |= B53_MII_DUMB_FWDG_EN; 346 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); 347 348 /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether 349 * frames should be flooded or not. 350 */ 351 b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); 352 mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN; 353 b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); 354 } 355 356 static void b53_enable_vlan(struct b53_device *dev, int port, bool enable, 357 bool enable_filtering) 358 { 359 u8 mgmt, vc0, vc1, vc4 = 0, vc5; 360 361 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 362 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0); 363 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1); 364 365 if (is5325(dev) || is5365(dev)) { 366 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4); 367 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5); 368 } else if (is63xx(dev)) { 369 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4); 370 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5); 371 } else { 372 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4); 373 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5); 374 } 375 376 vc1 &= ~VC1_RX_MCST_FWD_EN; 377 378 if (enable) { 379 vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; 380 vc1 |= VC1_RX_MCST_UNTAG_EN; 381 vc4 &= ~VC4_ING_VID_CHECK_MASK; 382 if (enable_filtering) { 383 vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; 384 vc5 |= VC5_DROP_VTABLE_MISS; 385 } else { 386 vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S; 387 vc5 &= ~VC5_DROP_VTABLE_MISS; 388 } 389 390 if (is5325(dev)) 391 vc0 &= ~VC0_RESERVED_1; 392 393 if (is5325(dev) || is5365(dev)) 394 vc1 |= VC1_RX_MCST_TAG_EN; 395 396 } else { 397 vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID); 398 vc1 &= ~VC1_RX_MCST_UNTAG_EN; 399 vc4 &= ~VC4_ING_VID_CHECK_MASK; 400 vc5 &= ~VC5_DROP_VTABLE_MISS; 401 402 if (is5325(dev) || is5365(dev)) 403 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; 404 else 405 vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S; 406 407 if (is5325(dev) || is5365(dev)) 408 vc1 &= ~VC1_RX_MCST_TAG_EN; 409 } 410 411 if (!is5325(dev) && !is5365(dev)) 412 vc5 &= ~VC5_VID_FFF_EN; 413 414 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0); 415 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1); 416 417 if (is5325(dev) || is5365(dev)) { 418 /* enable the high 8 bit vid check on 5325 */ 419 if (is5325(dev) && enable) 420 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 421 VC3_HIGH_8BIT_EN); 422 else 423 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0); 424 425 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4); 426 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5); 427 } else if (is63xx(dev)) { 428 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0); 429 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4); 430 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5); 431 } else { 432 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0); 433 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4); 434 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5); 
435 } 436 437 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 438 439 dev->vlan_enabled = enable; 440 441 dev_dbg(dev->dev, "Port %d VLAN enabled: %d, filtering: %d\n", 442 port, enable, enable_filtering); 443 } 444 445 static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) 446 { 447 u32 port_mask = 0; 448 u16 max_size = JMS_MIN_SIZE; 449 450 if (is5325(dev) || is5365(dev)) 451 return -EINVAL; 452 453 if (enable) { 454 port_mask = dev->enabled_ports; 455 max_size = JMS_MAX_SIZE; 456 if (allow_10_100) 457 port_mask |= JPM_10_100_JUMBO_EN; 458 } 459 460 b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask); 461 return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size); 462 } 463 464 static int b53_flush_arl(struct b53_device *dev, u8 mask) 465 { 466 unsigned int i; 467 468 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, 469 FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask); 470 471 for (i = 0; i < 10; i++) { 472 u8 fast_age_ctrl; 473 474 b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, 475 &fast_age_ctrl); 476 477 if (!(fast_age_ctrl & FAST_AGE_DONE)) 478 goto out; 479 480 msleep(1); 481 } 482 483 return -ETIMEDOUT; 484 out: 485 /* Only age dynamic entries (default behavior) */ 486 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC); 487 return 0; 488 } 489 490 static int b53_fast_age_port(struct b53_device *dev, int port) 491 { 492 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port); 493 494 return b53_flush_arl(dev, FAST_AGE_PORT); 495 } 496 497 static int b53_fast_age_vlan(struct b53_device *dev, u16 vid) 498 { 499 b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid); 500 501 return b53_flush_arl(dev, FAST_AGE_VLAN); 502 } 503 504 void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) 505 { 506 struct b53_device *dev = ds->priv; 507 unsigned int i; 508 u16 pvlan; 509 510 /* Enable the IMP port to be in the same VLAN as the other ports 511 * on a per-port basis such that we only have Port i and IMP in 512 * the same VLAN. 
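 * (The loop below ORs BIT(cpu_port) into each port's port-based
 * VLAN mask, B53_PVLAN_PORT_MASK(i), to achieve this.)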
513 */ 514 b53_for_each_port(dev, i) { 515 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan); 516 pvlan |= BIT(cpu_port); 517 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan); 518 } 519 } 520 EXPORT_SYMBOL(b53_imp_vlan_setup); 521 522 static void b53_port_set_ucast_flood(struct b53_device *dev, int port, 523 bool unicast) 524 { 525 u16 uc; 526 527 b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc); 528 if (unicast) 529 uc |= BIT(port); 530 else 531 uc &= ~BIT(port); 532 b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc); 533 } 534 535 static void b53_port_set_mcast_flood(struct b53_device *dev, int port, 536 bool multicast) 537 { 538 u16 mc; 539 540 b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc); 541 if (multicast) 542 mc |= BIT(port); 543 else 544 mc &= ~BIT(port); 545 b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc); 546 547 b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc); 548 if (multicast) 549 mc |= BIT(port); 550 else 551 mc &= ~BIT(port); 552 b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc); 553 } 554 555 static void b53_port_set_learning(struct b53_device *dev, int port, 556 bool learning) 557 { 558 u16 reg; 559 560 b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, ®); 561 if (learning) 562 reg &= ~BIT(port); 563 else 564 reg |= BIT(port); 565 b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg); 566 } 567 568 static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable) 569 { 570 struct b53_device *dev = ds->priv; 571 u16 reg; 572 573 b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, ®); 574 if (enable) 575 reg |= BIT(port); 576 else 577 reg &= ~BIT(port); 578 b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg); 579 } 580 581 int b53_setup_port(struct dsa_switch *ds, int port) 582 { 583 struct b53_device *dev = ds->priv; 584 585 b53_port_set_ucast_flood(dev, port, true); 586 b53_port_set_mcast_flood(dev, port, true); 587 b53_port_set_learning(dev, port, false); 588 589 return 0; 590 } 591 EXPORT_SYMBOL(b53_setup_port); 592 593 int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) 594 { 595 struct b53_device *dev = ds->priv; 596 unsigned int cpu_port; 597 int ret = 0; 598 u16 pvlan; 599 600 if (!dsa_is_user_port(ds, port)) 601 return 0; 602 603 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; 604 605 if (dev->ops->irq_enable) 606 ret = dev->ops->irq_enable(dev, port); 607 if (ret) 608 return ret; 609 610 /* Clear the Rx and Tx disable bits and set to no spanning tree */ 611 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0); 612 613 /* Set this port, and only this one to be in the default VLAN, 614 * if member of a bridge, restore its membership prior to 615 * bringing down this port. 
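 * (The vlan_ctl_mask saved at bridge join/leave time is ORed back
 * into the port's PVLAN mask below.)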
616 */ 617 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 618 pvlan &= ~0x1ff; 619 pvlan |= BIT(port); 620 pvlan |= dev->ports[port].vlan_ctl_mask; 621 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 622 623 b53_imp_vlan_setup(ds, cpu_port); 624 625 /* If EEE was enabled, restore it */ 626 if (dev->ports[port].eee.eee_enabled) 627 b53_eee_enable_set(ds, port, true); 628 629 return 0; 630 } 631 EXPORT_SYMBOL(b53_enable_port); 632 633 void b53_disable_port(struct dsa_switch *ds, int port) 634 { 635 struct b53_device *dev = ds->priv; 636 u8 reg; 637 638 /* Disable Tx/Rx for the port */ 639 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), ®); 640 reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE; 641 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); 642 643 if (dev->ops->irq_disable) 644 dev->ops->irq_disable(dev, port); 645 } 646 EXPORT_SYMBOL(b53_disable_port); 647 648 void b53_brcm_hdr_setup(struct dsa_switch *ds, int port) 649 { 650 struct b53_device *dev = ds->priv; 651 bool tag_en = !(dev->tag_protocol == DSA_TAG_PROTO_NONE); 652 u8 hdr_ctl, val; 653 u16 reg; 654 655 /* Resolve which bit controls the Broadcom tag */ 656 switch (port) { 657 case 8: 658 val = BRCM_HDR_P8_EN; 659 break; 660 case 7: 661 val = BRCM_HDR_P7_EN; 662 break; 663 case 5: 664 val = BRCM_HDR_P5_EN; 665 break; 666 default: 667 val = 0; 668 break; 669 } 670 671 /* Enable management mode if tagging is requested */ 672 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &hdr_ctl); 673 if (tag_en) 674 hdr_ctl |= SM_SW_FWD_MODE; 675 else 676 hdr_ctl &= ~SM_SW_FWD_MODE; 677 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, hdr_ctl); 678 679 /* Configure the appropriate IMP port */ 680 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &hdr_ctl); 681 if (port == 8) 682 hdr_ctl |= GC_FRM_MGMT_PORT_MII; 683 else if (port == 5) 684 hdr_ctl |= GC_FRM_MGMT_PORT_M; 685 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl); 686 687 /* Enable Broadcom tags for IMP port */ 688 b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl); 689 if (tag_en) 690 hdr_ctl |= val; 691 else 692 hdr_ctl &= ~val; 693 b53_write8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, hdr_ctl); 694 695 /* Registers below are only accessible on newer devices */ 696 if (!is58xx(dev)) 697 return; 698 699 /* Enable reception Broadcom tag for CPU TX (switch RX) to 700 * allow us to tag outgoing frames 701 */ 702 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, ®); 703 if (tag_en) 704 reg &= ~BIT(port); 705 else 706 reg |= BIT(port); 707 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, reg); 708 709 /* Enable transmission of Broadcom tags from the switch (CPU RX) to 710 * allow delivering frames to the per-port net_devices 711 */ 712 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, ®); 713 if (tag_en) 714 reg &= ~BIT(port); 715 else 716 reg |= BIT(port); 717 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, reg); 718 } 719 EXPORT_SYMBOL(b53_brcm_hdr_setup); 720 721 static void b53_enable_cpu_port(struct b53_device *dev, int port) 722 { 723 u8 port_ctrl; 724 725 /* BCM5325 CPU port is at 8 */ 726 if ((is5325(dev) || is5365(dev)) && port == B53_CPU_PORT_25) 727 port = B53_CPU_PORT; 728 729 port_ctrl = PORT_CTRL_RX_BCST_EN | 730 PORT_CTRL_RX_MCST_EN | 731 PORT_CTRL_RX_UCST_EN; 732 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl); 733 734 b53_brcm_hdr_setup(dev->ds, port); 735 } 736 737 static void b53_enable_mib(struct b53_device *dev) 738 { 739 u8 gc; 740 741 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, 
&gc); 742 gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN); 743 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); 744 } 745 746 static void b53_enable_stp(struct b53_device *dev) 747 { 748 u8 gc; 749 750 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); 751 gc |= GC_RX_BPDU_EN; 752 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); 753 } 754 755 static u16 b53_default_pvid(struct b53_device *dev) 756 { 757 if (is5325(dev) || is5365(dev)) 758 return 1; 759 else 760 return 0; 761 } 762 763 static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port) 764 { 765 struct b53_device *dev = ds->priv; 766 767 return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port); 768 } 769 770 static bool b53_vlan_port_may_join_untagged(struct dsa_switch *ds, int port) 771 { 772 struct b53_device *dev = ds->priv; 773 struct dsa_port *dp; 774 775 if (!dev->vlan_filtering) 776 return true; 777 778 dp = dsa_to_port(ds, port); 779 780 if (dsa_port_is_cpu(dp)) 781 return true; 782 783 return dp->bridge == NULL; 784 } 785 786 int b53_configure_vlan(struct dsa_switch *ds) 787 { 788 struct b53_device *dev = ds->priv; 789 struct b53_vlan vl = { 0 }; 790 struct b53_vlan *v; 791 int i, def_vid; 792 u16 vid; 793 794 def_vid = b53_default_pvid(dev); 795 796 /* clear all vlan entries */ 797 if (is5325(dev) || is5365(dev)) { 798 for (i = def_vid; i < dev->num_vlans; i++) 799 b53_set_vlan_entry(dev, i, &vl); 800 } else { 801 b53_do_vlan_op(dev, VTA_CMD_CLEAR); 802 } 803 804 b53_enable_vlan(dev, -1, dev->vlan_enabled, dev->vlan_filtering); 805 806 /* Create an untagged VLAN entry for the default PVID in case 807 * CONFIG_VLAN_8021Q is disabled and there are no calls to 808 * dsa_user_vlan_rx_add_vid() to create the default VLAN 809 * entry. Do this only when the tagging protocol is not 810 * DSA_TAG_PROTO_NONE 811 */ 812 v = &dev->vlans[def_vid]; 813 b53_for_each_port(dev, i) { 814 if (!b53_vlan_port_may_join_untagged(ds, i)) 815 continue; 816 817 vl.members |= BIT(i); 818 if (!b53_vlan_port_needs_forced_tagged(ds, i)) 819 vl.untag = vl.members; 820 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(i), 821 def_vid); 822 } 823 b53_set_vlan_entry(dev, def_vid, &vl); 824 825 if (dev->vlan_filtering) { 826 /* Upon initial call we have not set-up any VLANs, but upon 827 * system resume, we need to restore all VLAN entries. 
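 * (Only VLANs with a non-zero member mask are programmed, and each
 * one is fast-aged afterwards to drop stale ARL entries.)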
828 */ 829 for (vid = def_vid + 1; vid < dev->num_vlans; vid++) { 830 v = &dev->vlans[vid]; 831 832 if (!v->members) 833 continue; 834 835 b53_set_vlan_entry(dev, vid, v); 836 b53_fast_age_vlan(dev, vid); 837 } 838 839 b53_for_each_port(dev, i) { 840 if (!dsa_is_cpu_port(ds, i)) 841 b53_write16(dev, B53_VLAN_PAGE, 842 B53_VLAN_PORT_DEF_TAG(i), 843 dev->ports[i].pvid); 844 } 845 } 846 847 return 0; 848 } 849 EXPORT_SYMBOL(b53_configure_vlan); 850 851 static void b53_switch_reset_gpio(struct b53_device *dev) { 853 int gpio = dev->reset_gpio; 854 855 if (gpio < 0) 856 return; 857 858 /* Reset sequence: RESET low(50ms)->high(20ms) 859 */ 860 gpio_set_value(gpio, 0); 861 mdelay(50); 862 863 gpio_set_value(gpio, 1); 864 mdelay(20); 865 866 dev->current_page = 0xff; 867 } 868 869 static int b53_switch_reset(struct b53_device *dev) 870 { 871 unsigned int timeout = 1000; 872 u8 mgmt, reg; 873 874 b53_switch_reset_gpio(dev); 875 876 if (is539x(dev)) { 877 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83); 878 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00); 879 } 880 881 /* This is specific to 58xx devices here, do not use is58xx() which 882 * covers the larger Starfighter 2 family, including 7445/7278 which 883 * still use this driver as a library and need to perform the reset 884 * earlier. 885 */ 886 if (dev->chip_id == BCM58XX_DEVICE_ID || 887 dev->chip_id == BCM583XX_DEVICE_ID) { 888 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg); 889 reg |= SW_RST | EN_SW_RST | EN_CH_RST; 890 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg); 891 892 do { 893 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg); 894 if (!(reg & SW_RST)) 895 break; 896 897 usleep_range(1000, 2000); 898 } while (timeout-- > 0); 899 900 if (timeout == 0) { 901 dev_err(dev->dev, 902 "Timeout waiting for SW_RST to clear!\n"); 903 return -ETIMEDOUT; 904 } 905 } 906 907 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 908 909 if (!(mgmt & SM_SW_FWD_EN)) { 910 mgmt &= ~SM_SW_FWD_MODE; 911 mgmt |= SM_SW_FWD_EN; 912 913 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 914 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 915 916 if (!(mgmt & SM_SW_FWD_EN)) { 917 dev_err(dev->dev, "Failed to enable switch!\n"); 918 return -EINVAL; 919 } 920 } 921 922 b53_enable_mib(dev); 923 b53_enable_stp(dev); 924 925 return b53_flush_arl(dev, FAST_AGE_STATIC); 926 } 927 928 static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg) 929 { 930 struct b53_device *priv = ds->priv; 931 u16 value = 0; 932 int ret; 933 934 if (priv->ops->phy_read16) 935 ret = priv->ops->phy_read16(priv, addr, reg, &value); 936 else 937 ret = b53_read16(priv, B53_PORT_MII_PAGE(addr), 938 reg * 2, &value); 939 940 return ret ?
ret : value; 941 } 942 943 static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val) 944 { 945 struct b53_device *priv = ds->priv; 946 947 if (priv->ops->phy_write16) 948 return priv->ops->phy_write16(priv, addr, reg, val); 949 950 return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val); 951 } 952 953 static int b53_reset_switch(struct b53_device *priv) 954 { 955 /* reset vlans */ 956 memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans); 957 memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports); 958 959 priv->serdes_lane = B53_INVALID_LANE; 960 961 return b53_switch_reset(priv); 962 } 963 964 static int b53_apply_config(struct b53_device *priv) 965 { 966 /* disable switching */ 967 b53_set_forwarding(priv, 0); 968 969 b53_configure_vlan(priv->ds); 970 971 /* enable switching */ 972 b53_set_forwarding(priv, 1); 973 974 return 0; 975 } 976 977 static void b53_reset_mib(struct b53_device *priv) 978 { 979 u8 gc; 980 981 b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); 982 983 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB); 984 msleep(1); 985 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB); 986 msleep(1); 987 } 988 989 static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev) 990 { 991 if (is5365(dev)) 992 return b53_mibs_65; 993 else if (is63xx(dev)) 994 return b53_mibs_63xx; 995 else if (is58xx(dev)) 996 return b53_mibs_58xx; 997 else 998 return b53_mibs; 999 } 1000 1001 static unsigned int b53_get_mib_size(struct b53_device *dev) 1002 { 1003 if (is5365(dev)) 1004 return B53_MIBS_65_SIZE; 1005 else if (is63xx(dev)) 1006 return B53_MIBS_63XX_SIZE; 1007 else if (is58xx(dev)) 1008 return B53_MIBS_58XX_SIZE; 1009 else 1010 return B53_MIBS_SIZE; 1011 } 1012 1013 static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port) 1014 { 1015 /* These ports typically do not have built-in PHYs */ 1016 switch (port) { 1017 case B53_CPU_PORT_25: 1018 case 7: 1019 case B53_CPU_PORT: 1020 return NULL; 1021 } 1022 1023 return mdiobus_get_phy(ds->user_mii_bus, port); 1024 } 1025 1026 void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset, 1027 uint8_t *data) 1028 { 1029 struct b53_device *dev = ds->priv; 1030 const struct b53_mib_desc *mibs = b53_get_mib(dev); 1031 unsigned int mib_size = b53_get_mib_size(dev); 1032 struct phy_device *phydev; 1033 unsigned int i; 1034 1035 if (stringset == ETH_SS_STATS) { 1036 for (i = 0; i < mib_size; i++) 1037 ethtool_puts(&data, mibs[i].name); 1038 } else if (stringset == ETH_SS_PHY_STATS) { 1039 phydev = b53_get_phy_device(ds, port); 1040 if (!phydev) 1041 return; 1042 1043 phy_ethtool_get_strings(phydev, data); 1044 } 1045 } 1046 EXPORT_SYMBOL(b53_get_strings); 1047 1048 void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) 1049 { 1050 struct b53_device *dev = ds->priv; 1051 const struct b53_mib_desc *mibs = b53_get_mib(dev); 1052 unsigned int mib_size = b53_get_mib_size(dev); 1053 const struct b53_mib_desc *s; 1054 unsigned int i; 1055 u64 val = 0; 1056 1057 if (is5365(dev) && port == 5) 1058 port = 8; 1059 1060 mutex_lock(&dev->stats_mutex); 1061 1062 for (i = 0; i < mib_size; i++) { 1063 s = &mibs[i]; 1064 1065 if (s->size == 8) { 1066 b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val); 1067 } else { 1068 u32 val32; 1069 1070 b53_read32(dev, B53_MIB_PAGE(port), s->offset, 1071 &val32); 1072 val = val32; 1073 } 1074 data[i] = (u64)val; 1075 } 1076 1077 mutex_unlock(&dev->stats_mutex); 1078 } 1079 
EXPORT_SYMBOL(b53_get_ethtool_stats); 1080 1081 void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data) 1082 { 1083 struct phy_device *phydev; 1084 1085 phydev = b53_get_phy_device(ds, port); 1086 if (!phydev) 1087 return; 1088 1089 phy_ethtool_get_stats(phydev, NULL, data); 1090 } 1091 EXPORT_SYMBOL(b53_get_ethtool_phy_stats); 1092 1093 int b53_get_sset_count(struct dsa_switch *ds, int port, int sset) 1094 { 1095 struct b53_device *dev = ds->priv; 1096 struct phy_device *phydev; 1097 1098 if (sset == ETH_SS_STATS) { 1099 return b53_get_mib_size(dev); 1100 } else if (sset == ETH_SS_PHY_STATS) { 1101 phydev = b53_get_phy_device(ds, port); 1102 if (!phydev) 1103 return 0; 1104 1105 return phy_ethtool_get_sset_count(phydev); 1106 } 1107 1108 return 0; 1109 } 1110 EXPORT_SYMBOL(b53_get_sset_count); 1111 1112 enum b53_devlink_resource_id { 1113 B53_DEVLINK_PARAM_ID_VLAN_TABLE, 1114 }; 1115 1116 static u64 b53_devlink_vlan_table_get(void *priv) 1117 { 1118 struct b53_device *dev = priv; 1119 struct b53_vlan *vl; 1120 unsigned int i; 1121 u64 count = 0; 1122 1123 for (i = 0; i < dev->num_vlans; i++) { 1124 vl = &dev->vlans[i]; 1125 if (vl->members) 1126 count++; 1127 } 1128 1129 return count; 1130 } 1131 1132 int b53_setup_devlink_resources(struct dsa_switch *ds) 1133 { 1134 struct devlink_resource_size_params size_params; 1135 struct b53_device *dev = ds->priv; 1136 int err; 1137 1138 devlink_resource_size_params_init(&size_params, dev->num_vlans, 1139 dev->num_vlans, 1140 1, DEVLINK_RESOURCE_UNIT_ENTRY); 1141 1142 err = dsa_devlink_resource_register(ds, "VLAN", dev->num_vlans, 1143 B53_DEVLINK_PARAM_ID_VLAN_TABLE, 1144 DEVLINK_RESOURCE_ID_PARENT_TOP, 1145 &size_params); 1146 if (err) 1147 goto out; 1148 1149 dsa_devlink_resource_occ_get_register(ds, 1150 B53_DEVLINK_PARAM_ID_VLAN_TABLE, 1151 b53_devlink_vlan_table_get, dev); 1152 1153 return 0; 1154 out: 1155 dsa_devlink_resources_unregister(ds); 1156 return err; 1157 } 1158 EXPORT_SYMBOL(b53_setup_devlink_resources); 1159 1160 static int b53_setup(struct dsa_switch *ds) 1161 { 1162 struct b53_device *dev = ds->priv; 1163 struct b53_vlan *vl; 1164 unsigned int port; 1165 u16 pvid; 1166 int ret; 1167 1168 /* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set 1169 * which forces the CPU port to be tagged in all VLANs. 1170 */ 1171 ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE; 1172 1173 /* The switch does not tell us the original VLAN for untagged 1174 * packets, so keep the CPU port always tagged. 1175 */ 1176 ds->untag_vlan_aware_bridge_pvid = true; 1177 1178 ret = b53_reset_switch(dev); 1179 if (ret) { 1180 dev_err(ds->dev, "failed to reset switch\n"); 1181 return ret; 1182 } 1183 1184 /* setup default vlan for filtering mode */ 1185 pvid = b53_default_pvid(dev); 1186 vl = &dev->vlans[pvid]; 1187 b53_for_each_port(dev, port) { 1188 vl->members |= BIT(port); 1189 if (!b53_vlan_port_needs_forced_tagged(ds, port)) 1190 vl->untag |= BIT(port); 1191 } 1192 1193 b53_reset_mib(dev); 1194 1195 ret = b53_apply_config(dev); 1196 if (ret) { 1197 dev_err(ds->dev, "failed to apply configuration\n"); 1198 return ret; 1199 } 1200 1201 /* Configure IMP/CPU port, disable all other ports. 
Enabled 1202 * ports will be configured with .port_enable 1203 */ 1204 for (port = 0; port < dev->num_ports; port++) { 1205 if (dsa_is_cpu_port(ds, port)) 1206 b53_enable_cpu_port(dev, port); 1207 else 1208 b53_disable_port(ds, port); 1209 } 1210 1211 return b53_setup_devlink_resources(ds); 1212 } 1213 1214 static void b53_teardown(struct dsa_switch *ds) 1215 { 1216 dsa_devlink_resources_unregister(ds); 1217 } 1218 1219 static void b53_force_link(struct b53_device *dev, int port, int link) 1220 { 1221 u8 reg, val, off; 1222 1223 /* Override the port settings */ 1224 if (port == dev->imp_port) { 1225 off = B53_PORT_OVERRIDE_CTRL; 1226 val = PORT_OVERRIDE_EN; 1227 } else { 1228 off = B53_GMII_PORT_OVERRIDE_CTRL(port); 1229 val = GMII_PO_EN; 1230 } 1231 1232 b53_read8(dev, B53_CTRL_PAGE, off, ®); 1233 reg |= val; 1234 if (link) 1235 reg |= PORT_OVERRIDE_LINK; 1236 else 1237 reg &= ~PORT_OVERRIDE_LINK; 1238 b53_write8(dev, B53_CTRL_PAGE, off, reg); 1239 } 1240 1241 static void b53_force_port_config(struct b53_device *dev, int port, 1242 int speed, int duplex, 1243 bool tx_pause, bool rx_pause) 1244 { 1245 u8 reg, val, off; 1246 1247 /* Override the port settings */ 1248 if (port == dev->imp_port) { 1249 off = B53_PORT_OVERRIDE_CTRL; 1250 val = PORT_OVERRIDE_EN; 1251 } else { 1252 off = B53_GMII_PORT_OVERRIDE_CTRL(port); 1253 val = GMII_PO_EN; 1254 } 1255 1256 b53_read8(dev, B53_CTRL_PAGE, off, ®); 1257 reg |= val; 1258 if (duplex == DUPLEX_FULL) 1259 reg |= PORT_OVERRIDE_FULL_DUPLEX; 1260 else 1261 reg &= ~PORT_OVERRIDE_FULL_DUPLEX; 1262 1263 switch (speed) { 1264 case 2000: 1265 reg |= PORT_OVERRIDE_SPEED_2000M; 1266 fallthrough; 1267 case SPEED_1000: 1268 reg |= PORT_OVERRIDE_SPEED_1000M; 1269 break; 1270 case SPEED_100: 1271 reg |= PORT_OVERRIDE_SPEED_100M; 1272 break; 1273 case SPEED_10: 1274 reg |= PORT_OVERRIDE_SPEED_10M; 1275 break; 1276 default: 1277 dev_err(dev->dev, "unknown speed: %d\n", speed); 1278 return; 1279 } 1280 1281 if (rx_pause) 1282 reg |= PORT_OVERRIDE_RX_FLOW; 1283 if (tx_pause) 1284 reg |= PORT_OVERRIDE_TX_FLOW; 1285 1286 b53_write8(dev, B53_CTRL_PAGE, off, reg); 1287 } 1288 1289 static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port, 1290 phy_interface_t interface) 1291 { 1292 struct b53_device *dev = ds->priv; 1293 u8 rgmii_ctrl = 0, off; 1294 1295 if (port == dev->imp_port) 1296 off = B53_RGMII_CTRL_IMP; 1297 else 1298 off = B53_RGMII_CTRL_P(port); 1299 1300 b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl); 1301 1302 switch (interface) { 1303 case PHY_INTERFACE_MODE_RGMII_ID: 1304 rgmii_ctrl |= (RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC); 1305 break; 1306 case PHY_INTERFACE_MODE_RGMII_RXID: 1307 rgmii_ctrl &= ~(RGMII_CTRL_DLL_TXC); 1308 rgmii_ctrl |= RGMII_CTRL_DLL_RXC; 1309 break; 1310 case PHY_INTERFACE_MODE_RGMII_TXID: 1311 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC); 1312 rgmii_ctrl |= RGMII_CTRL_DLL_TXC; 1313 break; 1314 case PHY_INTERFACE_MODE_RGMII: 1315 default: 1316 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC); 1317 break; 1318 } 1319 1320 if (port != dev->imp_port) { 1321 if (is63268(dev)) 1322 rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE; 1323 1324 rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII; 1325 } 1326 1327 b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl); 1328 1329 dev_dbg(ds->dev, "Configured port %d for %s\n", port, 1330 phy_modes(interface)); 1331 } 1332 1333 static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port, 1334 phy_interface_t interface) 1335 { 1336 struct b53_device *dev = ds->priv; 1337 u8 rgmii_ctrl = 0, off; 1338 1339 if (port 
== dev->imp_port) 1340 off = B53_RGMII_CTRL_IMP; 1341 else 1342 off = B53_RGMII_CTRL_P(port); 1343 1344 /* Configure the port RGMII clock delay with the DLLs disabled and 1345 * tx_clk aligned timing (restoring the reset defaults) 1346 */ 1347 b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl); 1348 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC | 1349 RGMII_CTRL_TIMING_SEL); 1350 1351 /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make 1352 * sure that we enable the port TX clock internal delay to 1353 * account for this internal delay that is inserted, otherwise 1354 * the switch won't be able to receive correctly. 1355 * 1356 * PHY_INTERFACE_MODE_RGMII means that we are not introducing 1357 * any delay on either transmission or reception, so the 1358 * BCM53125 must also be configured accordingly to account for 1359 * the lack of delay and introduce the delays itself on both 1360 * clock paths. 1361 * 1362 * The BCM53125 switch has its RX clock and TX clock control 1363 * swapped, hence the reason why we modify the TX clock path in 1364 * the "RGMII" case 1365 */ 1366 if (interface == PHY_INTERFACE_MODE_RGMII_TXID) 1367 rgmii_ctrl |= RGMII_CTRL_DLL_TXC; 1368 if (interface == PHY_INTERFACE_MODE_RGMII) 1369 rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC; 1370 rgmii_ctrl |= RGMII_CTRL_TIMING_SEL; 1371 b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl); 1372 1373 dev_info(ds->dev, "Configured port %d for %s\n", port, 1374 phy_modes(interface)); 1375 } 1376 1377 static void b53_adjust_5325_mii(struct dsa_switch *ds, int port) 1378 { 1379 struct b53_device *dev = ds->priv; 1380 u8 reg = 0; 1381 1382 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 1383 &reg); 1384 1385 /* reverse mii needs to be enabled */ 1386 if (!(reg & PORT_OVERRIDE_RV_MII_25)) { 1387 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 1388 reg | PORT_OVERRIDE_RV_MII_25); 1389 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 1390 &reg); 1391 1392 if (!(reg & PORT_OVERRIDE_RV_MII_25)) { 1393 dev_err(ds->dev, 1394 "Failed to enable reverse MII mode\n"); 1395 return; 1396 } 1397 } 1398 } 1399 1400 void b53_port_event(struct dsa_switch *ds, int port) 1401 { 1402 struct b53_device *dev = ds->priv; 1403 bool link; 1404 u16 sts; 1405 1406 b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts); 1407 link = !!(sts & BIT(port)); 1408 dsa_port_phylink_mac_change(ds, port, link); 1409 } 1410 EXPORT_SYMBOL(b53_port_event); 1411 1412 static void b53_phylink_get_caps(struct dsa_switch *ds, int port, 1413 struct phylink_config *config) 1414 { 1415 struct b53_device *dev = ds->priv; 1416 1417 /* Internal ports need GMII for PHYLIB */ 1418 __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces); 1419 1420 /* These switches appear to support MII and RevMII too, but beyond 1421 * this, the code gives very few clues. FIXME: We probably need more 1422 * interface modes here. 1423 * 1424 * According to b53_srab_mux_init(), ports 3..5 can support: 1425 * SGMII, MII, GMII, RGMII or INTERNAL depending on the MUX setting. 1426 * However, the interface mode read from the MUX configuration is 1427 * not passed back to DSA, so phylink uses NA. 1428 * DT can specify RGMII for ports 0, 1. 1429 * For MDIO, port 8 can be RGMII_TXID. 1430 */ 1431 __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces); 1432 __set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces); 1433 1434 config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | 1435 MAC_10 | MAC_100; 1436 1437 /* 5325/5365 are not capable of gigabit speeds, everything else is.
1438 * Note: the original code also excluded Gigabit for MII, RevMII 1439 * and 802.3z modes. MII and RevMII are not able to work above 100M, 1440 * so they will be excluded by the generic validator implementation. 1441 * However, the exclusion of Gigabit for 802.3z just seems wrong. 1442 */ 1443 if (!(is5325(dev) || is5365(dev))) 1444 config->mac_capabilities |= MAC_1000; 1445 1446 /* Get the implementation specific capabilities */ 1447 if (dev->ops->phylink_get_caps) 1448 dev->ops->phylink_get_caps(dev, port, config); 1449 } 1450 1451 static struct phylink_pcs *b53_phylink_mac_select_pcs(struct phylink_config *config, 1452 phy_interface_t interface) 1453 { 1454 struct dsa_port *dp = dsa_phylink_to_port(config); 1455 struct b53_device *dev = dp->ds->priv; 1456 1457 if (!dev->ops->phylink_mac_select_pcs) 1458 return NULL; 1459 1460 return dev->ops->phylink_mac_select_pcs(dev, dp->index, interface); 1461 } 1462 1463 static void b53_phylink_mac_config(struct phylink_config *config, 1464 unsigned int mode, 1465 const struct phylink_link_state *state) 1466 { 1467 struct dsa_port *dp = dsa_phylink_to_port(config); 1468 phy_interface_t interface = state->interface; 1469 struct dsa_switch *ds = dp->ds; 1470 struct b53_device *dev = ds->priv; 1471 int port = dp->index; 1472 1473 if (is63xx(dev) && port >= B53_63XX_RGMII0) 1474 b53_adjust_63xx_rgmii(ds, port, interface); 1475 1476 if (mode == MLO_AN_FIXED) { 1477 if (is531x5(dev) && phy_interface_mode_is_rgmii(interface)) 1478 b53_adjust_531x5_rgmii(ds, port, interface); 1479 1480 /* configure MII port if necessary */ 1481 if (is5325(dev)) 1482 b53_adjust_5325_mii(ds, port); 1483 } 1484 } 1485 1486 static void b53_phylink_mac_link_down(struct phylink_config *config, 1487 unsigned int mode, 1488 phy_interface_t interface) 1489 { 1490 struct dsa_port *dp = dsa_phylink_to_port(config); 1491 struct b53_device *dev = dp->ds->priv; 1492 int port = dp->index; 1493 1494 if (mode == MLO_AN_PHY) 1495 return; 1496 1497 if (mode == MLO_AN_FIXED) { 1498 b53_force_link(dev, port, false); 1499 return; 1500 } 1501 1502 if (phy_interface_mode_is_8023z(interface) && 1503 dev->ops->serdes_link_set) 1504 dev->ops->serdes_link_set(dev, port, mode, interface, false); 1505 } 1506 1507 static void b53_phylink_mac_link_up(struct phylink_config *config, 1508 struct phy_device *phydev, 1509 unsigned int mode, 1510 phy_interface_t interface, 1511 int speed, int duplex, 1512 bool tx_pause, bool rx_pause) 1513 { 1514 struct dsa_port *dp = dsa_phylink_to_port(config); 1515 struct dsa_switch *ds = dp->ds; 1516 struct b53_device *dev = ds->priv; 1517 struct ethtool_keee *p = &dev->ports[dp->index].eee; 1518 int port = dp->index; 1519 1520 if (mode == MLO_AN_PHY) { 1521 /* Re-negotiate EEE if it was enabled already */ 1522 p->eee_enabled = b53_eee_init(ds, port, phydev); 1523 return; 1524 } 1525 1526 if (mode == MLO_AN_FIXED) { 1527 /* Force flow control on BCM5301x's CPU port */ 1528 if (is5301x(dev) && dsa_is_cpu_port(ds, port)) 1529 tx_pause = rx_pause = true; 1530 1531 b53_force_port_config(dev, port, speed, duplex, 1532 tx_pause, rx_pause); 1533 b53_force_link(dev, port, true); 1534 return; 1535 } 1536 1537 if (phy_interface_mode_is_8023z(interface) && 1538 dev->ops->serdes_link_set) 1539 dev->ops->serdes_link_set(dev, port, mode, interface, true); 1540 } 1541 1542 int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, 1543 struct netlink_ext_ack *extack) 1544 { 1545 struct b53_device *dev = ds->priv; 1546 1547 if (dev->vlan_filtering != vlan_filtering) {
1547 dev->vlan_filtering = vlan_filtering; 1548 b53_apply_config(dev); 1549 } 1550 1551 return 0; 1552 } 1553 EXPORT_SYMBOL(b53_vlan_filtering); 1554 1555 static int b53_vlan_prepare(struct dsa_switch *ds, int port, 1556 const struct switchdev_obj_port_vlan *vlan) 1557 { 1558 struct b53_device *dev = ds->priv; 1559 1560 if ((is5325(dev) || is5365(dev)) && vlan->vid == 0) 1561 return -EOPNOTSUPP; 1562 1563 /* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of 1564 * receiving VLAN tagged frames at all, we can still allow the port to 1565 * be configured for egress untagged. 1566 */ 1567 if (dev->chip_id == BCM7278_DEVICE_ID && port == 7 && 1568 !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) 1569 return -EINVAL; 1570 1571 if (vlan->vid >= dev->num_vlans) 1572 return -ERANGE; 1573 1574 b53_enable_vlan(dev, port, true, dev->vlan_filtering); 1575 1576 return 0; 1577 } 1578 1579 int b53_vlan_add(struct dsa_switch *ds, int port, 1580 const struct switchdev_obj_port_vlan *vlan, 1581 struct netlink_ext_ack *extack) 1582 { 1583 struct b53_device *dev = ds->priv; 1584 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 1585 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; 1586 struct b53_vlan *vl; 1587 u16 old_pvid, new_pvid; 1588 int err; 1589 1590 err = b53_vlan_prepare(ds, port, vlan); 1591 if (err) 1592 return err; 1593 1594 if (vlan->vid == 0) 1595 return 0; 1596 1597 old_pvid = dev->ports[port].pvid; 1598 if (pvid) 1599 new_pvid = vlan->vid; 1600 else if (!pvid && vlan->vid == old_pvid) 1601 new_pvid = b53_default_pvid(dev); 1602 else 1603 new_pvid = old_pvid; 1604 dev->ports[port].pvid = new_pvid; 1605 1606 vl = &dev->vlans[vlan->vid]; 1607 1608 if (dsa_is_cpu_port(ds, port)) 1609 untagged = false; 1610 1611 vl->members |= BIT(port); 1612 if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port)) 1613 vl->untag |= BIT(port); 1614 else 1615 vl->untag &= ~BIT(port); 1616 1617 if (!dev->vlan_filtering) 1618 return 0; 1619 1620 b53_set_vlan_entry(dev, vlan->vid, vl); 1621 b53_fast_age_vlan(dev, vlan->vid); 1622 1623 if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) { 1624 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), 1625 new_pvid); 1626 b53_fast_age_vlan(dev, old_pvid); 1627 } 1628 1629 return 0; 1630 } 1631 EXPORT_SYMBOL(b53_vlan_add); 1632 1633 int b53_vlan_del(struct dsa_switch *ds, int port, 1634 const struct switchdev_obj_port_vlan *vlan) 1635 { 1636 struct b53_device *dev = ds->priv; 1637 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 1638 struct b53_vlan *vl; 1639 u16 pvid; 1640 1641 if (vlan->vid == 0) 1642 return 0; 1643 1644 pvid = dev->ports[port].pvid; 1645 1646 vl = &dev->vlans[vlan->vid]; 1647 1648 vl->members &= ~BIT(port); 1649 1650 if (pvid == vlan->vid) 1651 pvid = b53_default_pvid(dev); 1652 dev->ports[port].pvid = pvid; 1653 1654 if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port)) 1655 vl->untag &= ~(BIT(port)); 1656 1657 if (!dev->vlan_filtering) 1658 return 0; 1659 1660 b53_set_vlan_entry(dev, vlan->vid, vl); 1661 b53_fast_age_vlan(dev, vlan->vid); 1662 1663 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid); 1664 b53_fast_age_vlan(dev, pvid); 1665 1666 return 0; 1667 } 1668 EXPORT_SYMBOL(b53_vlan_del); 1669 1670 /* Address Resolution Logic routines. Caller must hold &dev->arl_mutex. 
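 * The ARL is accessed indirectly: the MAC/VID index registers are
 * written, a read command is issued through B53_ARLTBL_RW_CTRL, the
 * matching (or first free) bin entry is modified, then written back.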
*/ 1671 static int b53_arl_op_wait(struct b53_device *dev) 1672 { 1673 unsigned int timeout = 10; 1674 u8 reg; 1675 1676 do { 1677 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); 1678 if (!(reg & ARLTBL_START_DONE)) 1679 return 0; 1680 1681 usleep_range(1000, 2000); 1682 } while (timeout--); 1683 1684 dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg); 1685 1686 return -ETIMEDOUT; 1687 } 1688 1689 static int b53_arl_rw_op(struct b53_device *dev, unsigned int op) 1690 { 1691 u8 reg; 1692 1693 if (op > ARLTBL_RW) 1694 return -EINVAL; 1695 1696 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); 1697 reg |= ARLTBL_START_DONE; 1698 if (op) 1699 reg |= ARLTBL_RW; 1700 else 1701 reg &= ~ARLTBL_RW; 1702 if (dev->vlan_enabled) 1703 reg &= ~ARLTBL_IVL_SVL_SELECT; 1704 else 1705 reg |= ARLTBL_IVL_SVL_SELECT; 1706 b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg); 1707 1708 return b53_arl_op_wait(dev); 1709 } 1710 1711 static int b53_arl_read(struct b53_device *dev, u64 mac, 1712 u16 vid, struct b53_arl_entry *ent, u8 *idx) 1713 { 1714 DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES); 1715 unsigned int i; 1716 int ret; 1717 1718 ret = b53_arl_op_wait(dev); 1719 if (ret) 1720 return ret; 1721 1722 bitmap_zero(free_bins, dev->num_arl_bins); 1723 1724 /* Read the bins */ 1725 for (i = 0; i < dev->num_arl_bins; i++) { 1726 u64 mac_vid; 1727 u32 fwd_entry; 1728 1729 b53_read64(dev, B53_ARLIO_PAGE, 1730 B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid); 1731 b53_read32(dev, B53_ARLIO_PAGE, 1732 B53_ARLTBL_DATA_ENTRY(i), &fwd_entry); 1733 b53_arl_to_entry(ent, mac_vid, fwd_entry); 1734 1735 if (!(fwd_entry & ARLTBL_VALID)) { 1736 set_bit(i, free_bins); 1737 continue; 1738 } 1739 if ((mac_vid & ARLTBL_MAC_MASK) != mac) 1740 continue; 1741 if (dev->vlan_enabled && 1742 ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid) 1743 continue; 1744 *idx = i; 1745 return 0; 1746 } 1747 1748 *idx = find_first_bit(free_bins, dev->num_arl_bins); 1749 return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT; 1750 } 1751 1752 static int b53_arl_op(struct b53_device *dev, int op, int port, 1753 const unsigned char *addr, u16 vid, bool is_valid) 1754 { 1755 struct b53_arl_entry ent; 1756 u32 fwd_entry; 1757 u64 mac, mac_vid = 0; 1758 u8 idx = 0; 1759 int ret; 1760 1761 /* Convert the array into a 64-bit MAC */ 1762 mac = ether_addr_to_u64(addr); 1763 1764 /* Perform a read for the given MAC and VID */ 1765 b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac); 1766 b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid); 1767 1768 /* Issue a read operation for this MAC */ 1769 ret = b53_arl_rw_op(dev, 1); 1770 if (ret) 1771 return ret; 1772 1773 ret = b53_arl_read(dev, mac, vid, &ent, &idx); 1774 1775 /* If this is a read, just finish now */ 1776 if (op) 1777 return ret; 1778 1779 switch (ret) { 1780 case -ETIMEDOUT: 1781 return ret; 1782 case -ENOSPC: 1783 dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n", 1784 addr, vid); 1785 return is_valid ? 
ret : 0; 1786 case -ENOENT: 1787 /* We could not find a matching MAC, so reset to a new entry */ 1788 dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n", 1789 addr, vid, idx); 1790 fwd_entry = 0; 1791 break; 1792 default: 1793 dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n", 1794 addr, vid, idx); 1795 break; 1796 } 1797 1798 /* For multicast address, the port is a bitmask and the validity 1799 * is determined by having at least one port being still active 1800 */ 1801 if (!is_multicast_ether_addr(addr)) { 1802 ent.port = port; 1803 ent.is_valid = is_valid; 1804 } else { 1805 if (is_valid) 1806 ent.port |= BIT(port); 1807 else 1808 ent.port &= ~BIT(port); 1809 1810 ent.is_valid = !!(ent.port); 1811 } 1812 1813 ent.vid = vid; 1814 ent.is_static = true; 1815 ent.is_age = false; 1816 memcpy(ent.mac, addr, ETH_ALEN); 1817 b53_arl_from_entry(&mac_vid, &fwd_entry, &ent); 1818 1819 b53_write64(dev, B53_ARLIO_PAGE, 1820 B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid); 1821 b53_write32(dev, B53_ARLIO_PAGE, 1822 B53_ARLTBL_DATA_ENTRY(idx), fwd_entry); 1823 1824 return b53_arl_rw_op(dev, 0); 1825 } 1826 1827 int b53_fdb_add(struct dsa_switch *ds, int port, 1828 const unsigned char *addr, u16 vid, 1829 struct dsa_db db) 1830 { 1831 struct b53_device *priv = ds->priv; 1832 int ret; 1833 1834 /* 5325 and 5365 require some more massaging, but could 1835 * be supported eventually 1836 */ 1837 if (is5325(priv) || is5365(priv)) 1838 return -EOPNOTSUPP; 1839 1840 mutex_lock(&priv->arl_mutex); 1841 ret = b53_arl_op(priv, 0, port, addr, vid, true); 1842 mutex_unlock(&priv->arl_mutex); 1843 1844 return ret; 1845 } 1846 EXPORT_SYMBOL(b53_fdb_add); 1847 1848 int b53_fdb_del(struct dsa_switch *ds, int port, 1849 const unsigned char *addr, u16 vid, 1850 struct dsa_db db) 1851 { 1852 struct b53_device *priv = ds->priv; 1853 int ret; 1854 1855 mutex_lock(&priv->arl_mutex); 1856 ret = b53_arl_op(priv, 0, port, addr, vid, false); 1857 mutex_unlock(&priv->arl_mutex); 1858 1859 return ret; 1860 } 1861 EXPORT_SYMBOL(b53_fdb_del); 1862 1863 static int b53_arl_search_wait(struct b53_device *dev) 1864 { 1865 unsigned int timeout = 1000; 1866 u8 reg; 1867 1868 do { 1869 b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, ®); 1870 if (!(reg & ARL_SRCH_STDN)) 1871 return 0; 1872 1873 if (reg & ARL_SRCH_VLID) 1874 return 0; 1875 1876 usleep_range(1000, 2000); 1877 } while (timeout--); 1878 1879 return -ETIMEDOUT; 1880 } 1881 1882 static void b53_arl_search_rd(struct b53_device *dev, u8 idx, 1883 struct b53_arl_entry *ent) 1884 { 1885 u64 mac_vid; 1886 u32 fwd_entry; 1887 1888 b53_read64(dev, B53_ARLIO_PAGE, 1889 B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid); 1890 b53_read32(dev, B53_ARLIO_PAGE, 1891 B53_ARL_SRCH_RSTL(idx), &fwd_entry); 1892 b53_arl_to_entry(ent, mac_vid, fwd_entry); 1893 } 1894 1895 static int b53_fdb_copy(int port, const struct b53_arl_entry *ent, 1896 dsa_fdb_dump_cb_t *cb, void *data) 1897 { 1898 if (!ent->is_valid) 1899 return 0; 1900 1901 if (port != ent->port) 1902 return 0; 1903 1904 return cb(ent->mac, ent->vid, ent->is_static, data); 1905 } 1906 1907 int b53_fdb_dump(struct dsa_switch *ds, int port, 1908 dsa_fdb_dump_cb_t *cb, void *data) 1909 { 1910 struct b53_device *priv = ds->priv; 1911 struct b53_arl_entry results[2]; 1912 unsigned int count = 0; 1913 int ret; 1914 u8 reg; 1915 1916 mutex_lock(&priv->arl_mutex); 1917 1918 /* Start search operation */ 1919 reg = ARL_SRCH_STDN; 1920 b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg); 1921 1922 do { 1923 ret = b53_arl_search_wait(priv); 1924 if (ret) 
1925 break; 1926 1927 b53_arl_search_rd(priv, 0, &results[0]); 1928 ret = b53_fdb_copy(port, &results[0], cb, data); 1929 if (ret) 1930 break; 1931 1932 if (priv->num_arl_bins > 2) { 1933 b53_arl_search_rd(priv, 1, &results[1]); 1934 ret = b53_fdb_copy(port, &results[1], cb, data); 1935 if (ret) 1936 break; 1937 1938 if (!results[0].is_valid && !results[1].is_valid) 1939 break; 1940 } 1941 1942 } while (count++ < b53_max_arl_entries(priv) / 2); 1943 1944 mutex_unlock(&priv->arl_mutex); 1945 1946 return 0; 1947 } 1948 EXPORT_SYMBOL(b53_fdb_dump); 1949 1950 int b53_mdb_add(struct dsa_switch *ds, int port, 1951 const struct switchdev_obj_port_mdb *mdb, 1952 struct dsa_db db) 1953 { 1954 struct b53_device *priv = ds->priv; 1955 int ret; 1956 1957 /* 5325 and 5365 require some more massaging, but could 1958 * be supported eventually 1959 */ 1960 if (is5325(priv) || is5365(priv)) 1961 return -EOPNOTSUPP; 1962 1963 mutex_lock(&priv->arl_mutex); 1964 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true); 1965 mutex_unlock(&priv->arl_mutex); 1966 1967 return ret; 1968 } 1969 EXPORT_SYMBOL(b53_mdb_add); 1970 1971 int b53_mdb_del(struct dsa_switch *ds, int port, 1972 const struct switchdev_obj_port_mdb *mdb, 1973 struct dsa_db db) 1974 { 1975 struct b53_device *priv = ds->priv; 1976 int ret; 1977 1978 mutex_lock(&priv->arl_mutex); 1979 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false); 1980 mutex_unlock(&priv->arl_mutex); 1981 if (ret) 1982 dev_err(ds->dev, "failed to delete MDB entry\n"); 1983 1984 return ret; 1985 } 1986 EXPORT_SYMBOL(b53_mdb_del); 1987 1988 int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, 1989 bool *tx_fwd_offload, struct netlink_ext_ack *extack) 1990 { 1991 struct b53_device *dev = ds->priv; 1992 struct b53_vlan *vl; 1993 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; 1994 u16 pvlan, reg, pvid; 1995 unsigned int i; 1996 1997 /* On 7278, port 7 which connects to the ASP should only receive 1998 * traffic from matching CFP rules. 
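 * (Bridging that port is therefore rejected with -EINVAL below.)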
1999 */ 2000 if (dev->chip_id == BCM7278_DEVICE_ID && port == 7) 2001 return -EINVAL; 2002 2003 pvid = b53_default_pvid(dev); 2004 vl = &dev->vlans[pvid]; 2005 2006 if (dev->vlan_filtering) { 2007 /* Make this port leave the "join all VLANs" group since we 2008 * will have proper VLAN entries from now on 2009 */ 2010 if (is58xx(dev)) { 2011 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, 2012 &reg); 2013 reg &= ~BIT(port); 2014 if ((reg & BIT(cpu_port)) == BIT(cpu_port)) 2015 reg &= ~BIT(cpu_port); 2016 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, 2017 reg); 2018 } 2019 2020 b53_get_vlan_entry(dev, pvid, vl); 2021 vl->members &= ~BIT(port); 2022 if (vl->members == BIT(cpu_port)) 2023 vl->members &= ~BIT(cpu_port); 2024 vl->untag = vl->members; 2025 b53_set_vlan_entry(dev, pvid, vl); 2026 } 2027 2028 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 2029 2030 b53_for_each_port(dev, i) { 2031 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) 2032 continue; 2033 2034 /* Add this local port to the remote port VLAN control 2035 * membership and update the remote port bitmask 2036 */ 2037 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg); 2038 reg |= BIT(port); 2039 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg); 2040 dev->ports[i].vlan_ctl_mask = reg; 2041 2042 pvlan |= BIT(i); 2043 } 2044 2045 /* Configure the local port VLAN control membership to include 2046 * remote ports and update the local port bitmask 2047 */ 2048 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 2049 dev->ports[port].vlan_ctl_mask = pvlan; 2050 2051 return 0; 2052 } 2053 EXPORT_SYMBOL(b53_br_join); 2054 2055 void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) 2056 { 2057 struct b53_device *dev = ds->priv; 2058 struct b53_vlan *vl; 2059 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; 2060 unsigned int i; 2061 u16 pvlan, reg, pvid; 2062 2063 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 2064 2065 b53_for_each_port(dev, i) { 2066 /* Don't touch the remaining ports */ 2067 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) 2068 continue; 2069 2070 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg); 2071 reg &= ~BIT(port); 2072 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg); 2073 dev->ports[i].vlan_ctl_mask = reg; 2074 2075 /* Prevent self removal to preserve isolation */ 2076 if (port != i) 2077 pvlan &= ~BIT(i); 2078 } 2079 2080 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 2081 dev->ports[port].vlan_ctl_mask = pvlan; 2082 2083 pvid = b53_default_pvid(dev); 2084 vl = &dev->vlans[pvid]; 2085 2086 if (dev->vlan_filtering) { 2087 /* Make this port join all VLANs without VLAN entries */ 2088 if (is58xx(dev)) { 2089 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg); 2090 reg |= BIT(port); 2091 if (!(reg & BIT(cpu_port))) 2092 reg |= BIT(cpu_port); 2093 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); 2094 } 2095 2096 b53_get_vlan_entry(dev, pvid, vl); 2097 vl->members |= BIT(port) | BIT(cpu_port); 2098 vl->untag |= BIT(port) | BIT(cpu_port); 2099 b53_set_vlan_entry(dev, pvid, vl); 2100 } 2101 } 2102 EXPORT_SYMBOL(b53_br_leave); 2103 2104 void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state) 2105 { 2106 struct b53_device *dev = ds->priv; 2107 u8 hw_state; 2108 u8 reg; 2109 2110 switch (state) { 2111 case BR_STATE_DISABLED: 2112 hw_state = PORT_CTRL_DIS_STATE; 2113 break; 2114 case BR_STATE_LISTENING: 2115 hw_state =
PORT_CTRL_LISTEN_STATE; 2116 break; 2117 case BR_STATE_LEARNING: 2118 hw_state = PORT_CTRL_LEARN_STATE; 2119 break; 2120 case BR_STATE_FORWARDING: 2121 hw_state = PORT_CTRL_FWD_STATE; 2122 break; 2123 case BR_STATE_BLOCKING: 2124 hw_state = PORT_CTRL_BLOCK_STATE; 2125 break; 2126 default: 2127 dev_err(ds->dev, "invalid STP state: %d\n", state); 2128 return; 2129 } 2130 2131 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg); 2132 reg &= ~PORT_CTRL_STP_STATE_MASK; 2133 reg |= hw_state; 2134 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); 2135 } 2136 EXPORT_SYMBOL(b53_br_set_stp_state); 2137 2138 void b53_br_fast_age(struct dsa_switch *ds, int port) 2139 { 2140 struct b53_device *dev = ds->priv; 2141 2142 if (b53_fast_age_port(dev, port)) 2143 dev_err(ds->dev, "fast ageing failed\n"); 2144 } 2145 EXPORT_SYMBOL(b53_br_fast_age); 2146 2147 int b53_br_flags_pre(struct dsa_switch *ds, int port, 2148 struct switchdev_brport_flags flags, 2149 struct netlink_ext_ack *extack) 2150 { 2151 if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_LEARNING)) 2152 return -EINVAL; 2153 2154 return 0; 2155 } 2156 EXPORT_SYMBOL(b53_br_flags_pre); 2157 2158 int b53_br_flags(struct dsa_switch *ds, int port, 2159 struct switchdev_brport_flags flags, 2160 struct netlink_ext_ack *extack) 2161 { 2162 if (flags.mask & BR_FLOOD) 2163 b53_port_set_ucast_flood(ds->priv, port, 2164 !!(flags.val & BR_FLOOD)); 2165 if (flags.mask & BR_MCAST_FLOOD) 2166 b53_port_set_mcast_flood(ds->priv, port, 2167 !!(flags.val & BR_MCAST_FLOOD)); 2168 if (flags.mask & BR_LEARNING) 2169 b53_port_set_learning(ds->priv, port, 2170 !!(flags.val & BR_LEARNING)); 2171 2172 return 0; 2173 } 2174 EXPORT_SYMBOL(b53_br_flags); 2175 2176 static bool b53_possible_cpu_port(struct dsa_switch *ds, int port) 2177 { 2178 /* Broadcom switches will accept enabling Broadcom tags on the 2179 * following ports: 5, 7 and 8; any other port is not supported 2180 */ 2181 switch (port) { 2182 case B53_CPU_PORT_25: 2183 case 7: 2184 case B53_CPU_PORT: 2185 return true; 2186 } 2187 2188 return false; 2189 } 2190 2191 static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port, 2192 enum dsa_tag_protocol tag_protocol) 2193 { 2194 bool ret = b53_possible_cpu_port(ds, port); 2195 2196 if (!ret) { 2197 dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n", 2198 port); 2199 return ret; 2200 } 2201 2202 switch (tag_protocol) { 2203 case DSA_TAG_PROTO_BRCM: 2204 case DSA_TAG_PROTO_BRCM_PREPEND: 2205 dev_warn(ds->dev, 2206 "Port %d is stacked to Broadcom tag switch\n", port); 2207 ret = false; 2208 break; 2209 default: 2210 ret = true; 2211 break; 2212 } 2213 2214 return ret; 2215 } 2216 2217 enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port, 2218 enum dsa_tag_protocol mprot) 2219 { 2220 struct b53_device *dev = ds->priv; 2221 2222 if (!b53_can_enable_brcm_tags(ds, port, mprot)) { 2223 dev->tag_protocol = DSA_TAG_PROTO_NONE; 2224 goto out; 2225 } 2226 2227 /* Older models require a different 6-byte tag */ 2228 if (is5325(dev) || is5365(dev) || is63xx(dev)) { 2229 dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY; 2230 goto out; 2231 } 2232 2233 /* Broadcom BCM58xx chips have a flow accelerator on Port 8 2234 * which requires us to use the prepended Broadcom tag type 2235 */ 2236 if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT) { 2237 dev->tag_protocol = DSA_TAG_PROTO_BRCM_PREPEND; 2238 goto out; 2239 } 2240 2241 dev->tag_protocol = DSA_TAG_PROTO_BRCM; 2242 out: 2243 return dev->tag_protocol; 2244 } 2245
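/* Illustrative summary of the tag protocol selection above (comment only,
 * derived from the preceding code, not an additional code path):
 *
 *   port other than 5, 7 or 8, or stacked Broadcom tag switch -> DSA_TAG_PROTO_NONE
 *   BCM5325 / BCM5365 / BCM63xx (legacy 6-byte tag)           -> DSA_TAG_PROTO_BRCM_LEGACY
 *   BCM58xx CPU port 8 (flow accelerator)                     -> DSA_TAG_PROTO_BRCM_PREPEND
 *   everything else                                           -> DSA_TAG_PROTO_BRCM
 */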
EXPORT_SYMBOL(b53_get_tag_protocol); 2246 2247 int b53_mirror_add(struct dsa_switch *ds, int port, 2248 struct dsa_mall_mirror_tc_entry *mirror, bool ingress, 2249 struct netlink_ext_ack *extack) 2250 { 2251 struct b53_device *dev = ds->priv; 2252 u16 reg, loc; 2253 2254 if (ingress) 2255 loc = B53_IG_MIR_CTL; 2256 else 2257 loc = B53_EG_MIR_CTL; 2258 2259 b53_read16(dev, B53_MGMT_PAGE, loc, &reg); 2260 reg |= BIT(port); 2261 b53_write16(dev, B53_MGMT_PAGE, loc, reg); 2262 2263 b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg); 2264 reg &= ~CAP_PORT_MASK; 2265 reg |= mirror->to_local_port; 2266 reg |= MIRROR_EN; 2267 b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg); 2268 2269 return 0; 2270 } 2271 EXPORT_SYMBOL(b53_mirror_add); 2272 2273 void b53_mirror_del(struct dsa_switch *ds, int port, 2274 struct dsa_mall_mirror_tc_entry *mirror) 2275 { 2276 struct b53_device *dev = ds->priv; 2277 bool loc_disable = false, other_loc_disable = false; 2278 u16 reg, loc; 2279 2280 if (mirror->ingress) 2281 loc = B53_IG_MIR_CTL; 2282 else 2283 loc = B53_EG_MIR_CTL; 2284 2285 /* Update the desired ingress/egress register */ 2286 b53_read16(dev, B53_MGMT_PAGE, loc, &reg); 2287 reg &= ~BIT(port); 2288 if (!(reg & MIRROR_MASK)) 2289 loc_disable = true; 2290 b53_write16(dev, B53_MGMT_PAGE, loc, reg); 2291 2292 /* Now look at the other one to know if we can disable mirroring 2293 * entirely 2294 */ 2295 if (mirror->ingress) 2296 b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, &reg); 2297 else 2298 b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, &reg); 2299 if (!(reg & MIRROR_MASK)) 2300 other_loc_disable = true; 2301 2302 b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg); 2303 /* Both no longer have ports, let's disable mirroring */ 2304 if (loc_disable && other_loc_disable) { 2305 reg &= ~MIRROR_EN; 2306 reg &= ~mirror->to_local_port; 2307 } 2308 b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg); 2309 } 2310 EXPORT_SYMBOL(b53_mirror_del); 2311 2312 /* Returns 0 if EEE was not enabled, or 1 otherwise 2313 */ 2314 int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy) 2315 { 2316 int ret; 2317 2318 ret = phy_init_eee(phy, false); 2319 if (ret) 2320 return 0; 2321 2322 b53_eee_enable_set(ds, port, true); 2323 2324 return 1; 2325 } 2326 EXPORT_SYMBOL(b53_eee_init); 2327 2328 bool b53_support_eee(struct dsa_switch *ds, int port) 2329 { 2330 struct b53_device *dev = ds->priv; 2331 2332 return !is5325(dev) && !is5365(dev); 2333 } 2334 EXPORT_SYMBOL(b53_support_eee); 2335 2336 int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e) 2337 { 2338 struct b53_device *dev = ds->priv; 2339 struct ethtool_keee *p = &dev->ports[port].eee; 2340 2341 p->eee_enabled = e->eee_enabled; 2342 b53_eee_enable_set(ds, port, e->eee_enabled); 2343 2344 return 0; 2345 } 2346 EXPORT_SYMBOL(b53_set_mac_eee); 2347 2348 static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu) 2349 { 2350 struct b53_device *dev = ds->priv; 2351 bool enable_jumbo; 2352 bool allow_10_100; 2353 2354 if (is5325(dev) || is5365(dev)) 2355 return 0; 2356 2357 if (!dsa_is_cpu_port(ds, port)) 2358 return 0; 2359 2360 enable_jumbo = (mtu > ETH_DATA_LEN); 2361 allow_10_100 = !is63xx(dev); 2362 2363 return b53_set_jumbo(dev, enable_jumbo, allow_10_100); 2364 } 2365 2366 static int b53_get_max_mtu(struct dsa_switch *ds, int port) 2367 { 2368 struct b53_device *dev = ds->priv; 2369 2370 if (is5325(dev) || is5365(dev)) 2371 return B53_MAX_MTU_25; 2372 2373 return B53_MAX_MTU; 2374 } 2375 2376 static const struct phylink_mac_ops
b53_phylink_mac_ops = { 2377 .mac_select_pcs = b53_phylink_mac_select_pcs, 2378 .mac_config = b53_phylink_mac_config, 2379 .mac_link_down = b53_phylink_mac_link_down, 2380 .mac_link_up = b53_phylink_mac_link_up, 2381 }; 2382 2383 static const struct dsa_switch_ops b53_switch_ops = { 2384 .get_tag_protocol = b53_get_tag_protocol, 2385 .setup = b53_setup, 2386 .teardown = b53_teardown, 2387 .get_strings = b53_get_strings, 2388 .get_ethtool_stats = b53_get_ethtool_stats, 2389 .get_sset_count = b53_get_sset_count, 2390 .get_ethtool_phy_stats = b53_get_ethtool_phy_stats, 2391 .phy_read = b53_phy_read16, 2392 .phy_write = b53_phy_write16, 2393 .phylink_get_caps = b53_phylink_get_caps, 2394 .port_setup = b53_setup_port, 2395 .port_enable = b53_enable_port, 2396 .port_disable = b53_disable_port, 2397 .support_eee = b53_support_eee, 2398 .set_mac_eee = b53_set_mac_eee, 2399 .port_bridge_join = b53_br_join, 2400 .port_bridge_leave = b53_br_leave, 2401 .port_pre_bridge_flags = b53_br_flags_pre, 2402 .port_bridge_flags = b53_br_flags, 2403 .port_stp_state_set = b53_br_set_stp_state, 2404 .port_fast_age = b53_br_fast_age, 2405 .port_vlan_filtering = b53_vlan_filtering, 2406 .port_vlan_add = b53_vlan_add, 2407 .port_vlan_del = b53_vlan_del, 2408 .port_fdb_dump = b53_fdb_dump, 2409 .port_fdb_add = b53_fdb_add, 2410 .port_fdb_del = b53_fdb_del, 2411 .port_mirror_add = b53_mirror_add, 2412 .port_mirror_del = b53_mirror_del, 2413 .port_mdb_add = b53_mdb_add, 2414 .port_mdb_del = b53_mdb_del, 2415 .port_max_mtu = b53_get_max_mtu, 2416 .port_change_mtu = b53_change_mtu, 2417 }; 2418 2419 struct b53_chip_data { 2420 u32 chip_id; 2421 const char *dev_name; 2422 u16 vlans; 2423 u16 enabled_ports; 2424 u8 imp_port; 2425 u8 cpu_port; 2426 u8 vta_regs[3]; 2427 u8 arl_bins; 2428 u16 arl_buckets; 2429 u8 duplex_reg; 2430 u8 jumbo_pm_reg; 2431 u8 jumbo_size_reg; 2432 }; 2433 2434 #define B53_VTA_REGS \ 2435 { B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY } 2436 #define B53_VTA_REGS_9798 \ 2437 { B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 } 2438 #define B53_VTA_REGS_63XX \ 2439 { B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX } 2440 2441 static const struct b53_chip_data b53_switch_chips[] = { 2442 { 2443 .chip_id = BCM5325_DEVICE_ID, 2444 .dev_name = "BCM5325", 2445 .vlans = 16, 2446 .enabled_ports = 0x3f, 2447 .arl_bins = 2, 2448 .arl_buckets = 1024, 2449 .imp_port = 5, 2450 .duplex_reg = B53_DUPLEX_STAT_FE, 2451 }, 2452 { 2453 .chip_id = BCM5365_DEVICE_ID, 2454 .dev_name = "BCM5365", 2455 .vlans = 256, 2456 .enabled_ports = 0x3f, 2457 .arl_bins = 2, 2458 .arl_buckets = 1024, 2459 .imp_port = 5, 2460 .duplex_reg = B53_DUPLEX_STAT_FE, 2461 }, 2462 { 2463 .chip_id = BCM5389_DEVICE_ID, 2464 .dev_name = "BCM5389", 2465 .vlans = 4096, 2466 .enabled_ports = 0x11f, 2467 .arl_bins = 4, 2468 .arl_buckets = 1024, 2469 .imp_port = 8, 2470 .vta_regs = B53_VTA_REGS, 2471 .duplex_reg = B53_DUPLEX_STAT_GE, 2472 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2473 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2474 }, 2475 { 2476 .chip_id = BCM5395_DEVICE_ID, 2477 .dev_name = "BCM5395", 2478 .vlans = 4096, 2479 .enabled_ports = 0x11f, 2480 .arl_bins = 4, 2481 .arl_buckets = 1024, 2482 .imp_port = 8, 2483 .vta_regs = B53_VTA_REGS, 2484 .duplex_reg = B53_DUPLEX_STAT_GE, 2485 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2486 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2487 }, 2488 { 2489 .chip_id = BCM5397_DEVICE_ID, 2490 .dev_name = "BCM5397", 2491 .vlans = 4096, 2492 .enabled_ports = 0x11f, 2493 .arl_bins = 4, 2494 .arl_buckets = 1024, 2495 
.imp_port = 8, 2496 .vta_regs = B53_VTA_REGS_9798, 2497 .duplex_reg = B53_DUPLEX_STAT_GE, 2498 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2499 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2500 }, 2501 { 2502 .chip_id = BCM5398_DEVICE_ID, 2503 .dev_name = "BCM5398", 2504 .vlans = 4096, 2505 .enabled_ports = 0x17f, 2506 .arl_bins = 4, 2507 .arl_buckets = 1024, 2508 .imp_port = 8, 2509 .vta_regs = B53_VTA_REGS_9798, 2510 .duplex_reg = B53_DUPLEX_STAT_GE, 2511 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2512 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2513 }, 2514 { 2515 .chip_id = BCM53101_DEVICE_ID, 2516 .dev_name = "BCM53101", 2517 .vlans = 4096, 2518 .enabled_ports = 0x11f, 2519 .arl_bins = 4, 2520 .arl_buckets = 512, 2521 .vta_regs = B53_VTA_REGS, 2522 .imp_port = 8, 2523 .duplex_reg = B53_DUPLEX_STAT_GE, 2524 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2525 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2526 }, 2527 { 2528 .chip_id = BCM53115_DEVICE_ID, 2529 .dev_name = "BCM53115", 2530 .vlans = 4096, 2531 .enabled_ports = 0x11f, 2532 .arl_bins = 4, 2533 .arl_buckets = 1024, 2534 .vta_regs = B53_VTA_REGS, 2535 .imp_port = 8, 2536 .duplex_reg = B53_DUPLEX_STAT_GE, 2537 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2538 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2539 }, 2540 { 2541 .chip_id = BCM53125_DEVICE_ID, 2542 .dev_name = "BCM53125", 2543 .vlans = 4096, 2544 .enabled_ports = 0x1ff, 2545 .arl_bins = 4, 2546 .arl_buckets = 1024, 2547 .imp_port = 8, 2548 .vta_regs = B53_VTA_REGS, 2549 .duplex_reg = B53_DUPLEX_STAT_GE, 2550 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2551 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2552 }, 2553 { 2554 .chip_id = BCM53128_DEVICE_ID, 2555 .dev_name = "BCM53128", 2556 .vlans = 4096, 2557 .enabled_ports = 0x1ff, 2558 .arl_bins = 4, 2559 .arl_buckets = 1024, 2560 .imp_port = 8, 2561 .vta_regs = B53_VTA_REGS, 2562 .duplex_reg = B53_DUPLEX_STAT_GE, 2563 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2564 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2565 }, 2566 { 2567 .chip_id = BCM63XX_DEVICE_ID, 2568 .dev_name = "BCM63xx", 2569 .vlans = 4096, 2570 .enabled_ports = 0, /* pdata must provide them */ 2571 .arl_bins = 4, 2572 .arl_buckets = 1024, 2573 .imp_port = 8, 2574 .vta_regs = B53_VTA_REGS_63XX, 2575 .duplex_reg = B53_DUPLEX_STAT_63XX, 2576 .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX, 2577 .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX, 2578 }, 2579 { 2580 .chip_id = BCM63268_DEVICE_ID, 2581 .dev_name = "BCM63268", 2582 .vlans = 4096, 2583 .enabled_ports = 0, /* pdata must provide them */ 2584 .arl_bins = 4, 2585 .arl_buckets = 1024, 2586 .imp_port = 8, 2587 .vta_regs = B53_VTA_REGS_63XX, 2588 .duplex_reg = B53_DUPLEX_STAT_63XX, 2589 .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX, 2590 .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX, 2591 }, 2592 { 2593 .chip_id = BCM53010_DEVICE_ID, 2594 .dev_name = "BCM53010", 2595 .vlans = 4096, 2596 .enabled_ports = 0x1bf, 2597 .arl_bins = 4, 2598 .arl_buckets = 1024, 2599 .imp_port = 8, 2600 .vta_regs = B53_VTA_REGS, 2601 .duplex_reg = B53_DUPLEX_STAT_GE, 2602 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2603 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2604 }, 2605 { 2606 .chip_id = BCM53011_DEVICE_ID, 2607 .dev_name = "BCM53011", 2608 .vlans = 4096, 2609 .enabled_ports = 0x1bf, 2610 .arl_bins = 4, 2611 .arl_buckets = 1024, 2612 .imp_port = 8, 2613 .vta_regs = B53_VTA_REGS, 2614 .duplex_reg = B53_DUPLEX_STAT_GE, 2615 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2616 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2617 }, 2618 { 2619 .chip_id = BCM53012_DEVICE_ID, 2620 .dev_name = "BCM53012", 2621 .vlans = 4096, 2622 .enabled_ports = 0x1bf, 2623 .arl_bins 
= 4, 2624 .arl_buckets = 1024, 2625 .imp_port = 8, 2626 .vta_regs = B53_VTA_REGS, 2627 .duplex_reg = B53_DUPLEX_STAT_GE, 2628 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2629 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2630 }, 2631 { 2632 .chip_id = BCM53018_DEVICE_ID, 2633 .dev_name = "BCM53018", 2634 .vlans = 4096, 2635 .enabled_ports = 0x1bf, 2636 .arl_bins = 4, 2637 .arl_buckets = 1024, 2638 .imp_port = 8, 2639 .vta_regs = B53_VTA_REGS, 2640 .duplex_reg = B53_DUPLEX_STAT_GE, 2641 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2642 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2643 }, 2644 { 2645 .chip_id = BCM53019_DEVICE_ID, 2646 .dev_name = "BCM53019", 2647 .vlans = 4096, 2648 .enabled_ports = 0x1bf, 2649 .arl_bins = 4, 2650 .arl_buckets = 1024, 2651 .imp_port = 8, 2652 .vta_regs = B53_VTA_REGS, 2653 .duplex_reg = B53_DUPLEX_STAT_GE, 2654 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2655 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2656 }, 2657 { 2658 .chip_id = BCM58XX_DEVICE_ID, 2659 .dev_name = "BCM585xx/586xx/88312", 2660 .vlans = 4096, 2661 .enabled_ports = 0x1ff, 2662 .arl_bins = 4, 2663 .arl_buckets = 1024, 2664 .imp_port = 8, 2665 .vta_regs = B53_VTA_REGS, 2666 .duplex_reg = B53_DUPLEX_STAT_GE, 2667 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2668 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2669 }, 2670 { 2671 .chip_id = BCM583XX_DEVICE_ID, 2672 .dev_name = "BCM583xx/11360", 2673 .vlans = 4096, 2674 .enabled_ports = 0x103, 2675 .arl_bins = 4, 2676 .arl_buckets = 1024, 2677 .imp_port = 8, 2678 .vta_regs = B53_VTA_REGS, 2679 .duplex_reg = B53_DUPLEX_STAT_GE, 2680 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2681 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2682 }, 2683 /* Starfighter 2 */ 2684 { 2685 .chip_id = BCM4908_DEVICE_ID, 2686 .dev_name = "BCM4908", 2687 .vlans = 4096, 2688 .enabled_ports = 0x1bf, 2689 .arl_bins = 4, 2690 .arl_buckets = 256, 2691 .imp_port = 8, 2692 .vta_regs = B53_VTA_REGS, 2693 .duplex_reg = B53_DUPLEX_STAT_GE, 2694 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2695 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2696 }, 2697 { 2698 .chip_id = BCM7445_DEVICE_ID, 2699 .dev_name = "BCM7445", 2700 .vlans = 4096, 2701 .enabled_ports = 0x1ff, 2702 .arl_bins = 4, 2703 .arl_buckets = 1024, 2704 .imp_port = 8, 2705 .vta_regs = B53_VTA_REGS, 2706 .duplex_reg = B53_DUPLEX_STAT_GE, 2707 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2708 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2709 }, 2710 { 2711 .chip_id = BCM7278_DEVICE_ID, 2712 .dev_name = "BCM7278", 2713 .vlans = 4096, 2714 .enabled_ports = 0x1ff, 2715 .arl_bins = 4, 2716 .arl_buckets = 256, 2717 .imp_port = 8, 2718 .vta_regs = B53_VTA_REGS, 2719 .duplex_reg = B53_DUPLEX_STAT_GE, 2720 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2721 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2722 }, 2723 { 2724 .chip_id = BCM53134_DEVICE_ID, 2725 .dev_name = "BCM53134", 2726 .vlans = 4096, 2727 .enabled_ports = 0x12f, 2728 .imp_port = 8, 2729 .cpu_port = B53_CPU_PORT, 2730 .vta_regs = B53_VTA_REGS, 2731 .arl_bins = 4, 2732 .arl_buckets = 1024, 2733 .duplex_reg = B53_DUPLEX_STAT_GE, 2734 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2735 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2736 }, 2737 }; 2738 2739 static int b53_switch_init(struct b53_device *dev) 2740 { 2741 unsigned int i; 2742 int ret; 2743 2744 for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) { 2745 const struct b53_chip_data *chip = &b53_switch_chips[i]; 2746 2747 if (chip->chip_id == dev->chip_id) { 2748 if (!dev->enabled_ports) 2749 dev->enabled_ports = chip->enabled_ports; 2750 dev->name = chip->dev_name; 2751 dev->duplex_reg = chip->duplex_reg; 2752 dev->vta_regs[0] = 
chip->vta_regs[0]; 2753 dev->vta_regs[1] = chip->vta_regs[1]; 2754 dev->vta_regs[2] = chip->vta_regs[2]; 2755 dev->jumbo_pm_reg = chip->jumbo_pm_reg; 2756 dev->imp_port = chip->imp_port; 2757 dev->num_vlans = chip->vlans; 2758 dev->num_arl_bins = chip->arl_bins; 2759 dev->num_arl_buckets = chip->arl_buckets; 2760 break; 2761 } 2762 } 2763 2764 /* check which BCM5325x version we have */ 2765 if (is5325(dev)) { 2766 u8 vc4; 2767 2768 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4); 2769 2770 /* check reserved bits */ 2771 switch (vc4 & 3) { 2772 case 1: 2773 /* BCM5325E */ 2774 break; 2775 case 3: 2776 /* BCM5325F - do not use port 4 */ 2777 dev->enabled_ports &= ~BIT(4); 2778 break; 2779 default: 2780 /* On the BCM47XX SoCs this is the supported internal switch. */ 2781 #ifndef CONFIG_BCM47XX 2782 /* BCM5325M */ 2783 return -EINVAL; 2784 #else 2785 break; 2786 #endif 2787 } 2788 } 2789 2790 dev->num_ports = fls(dev->enabled_ports); 2791 2792 dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS); 2793 2794 /* Include non-standard CPU port built-in PHYs to be probed */ 2795 if (is539x(dev) || is531x5(dev)) { 2796 for (i = 0; i < dev->num_ports; i++) { 2797 if (!(dev->ds->phys_mii_mask & BIT(i)) && 2798 !b53_possible_cpu_port(dev->ds, i)) 2799 dev->ds->phys_mii_mask |= BIT(i); 2800 } 2801 } 2802 2803 dev->ports = devm_kcalloc(dev->dev, 2804 dev->num_ports, sizeof(struct b53_port), 2805 GFP_KERNEL); 2806 if (!dev->ports) 2807 return -ENOMEM; 2808 2809 dev->vlans = devm_kcalloc(dev->dev, 2810 dev->num_vlans, sizeof(struct b53_vlan), 2811 GFP_KERNEL); 2812 if (!dev->vlans) 2813 return -ENOMEM; 2814 2815 dev->reset_gpio = b53_switch_get_reset_gpio(dev); 2816 if (dev->reset_gpio >= 0) { 2817 ret = devm_gpio_request_one(dev->dev, dev->reset_gpio, 2818 GPIOF_OUT_INIT_HIGH, "robo_reset"); 2819 if (ret) 2820 return ret; 2821 } 2822 2823 return 0; 2824 } 2825 2826 struct b53_device *b53_switch_alloc(struct device *base, 2827 const struct b53_io_ops *ops, 2828 void *priv) 2829 { 2830 struct dsa_switch *ds; 2831 struct b53_device *dev; 2832 2833 ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL); 2834 if (!ds) 2835 return NULL; 2836 2837 ds->dev = base; 2838 2839 dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL); 2840 if (!dev) 2841 return NULL; 2842 2843 ds->priv = dev; 2844 dev->dev = base; 2845 2846 dev->ds = ds; 2847 dev->priv = priv; 2848 dev->ops = ops; 2849 ds->ops = &b53_switch_ops; 2850 ds->phylink_mac_ops = &b53_phylink_mac_ops; 2851 dev->vlan_enabled = true; 2852 dev->vlan_filtering = false; 2853 /* Let DSA handle the case where multiple bridges span the same switch 2854 * device and different VLAN awareness settings are requested, which 2855 * would break filtering semantics for any of the other bridge 2856 * devices. (not supported by the hardware) 2857 */ 2858 ds->vlan_filtering_is_global = true; 2859 2860 mutex_init(&dev->reg_mutex); 2861 mutex_init(&dev->stats_mutex); 2862 mutex_init(&dev->arl_mutex); 2863 2864 return dev; 2865 } 2866 EXPORT_SYMBOL(b53_switch_alloc); 2867 2868 int b53_switch_detect(struct b53_device *dev) 2869 { 2870 u32 id32; 2871 u16 tmp; 2872 u8 id8; 2873 int ret; 2874 2875 ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8); 2876 if (ret) 2877 return ret; 2878 2879 switch (id8) { 2880 case 0: 2881 /* BCM5325 and BCM5365 do not have this register so reads 2882 * return 0. But the read operation did succeed, so assume this 2883 * is one of them. 2884 * 2885 * Next check if we can write to the 5325's VTA register; for 2886 * 5365 it is read only.
2887 */ 2888 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf); 2889 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp); 2890 2891 if (tmp == 0xf) 2892 dev->chip_id = BCM5325_DEVICE_ID; 2893 else 2894 dev->chip_id = BCM5365_DEVICE_ID; 2895 break; 2896 case BCM5389_DEVICE_ID: 2897 case BCM5395_DEVICE_ID: 2898 case BCM5397_DEVICE_ID: 2899 case BCM5398_DEVICE_ID: 2900 dev->chip_id = id8; 2901 break; 2902 default: 2903 ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32); 2904 if (ret) 2905 return ret; 2906 2907 switch (id32) { 2908 case BCM53101_DEVICE_ID: 2909 case BCM53115_DEVICE_ID: 2910 case BCM53125_DEVICE_ID: 2911 case BCM53128_DEVICE_ID: 2912 case BCM53010_DEVICE_ID: 2913 case BCM53011_DEVICE_ID: 2914 case BCM53012_DEVICE_ID: 2915 case BCM53018_DEVICE_ID: 2916 case BCM53019_DEVICE_ID: 2917 case BCM53134_DEVICE_ID: 2918 dev->chip_id = id32; 2919 break; 2920 default: 2921 dev_err(dev->dev, 2922 "unsupported switch detected (BCM53%02x/BCM%x)\n", 2923 id8, id32); 2924 return -ENODEV; 2925 } 2926 } 2927 2928 if (dev->chip_id == BCM5325_DEVICE_ID) 2929 return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25, 2930 &dev->core_rev); 2931 else 2932 return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID, 2933 &dev->core_rev); 2934 } 2935 EXPORT_SYMBOL(b53_switch_detect); 2936 2937 int b53_switch_register(struct b53_device *dev) 2938 { 2939 int ret; 2940 2941 if (dev->pdata) { 2942 dev->chip_id = dev->pdata->chip_id; 2943 dev->enabled_ports = dev->pdata->enabled_ports; 2944 } 2945 2946 if (!dev->chip_id && b53_switch_detect(dev)) 2947 return -EINVAL; 2948 2949 ret = b53_switch_init(dev); 2950 if (ret) 2951 return ret; 2952 2953 dev_info(dev->dev, "found switch: %s, rev %i\n", 2954 dev->name, dev->core_rev); 2955 2956 return dsa_register_switch(dev->ds); 2957 } 2958 EXPORT_SYMBOL(b53_switch_register); 2959 2960 MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>"); 2961 MODULE_DESCRIPTION("B53 switch library"); 2962 MODULE_LICENSE("Dual BSD/GPL"); 2963