/*
 * B53 switch driver main logic
 *
 * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
 * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/platform_data/b53.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>

#include "b53_regs.h"
#include "b53_priv.h"

struct b53_mib_desc {
	u8 size;
	u8 offset;
	const char *name;
};

/* BCM5365 MIB counters */
static const struct b53_mib_desc b53_mibs_65[] = {
	{ 8, 0x00, "TxOctets" },
	{ 4, 0x08, "TxDropPkts" },
	{ 4, 0x10, "TxBroadcastPkts" },
	{ 4, 0x14, "TxMulticastPkts" },
	{ 4, 0x18, "TxUnicastPkts" },
	{ 4, 0x1c, "TxCollisions" },
	{ 4, 0x20, "TxSingleCollision" },
	{ 4, 0x24, "TxMultipleCollision" },
	{ 4, 0x28, "TxDeferredTransmit" },
	{ 4, 0x2c, "TxLateCollision" },
	{ 4, 0x30, "TxExcessiveCollision" },
	{ 4, 0x38, "TxPausePkts" },
	{ 8, 0x44, "RxOctets" },
	{ 4, 0x4c, "RxUndersizePkts" },
	{ 4, 0x50, "RxPausePkts" },
	{ 4, 0x54, "Pkts64Octets" },
	{ 4, 0x58, "Pkts65to127Octets" },
	{ 4, 0x5c, "Pkts128to255Octets" },
	{ 4, 0x60, "Pkts256to511Octets" },
	{ 4, 0x64, "Pkts512to1023Octets" },
	{ 4, 0x68, "Pkts1024to1522Octets" },
	{ 4, 0x6c, "RxOversizePkts" },
	{ 4, 0x70, "RxJabbers" },
	{ 4, 0x74, "RxAlignmentErrors" },
	{ 4, 0x78, "RxFCSErrors" },
	{ 8, 0x7c, "RxGoodOctets" },
	{ 4, 0x84, "RxDropPkts" },
	{ 4, 0x88, "RxUnicastPkts" },
	{ 4, 0x8c, "RxMulticastPkts" },
	{ 4, 0x90, "RxBroadcastPkts" },
	{ 4, 0x94, "RxSAChanges" },
	{ 4, 0x98, "RxFragments" },
};

#define B53_MIBS_65_SIZE	ARRAY_SIZE(b53_mibs_65)

/* BCM63xx MIB counters */
static const struct b53_mib_desc b53_mibs_63xx[] = {
	{ 8, 0x00, "TxOctets" },
	{ 4, 0x08, "TxDropPkts" },
	{ 4, 0x0c, "TxQoSPkts" },
	{ 4, 0x10, "TxBroadcastPkts" },
	{ 4, 0x14, "TxMulticastPkts" },
	{ 4, 0x18, "TxUnicastPkts" },
	{ 4, 0x1c, "TxCollisions" },
	{ 4, 0x20, "TxSingleCollision" },
	{ 4, 0x24, "TxMultipleCollision" },
	{ 4, 0x28, "TxDeferredTransmit" },
	{ 4, 0x2c, "TxLateCollision" },
	{ 4, 0x30, "TxExcessiveCollision" },
	{ 4, 0x38, "TxPausePkts" },
	{ 8, 0x3c, "TxQoSOctets" },
	{ 8, 0x44, "RxOctets" },
	{ 4, 0x4c, "RxUndersizePkts" },
	{ 4, 0x50, "RxPausePkts" },
	{ 4, 0x54, "Pkts64Octets" },
	{ 4, 0x58, "Pkts65to127Octets" },
	{ 4, 0x5c, "Pkts128to255Octets" },
	{ 4, 0x60, "Pkts256to511Octets" },
	{ 4, 0x64, "Pkts512to1023Octets" },
	{ 4, 0x68, "Pkts1024to1522Octets" },
	{ 4, 0x6c, "RxOversizePkts" },
	{ 4, 0x70, "RxJabbers" },
	{ 4, 0x74, "RxAlignmentErrors" },
	{ 4, 0x78, "RxFCSErrors" },
	{ 8, 0x7c, "RxGoodOctets" },
	{ 4, 0x84, "RxDropPkts" },
	{ 4, 0x88, "RxUnicastPkts" },
	{ 4, 0x8c, "RxMulticastPkts" },
	{ 4, 0x90, "RxBroadcastPkts" },
	{ 4, 0x94, "RxSAChanges" },
	{ 4, 0x98, "RxFragments" },
	{ 4, 0xa0, "RxSymbolErrors" },
	{ 4, 0xa4, "RxQoSPkts" },
	{ 8, 0xa8, "RxQoSOctets" },
	{ 4, 0xb0, "Pkts1523to2047Octets" },
	{ 4, 0xb4, "Pkts2048to4095Octets" },
	{ 4, 0xb8, "Pkts4096to8191Octets" },
	{ 4, 0xbc, "Pkts8192to9728Octets" },
	{ 4, 0xc0, "RxDiscarded" },
};

#define B53_MIBS_63XX_SIZE	ARRAY_SIZE(b53_mibs_63xx)

/* MIB counters */
static const struct b53_mib_desc b53_mibs[] = {
	{ 8, 0x00, "TxOctets" },
	{ 4, 0x08, "TxDropPkts" },
	{ 4, 0x10, "TxBroadcastPkts" },
	{ 4, 0x14, "TxMulticastPkts" },
	{ 4, 0x18, "TxUnicastPkts" },
	{ 4, 0x1c, "TxCollisions" },
	{ 4, 0x20, "TxSingleCollision" },
	{ 4, 0x24, "TxMultipleCollision" },
	{ 4, 0x28, "TxDeferredTransmit" },
	{ 4, 0x2c, "TxLateCollision" },
	{ 4, 0x30, "TxExcessiveCollision" },
	{ 4, 0x38, "TxPausePkts" },
	{ 8, 0x50, "RxOctets" },
	{ 4, 0x58, "RxUndersizePkts" },
	{ 4, 0x5c, "RxPausePkts" },
	{ 4, 0x60, "Pkts64Octets" },
	{ 4, 0x64, "Pkts65to127Octets" },
	{ 4, 0x68, "Pkts128to255Octets" },
	{ 4, 0x6c, "Pkts256to511Octets" },
	{ 4, 0x70, "Pkts512to1023Octets" },
	{ 4, 0x74, "Pkts1024to1522Octets" },
	{ 4, 0x78, "RxOversizePkts" },
	{ 4, 0x7c, "RxJabbers" },
	{ 4, 0x80, "RxAlignmentErrors" },
	{ 4, 0x84, "RxFCSErrors" },
	{ 8, 0x88, "RxGoodOctets" },
	{ 4, 0x90, "RxDropPkts" },
	{ 4, 0x94, "RxUnicastPkts" },
	{ 4, 0x98, "RxMulticastPkts" },
	{ 4, 0x9c, "RxBroadcastPkts" },
	{ 4, 0xa0, "RxSAChanges" },
	{ 4, 0xa4, "RxFragments" },
	{ 4, 0xa8, "RxJumboPkts" },
	{ 4, 0xac, "RxSymbolErrors" },
	{ 4, 0xc0, "RxDiscarded" },
};

#define B53_MIBS_SIZE	ARRAY_SIZE(b53_mibs)

static const struct b53_mib_desc b53_mibs_58xx[] = {
	{ 8, 0x00, "TxOctets" },
	{ 4, 0x08, "TxDropPkts" },
	{ 4, 0x0c, "TxQPKTQ0" },
	{ 4, 0x10, "TxBroadcastPkts" },
	{ 4, 0x14, "TxMulticastPkts" },
	{ 4, 0x18, "TxUnicastPKts" },
	{ 4, 0x1c, "TxCollisions" },
	{ 4, 0x20, "TxSingleCollision" },
	{ 4, 0x24, "TxMultipleCollision" },
	{ 4, 0x28, "TxDeferredCollision" },
	{ 4, 0x2c, "TxLateCollision" },
	{ 4, 0x30, "TxExcessiveCollision" },
	{ 4, 0x34, "TxFrameInDisc" },
	{ 4, 0x38, "TxPausePkts" },
	{ 4, 0x3c, "TxQPKTQ1" },
	{ 4, 0x40, "TxQPKTQ2" },
	{ 4, 0x44, "TxQPKTQ3" },
	{ 4, 0x48, "TxQPKTQ4" },
	{ 4, 0x4c, "TxQPKTQ5" },
	{ 8, 0x50, "RxOctets" },
	{ 4, 0x58, "RxUndersizePkts" },
	{ 4, 0x5c, "RxPausePkts" },
	{ 4, 0x60, "RxPkts64Octets" },
	{ 4, 0x64, "RxPkts65to127Octets" },
	{ 4, 0x68, "RxPkts128to255Octets" },
	{ 4, 0x6c, "RxPkts256to511Octets" },
	{ 4, 0x70, "RxPkts512to1023Octets" },
	{ 4, 0x74, "RxPkts1024toMaxPktsOctets" },
	{ 4, 0x78, "RxOversizePkts" },
	{ 4, 0x7c, "RxJabbers" },
	{ 4, 0x80, "RxAlignmentErrors" },
	{ 4, 0x84, "RxFCSErrors" },
	{ 8, 0x88, "RxGoodOctets" },
	{ 4, 0x90, "RxDropPkts" },
	{ 4, 0x94, "RxUnicastPkts" },
	{ 4, 0x98, "RxMulticastPkts" },
	{ 4, 0x9c, "RxBroadcastPkts" },
	{ 4, 0xa0, "RxSAChanges" },
	{ 4, 0xa4, "RxFragments" },
	{ 4, 0xa8, "RxJumboPkt" },
	{ 4, 0xac, "RxSymblErr" },
	{ 4, 0xb0, "InRangeErrCount" },
	{ 4, 0xb4, "OutRangeErrCount" },
	{ 4, 0xb8, "EEELpiEvent" },
	{ 4, 0xbc, "EEELpiDuration" },
	{ 4, 0xc0, "RxDiscard" },
	{ 4, 0xc8, "TxQPKTQ6" },
	{ 4, 0xcc, "TxQPKTQ7" },
	{ 4, 0xd0, "TxPkts64Octets" },
	{ 4, 0xd4, "TxPkts65to127Octets" },
	{ 4, 0xd8, "TxPkts128to255Octets" },
	{ 4, 0xdc, "TxPkts256to511Ocets" },
	{ 4, 0xe0, "TxPkts512to1023Ocets" },
	{ 4, 0xe4, "TxPkts1024toMaxPktOcets" },
};

#define B53_MIBS_58XX_SIZE	ARRAY_SIZE(b53_mibs_58xx)

#define B53_MAX_MTU_25		(1536 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN)
#define B53_MAX_MTU		(9720 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN)

static int b53_do_vlan_op(struct b53_device *dev, u8 op)
{
	unsigned int i;

	b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op);

	for (i = 0; i < 10; i++) {
		u8 vta;

		b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta);
		if (!(vta & VTA_START_CMD))
			return 0;

		usleep_range(100, 200);
	}

	return -EIO;
}

static void b53_set_vlan_entry(struct b53_device *dev, u16 vid,
			       struct b53_vlan *vlan)
{
	if (is5325(dev)) {
		u32 entry = 0;

		if (vlan->members) {
			entry = ((vlan->untag & VA_UNTAG_MASK_25) <<
				 VA_UNTAG_S_25) | vlan->members;
			if (dev->core_rev >= 3)
				entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S;
			else
				entry |= VA_VALID_25;
		}

		b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry);
		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
			    VTA_RW_STATE_WR | VTA_RW_OP_EN);
	} else if (is5365(dev)) {
		u16 entry = 0;

		if (vlan->members)
			entry = ((vlan->untag & VA_UNTAG_MASK_65) <<
				 VA_UNTAG_S_65) | vlan->members | VA_VALID_65;

		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry);
		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
			    VTA_RW_STATE_WR | VTA_RW_OP_EN);
	} else {
		b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
		b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2],
			    (vlan->untag << VTE_UNTAG_S) | vlan->members);

		b53_do_vlan_op(dev, VTA_CMD_WRITE);
	}

	dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n",
		vid, vlan->members, vlan->untag);
}

static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
			       struct b53_vlan *vlan)
{
	if (is5325(dev)) {
		u32 entry = 0;

		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
			    VTA_RW_STATE_RD | VTA_RW_OP_EN);
		b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry);

		if (dev->core_rev >= 3)
			vlan->valid = !!(entry & VA_VALID_25_R4);
		else
			vlan->valid = !!(entry & VA_VALID_25);
		vlan->members = entry & VA_MEMBER_MASK;
		vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25;

	} else if (is5365(dev)) {
		u16 entry = 0;

		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
			    VTA_RW_STATE_WR | VTA_RW_OP_EN);
		b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry);

		vlan->valid = !!(entry & VA_VALID_65);
		vlan->members = entry & VA_MEMBER_MASK;
		vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65;
	} else {
		u32 entry = 0;

		b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
		b53_do_vlan_op(dev, VTA_CMD_READ);
		b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry);
		vlan->members = entry & VTE_MEMBERS;
		vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS;
		vlan->valid = true;
	}
}

static void b53_set_eap_mode(struct b53_device *dev, int port, int mode)
{
	u64 eap_conf;

	if (is5325(dev) || is5365(dev) || dev->chip_id == BCM5389_DEVICE_ID)
		return;

	b53_read64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), &eap_conf);

	if (is63xx(dev)) {
		eap_conf &= ~EAP_MODE_MASK_63XX;
		eap_conf |= (u64)mode << EAP_MODE_SHIFT_63XX;
	} else {
		eap_conf &= ~EAP_MODE_MASK;
		eap_conf |= (u64)mode << EAP_MODE_SHIFT;
	}

	b53_write64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), eap_conf);
}

static void b53_set_forwarding(struct b53_device *dev, int enable)
{
	u8 mgmt;

	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);

	if (enable)
		mgmt |= SM_SW_FWD_EN;
	else
		mgmt &= ~SM_SW_FWD_EN;

	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);

	if (!is5325(dev)) {
		/* Include IMP port in dumb forwarding mode */
		b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
		mgmt |= B53_MII_DUMB_FWDG_EN;
		b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);

		/* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
		 * frames should be flooded or not.
		 */
		b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
		mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IP_MC;
		b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
	} else {
		b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
		mgmt |= B53_IP_MC;
		b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
	}
}

static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
			    bool enable_filtering)
{
	u8 mgmt, vc0, vc1, vc4 = 0, vc5;

	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
	b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0);
	b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1);

	if (is5325(dev) || is5365(dev)) {
		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5);
	} else if (is63xx(dev)) {
		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4);
		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5);
	} else {
		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4);
		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
	}

	vc1 &= ~VC1_RX_MCST_FWD_EN;

	if (enable) {
		vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
		vc1 |= VC1_RX_MCST_UNTAG_EN;
		vc4 &= ~VC4_ING_VID_CHECK_MASK;
		if (enable_filtering) {
			vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
			vc5 |= VC5_DROP_VTABLE_MISS;
		} else {
			vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S;
			vc5 &= ~VC5_DROP_VTABLE_MISS;
		}

		if (is5325(dev))
			vc0 &= ~VC0_RESERVED_1;

		if (is5325(dev) || is5365(dev))
			vc1 |= VC1_RX_MCST_TAG_EN;

	} else {
		vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
		vc1 &= ~VC1_RX_MCST_UNTAG_EN;
		vc4 &= ~VC4_ING_VID_CHECK_MASK;
		vc5 &= ~VC5_DROP_VTABLE_MISS;

		if (is5325(dev) || is5365(dev))
			vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
		else
			vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S;

		if (is5325(dev) || is5365(dev))
			vc1 &= ~VC1_RX_MCST_TAG_EN;
	}

	if (!is5325(dev) && !is5365(dev))
		vc5 &= ~VC5_VID_FFF_EN;

	b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0);
	b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1);

	if (is5325(dev) || is5365(dev)) {
		/* enable the high 8 bit vid check on 5325 */
		if (is5325(dev) && enable)
			b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3,
				   VC3_HIGH_8BIT_EN);
		else
			b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);

		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4);
		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5);
	} else if (is63xx(dev)) {
		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0);
		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4);
		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5);
	} else {
		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4);
		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5);
	}

	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);

	dev->vlan_enabled = enable;

	dev_dbg(dev->dev, "Port %d VLAN enabled: %d, filtering: %d\n",
		port, enable, enable_filtering);
}

static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
{
	u32 port_mask = 0;
	u16 max_size = JMS_MIN_SIZE;

	if (is5325(dev) || is5365(dev))
		return -EINVAL;

	if (enable) {
		port_mask = dev->enabled_ports;
		max_size = JMS_MAX_SIZE;
		if (allow_10_100)
			port_mask |= JPM_10_100_JUMBO_EN;
	}

	b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask);
	return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size);
}

static int b53_flush_arl(struct b53_device *dev, u8 mask)
{
	unsigned int i;

	if (is5325(dev))
		return 0;

	b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
		   FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);

	for (i = 0; i < 10; i++) {
		u8 fast_age_ctrl;

		b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
			  &fast_age_ctrl);

		if (!(fast_age_ctrl & FAST_AGE_DONE))
			goto out;

		msleep(1);
	}

	return -ETIMEDOUT;
out:
	/* Only age dynamic entries (default behavior) */
	b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC);
	return 0;
}

static int b53_fast_age_port(struct b53_device *dev, int port)
{
	if (is5325(dev))
		return 0;

	b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);

	return b53_flush_arl(dev, FAST_AGE_PORT);
}

static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
{
	if (is5325(dev))
		return 0;

	b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);

	return b53_flush_arl(dev, FAST_AGE_VLAN);
}

void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
{
	struct b53_device *dev = ds->priv;
	unsigned int i;
	u16 pvlan;

	/* BCM5325 CPU port is at 8 */
	if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25)
		cpu_port = B53_CPU_PORT;

	/* Enable the IMP port to be in the same VLAN as the other ports
	 * on a per-port basis such that we only have Port i and IMP in
	 * the same VLAN.
	 */
	b53_for_each_port(dev, i) {
		b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan);
		pvlan |= BIT(cpu_port);
		b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan);
	}
}
EXPORT_SYMBOL(b53_imp_vlan_setup);

static void b53_port_set_ucast_flood(struct b53_device *dev, int port,
				     bool unicast)
{
	u16 uc;

	if (is5325(dev)) {
		if (port == B53_CPU_PORT_25)
			port = B53_CPU_PORT;

		b53_read16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, &uc);
		if (unicast)
			uc |= BIT(port) | B53_IEEE_UCAST_DROP_EN;
		else
			uc &= ~BIT(port);
		b53_write16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, uc);
	} else {
		b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
		if (unicast)
			uc |= BIT(port);
		else
			uc &= ~BIT(port);
		b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
	}
}

static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
				     bool multicast)
{
	u16 mc;

	if (is5325(dev)) {
		if (port == B53_CPU_PORT_25)
			port = B53_CPU_PORT;

		b53_read16(dev, B53_IEEE_PAGE, B53_IEEE_MCAST_DLF, &mc);
		if (multicast)
			mc |= BIT(port) | B53_IEEE_MCAST_DROP_EN;
		else
			mc &= ~BIT(port);
		b53_write16(dev, B53_IEEE_PAGE, B53_IEEE_MCAST_DLF, mc);
	} else {
		b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
		if (multicast)
			mc |= BIT(port);
		else
			mc &= ~BIT(port);
		b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);

		b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
		if (multicast)
			mc |= BIT(port);
		else
			mc &= ~BIT(port);
		b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
	}
}

static void b53_port_set_learning(struct b53_device *dev, int port,
				  bool learning)
{
	u16 reg;

	if (is5325(dev))
		return;

	b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, &reg);
	if (learning)
		reg &= ~BIT(port);
	else
		reg |= BIT(port);
	b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg);
}

static void b53_port_set_isolated(struct b53_device *dev, int port,
				  bool isolated)
{
	u8 offset;
	u16 reg;

	if (is5325(dev))
		offset = B53_PROTECTED_PORT_SEL_25;
	else
		offset = B53_PROTECTED_PORT_SEL;

	b53_read16(dev, B53_CTRL_PAGE, offset, &reg);
	if (isolated)
		reg |= BIT(port);
	else
		reg &= ~BIT(port);
	b53_write16(dev, B53_CTRL_PAGE, offset, reg);
}

static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
{
	struct b53_device *dev = ds->priv;
	u16 reg;

	b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
	if (enable)
		reg |= BIT(port);
	else
		reg &= ~BIT(port);
	b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
}

int b53_setup_port(struct dsa_switch *ds, int port)
{
	struct b53_device *dev = ds->priv;

	b53_port_set_ucast_flood(dev, port, true);
	b53_port_set_mcast_flood(dev, port, true);
	b53_port_set_learning(dev, port, false);
	b53_port_set_isolated(dev, port, false);

	/* Force all traffic to go to the CPU port to prevent the ASIC from
	 * trying to forward to bridged ports on matching FDB entries, then
	 * dropping frames because it isn't allowed to forward there.
	 */
	if (dsa_is_user_port(ds, port))
		b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);

	if (is5325(dev) &&
	    in_range(port, 1, 4)) {
		u8 reg;

		b53_read8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, &reg);
		reg &= ~PD_MODE_POWER_DOWN_PORT(0);
		if (dsa_is_unused_port(ds, port))
			reg |= PD_MODE_POWER_DOWN_PORT(port);
		else
			reg &= ~PD_MODE_POWER_DOWN_PORT(port);
		b53_write8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, reg);
	}

	return 0;
}
EXPORT_SYMBOL(b53_setup_port);

int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
	struct b53_device *dev = ds->priv;
	unsigned int cpu_port;
	int ret = 0;
	u16 pvlan;

	if (!dsa_is_user_port(ds, port))
		return 0;

	cpu_port = dsa_to_port(ds, port)->cpu_dp->index;

	if (dev->ops->phy_enable)
		dev->ops->phy_enable(dev, port);

	if (dev->ops->irq_enable)
		ret = dev->ops->irq_enable(dev, port);
	if (ret)
		return ret;

	/* Clear the Rx and Tx disable bits and set to no spanning tree */
	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0);

	/* Set this port, and only this one to be in the default VLAN,
	 * if member of a bridge, restore its membership prior to
	 * bringing down this port.
	 */
	b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
	pvlan &= ~0x1ff;
	pvlan |= BIT(port);
	pvlan |= dev->ports[port].vlan_ctl_mask;
	b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);

	b53_imp_vlan_setup(ds, cpu_port);

	/* If EEE was enabled, restore it */
	if (dev->ports[port].eee.eee_enabled)
		b53_eee_enable_set(ds, port, true);

	return 0;
}
EXPORT_SYMBOL(b53_enable_port);

void b53_disable_port(struct dsa_switch *ds, int port)
{
	struct b53_device *dev = ds->priv;
	u8 reg;

	/* Disable Tx/Rx for the port */
	b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
	reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);

	if (dev->ops->phy_disable)
		dev->ops->phy_disable(dev, port);

	if (dev->ops->irq_disable)
		dev->ops->irq_disable(dev, port);
}
EXPORT_SYMBOL(b53_disable_port);

void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
{
	struct b53_device *dev = ds->priv;
	bool tag_en = !(dev->tag_protocol == DSA_TAG_PROTO_NONE);
	u8 hdr_ctl, val;
	u16 reg;

	/* Resolve which bit controls the Broadcom tag */
	switch (port) {
	case 8:
		val = BRCM_HDR_P8_EN;
		break;
	case 7:
		val = BRCM_HDR_P7_EN;
		break;
	case 5:
		val = BRCM_HDR_P5_EN;
		break;
	default:
		val = 0;
		break;
	}

	/* Enable management mode if tagging is requested */
	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &hdr_ctl);
	if (tag_en)
		hdr_ctl |= SM_SW_FWD_MODE;
	else
		hdr_ctl &= ~SM_SW_FWD_MODE;
	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, hdr_ctl);

	/* Configure the appropriate IMP port */
	b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &hdr_ctl);
	if (port == 8)
		hdr_ctl |= GC_FRM_MGMT_PORT_MII;
	else if (port == 5)
		hdr_ctl |= GC_FRM_MGMT_PORT_M;
	b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl);

	/* B53_BRCM_HDR not present on devices with legacy tags */
	if (dev->tag_protocol == DSA_TAG_PROTO_BRCM_LEGACY ||
	    dev->tag_protocol == DSA_TAG_PROTO_BRCM_LEGACY_FCS)
		return;

	/* Enable Broadcom tags for IMP port */
	b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
	if (tag_en)
		hdr_ctl |= val;
	else
		hdr_ctl &= ~val;
	b53_write8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, hdr_ctl);

	/* Registers below are only accessible on newer devices */
	if (!is58xx(dev))
		return;

	/* Enable reception Broadcom tag for CPU TX (switch RX) to
	 * allow us to tag outgoing frames
	 */
	b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, &reg);
	if (tag_en)
		reg &= ~BIT(port);
	else
		reg |= BIT(port);
	b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, reg);

	/* Enable transmission of Broadcom tags from the switch (CPU RX) to
	 * allow delivering frames to the per-port net_devices
	 */
	b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, &reg);
	if (tag_en)
		reg &= ~BIT(port);
	else
		reg |= BIT(port);
	b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, reg);
}
EXPORT_SYMBOL(b53_brcm_hdr_setup);

static void b53_enable_cpu_port(struct b53_device *dev, int port)
{
	u8 port_ctrl;

	/* BCM5325 CPU port is at 8 */
	if ((is5325(dev) || is5365(dev)) && port == B53_CPU_PORT_25)
		port = B53_CPU_PORT;

	port_ctrl = PORT_CTRL_RX_BCST_EN |
		    PORT_CTRL_RX_MCST_EN |
		    PORT_CTRL_RX_UCST_EN;
	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);

	b53_brcm_hdr_setup(dev->ds, port);
}

static void b53_enable_mib(struct b53_device *dev)
{
	u8 gc;

	b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
	gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN);
	b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
}

static void b53_enable_stp(struct b53_device *dev)
{
	u8 gc;

	b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
	gc |= GC_RX_BPDU_EN;
	b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
}

static u16 b53_default_pvid(struct b53_device *dev)
{
	if (is5325(dev) || is5365(dev))
		return 1;
	else
		return 0;
}

static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port)
{
	struct b53_device *dev = ds->priv;

	return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port);
}

static bool b53_vlan_port_may_join_untagged(struct dsa_switch *ds, int port)
{
	struct b53_device *dev = ds->priv;
	struct dsa_port *dp;

	if (!dev->vlan_filtering)
		return true;

	dp = dsa_to_port(ds, port);

	if (dsa_port_is_cpu(dp))
		return true;

	return dp->bridge == NULL;
}

int b53_configure_vlan(struct dsa_switch *ds)
{
	struct b53_device *dev = ds->priv;
	struct b53_vlan vl = { 0 };
	struct b53_vlan *v;
	int i, def_vid;
	u16 vid;

	def_vid = b53_default_pvid(dev);

	/* clear all vlan entries */
	if (is5325(dev) || is5365(dev)) {
		for (i = def_vid; i < dev->num_vlans; i++)
			b53_set_vlan_entry(dev, i, &vl);
	} else {
		b53_do_vlan_op(dev, VTA_CMD_CLEAR);
	}

	b53_enable_vlan(dev, -1, dev->vlan_enabled, dev->vlan_filtering);

	/* Create an untagged VLAN entry for the default PVID in case
	 * CONFIG_VLAN_8021Q is disabled and there are no calls to
	 * dsa_user_vlan_rx_add_vid() to create the default VLAN
	 * entry. Do this only when the tagging protocol is not
	 * DSA_TAG_PROTO_NONE
	 */
	v = &dev->vlans[def_vid];
	b53_for_each_port(dev, i) {
		if (!b53_vlan_port_may_join_untagged(ds, i))
			continue;

		vl.members |= BIT(i);
		if (!b53_vlan_port_needs_forced_tagged(ds, i))
			vl.untag = vl.members;
		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(i),
			    def_vid);
	}
	b53_set_vlan_entry(dev, def_vid, &vl);

	if (dev->vlan_filtering) {
		/* Upon initial call we have not set-up any VLANs, but upon
		 * system resume, we need to restore all VLAN entries.
		 */
		for (vid = def_vid + 1; vid < dev->num_vlans; vid++) {
			v = &dev->vlans[vid];

			if (!v->members)
				continue;

			b53_set_vlan_entry(dev, vid, v);
			b53_fast_age_vlan(dev, vid);
		}

		b53_for_each_port(dev, i) {
			if (!dsa_is_cpu_port(ds, i))
				b53_write16(dev, B53_VLAN_PAGE,
					    B53_VLAN_PORT_DEF_TAG(i),
					    dev->ports[i].pvid);
		}
	}

	return 0;
}
EXPORT_SYMBOL(b53_configure_vlan);

static void b53_switch_reset_gpio(struct b53_device *dev)
{
	int gpio = dev->reset_gpio;

	if (gpio < 0)
		return;

	/* Reset sequence: RESET low(50ms)->high(20ms)
	 */
	gpio_set_value(gpio, 0);
	mdelay(50);

	gpio_set_value(gpio, 1);
	mdelay(20);

	dev->current_page = 0xff;
}

static int b53_switch_reset(struct b53_device *dev)
{
	unsigned int timeout = 1000;
	u8 mgmt, reg;

	b53_switch_reset_gpio(dev);

	if (is539x(dev)) {
		b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83);
		b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
	}

	/* This is specific to 58xx devices here, do not use is58xx() which
	 * covers the larger Starfighter 2 family, including 7445/7278 which
	 * still use this driver as a library and need to perform the reset
	 * earlier.
	 */
	if (dev->chip_id == BCM58XX_DEVICE_ID ||
	    dev->chip_id == BCM583XX_DEVICE_ID) {
		b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
		reg |= SW_RST | EN_SW_RST | EN_CH_RST;
		b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);

		do {
			b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
			if (!(reg & SW_RST))
				break;

			usleep_range(1000, 2000);
		} while (timeout-- > 0);

		if (timeout == 0) {
			dev_err(dev->dev,
				"Timeout waiting for SW_RST to clear!\n");
			return -ETIMEDOUT;
		}
	}

	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);

	if (!(mgmt & SM_SW_FWD_EN)) {
		mgmt &= ~SM_SW_FWD_MODE;
		mgmt |= SM_SW_FWD_EN;

		b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
		b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);

		if (!(mgmt & SM_SW_FWD_EN)) {
			dev_err(dev->dev, "Failed to enable switch!\n");
			return -EINVAL;
		}
	}

	b53_enable_mib(dev);
	b53_enable_stp(dev);

	return b53_flush_arl(dev, FAST_AGE_STATIC);
}

static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
	struct b53_device *priv = ds->priv;
	u16 value = 0;
	int ret;

	if (priv->ops->phy_read16)
		ret = priv->ops->phy_read16(priv, addr, reg, &value);
	else
		ret = b53_read16(priv, B53_PORT_MII_PAGE(addr),
				 reg * 2, &value);

	return ret ? ret : value;
}

static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
	struct b53_device *priv = ds->priv;

	if (priv->ops->phy_write16)
		return priv->ops->phy_write16(priv, addr, reg, val);

	return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val);
}

static int b53_reset_switch(struct b53_device *priv)
{
	/* reset vlans */
	memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans);
	memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports);

	priv->serdes_lane = B53_INVALID_LANE;

	return b53_switch_reset(priv);
}

static int b53_apply_config(struct b53_device *priv)
{
	/* disable switching */
	b53_set_forwarding(priv, 0);

	b53_configure_vlan(priv->ds);

	/* enable switching */
	b53_set_forwarding(priv, 1);

	return 0;
}

static void b53_reset_mib(struct b53_device *priv)
{
	u8 gc;

	b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);

	b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB);
	msleep(1);
	b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB);
	msleep(1);
}

static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev)
{
	if (is5365(dev))
		return b53_mibs_65;
	else if (is63xx(dev))
		return b53_mibs_63xx;
	else if (is58xx(dev))
		return b53_mibs_58xx;
	else
		return b53_mibs;
}

static unsigned int b53_get_mib_size(struct b53_device *dev)
{
	if (is5365(dev))
		return B53_MIBS_65_SIZE;
	else if (is63xx(dev))
		return B53_MIBS_63XX_SIZE;
	else if (is58xx(dev))
		return B53_MIBS_58XX_SIZE;
	else
		return B53_MIBS_SIZE;
}

static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port)
{
	/* These ports typically do not have built-in PHYs */
	switch (port) {
	case B53_CPU_PORT_25:
	case 7:
	case B53_CPU_PORT:
		return NULL;
	}

	return mdiobus_get_phy(ds->user_mii_bus, port);
}

void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
		     uint8_t *data)
{
	struct b53_device *dev = ds->priv;
	const struct b53_mib_desc *mibs = b53_get_mib(dev);
	unsigned int mib_size = b53_get_mib_size(dev);
	struct phy_device *phydev;
	unsigned int i;

	if (stringset == ETH_SS_STATS) {
		for (i = 0; i < mib_size; i++)
			ethtool_puts(&data, mibs[i].name);
	} else if (stringset == ETH_SS_PHY_STATS) {
		phydev = b53_get_phy_device(ds, port);
		if (!phydev)
			return;

		phy_ethtool_get_strings(phydev, data);
	}
}
EXPORT_SYMBOL(b53_get_strings);

void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
	struct b53_device *dev = ds->priv;
	const struct b53_mib_desc *mibs = b53_get_mib(dev);
	unsigned int mib_size = b53_get_mib_size(dev);
	const struct b53_mib_desc *s;
	unsigned int i;
	u64 val = 0;

	if (is5365(dev) && port == 5)
		port = 8;

	mutex_lock(&dev->stats_mutex);

	for (i = 0; i < mib_size; i++) {
		s = &mibs[i];

		if (s->size == 8) {
			b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val);
		} else {
			u32 val32;

			b53_read32(dev, B53_MIB_PAGE(port), s->offset,
				   &val32);
			val = val32;
		}
		data[i] = (u64)val;
	}

	mutex_unlock(&dev->stats_mutex);
}
EXPORT_SYMBOL(b53_get_ethtool_stats);

void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
	struct phy_device *phydev;

	phydev = b53_get_phy_device(ds, port);
	if (!phydev)
		return;

	phy_ethtool_get_stats(phydev, NULL, data);
}
EXPORT_SYMBOL(b53_get_ethtool_phy_stats);

int b53_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct b53_device *dev = ds->priv;
	struct phy_device *phydev;

	if (sset == ETH_SS_STATS) {
		return b53_get_mib_size(dev);
	} else if (sset == ETH_SS_PHY_STATS) {
		phydev = b53_get_phy_device(ds, port);
		if (!phydev)
			return 0;

		return phy_ethtool_get_sset_count(phydev);
	}

	return 0;
}
EXPORT_SYMBOL(b53_get_sset_count);

enum b53_devlink_resource_id {
	B53_DEVLINK_PARAM_ID_VLAN_TABLE,
};

static u64 b53_devlink_vlan_table_get(void *priv)
{
	struct b53_device *dev = priv;
	struct b53_vlan *vl;
	unsigned int i;
	u64 count = 0;

	for (i = 0; i < dev->num_vlans; i++) {
		vl = &dev->vlans[i];
		if (vl->members)
			count++;
	}

	return count;
}

int b53_setup_devlink_resources(struct dsa_switch *ds)
{
	struct devlink_resource_size_params size_params;
	struct b53_device *dev = ds->priv;
	int err;

	devlink_resource_size_params_init(&size_params, dev->num_vlans,
					  dev->num_vlans,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	err = dsa_devlink_resource_register(ds, "VLAN", dev->num_vlans,
					    B53_DEVLINK_PARAM_ID_VLAN_TABLE,
					    DEVLINK_RESOURCE_ID_PARENT_TOP,
					    &size_params);
	if (err)
		goto out;

	dsa_devlink_resource_occ_get_register(ds,
					      B53_DEVLINK_PARAM_ID_VLAN_TABLE,
					      b53_devlink_vlan_table_get, dev);

	return 0;
out:
	dsa_devlink_resources_unregister(ds);
	return err;
}
EXPORT_SYMBOL(b53_setup_devlink_resources);

static int b53_setup(struct dsa_switch *ds)
{
	struct b53_device *dev = ds->priv;
	struct b53_vlan *vl;
	unsigned int port;
	u16 pvid;
	int ret;

	/* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set
	 * which forces the CPU port to be tagged in all VLANs.
	 */
	ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE;

	/* The switch does not tell us the original VLAN for untagged
	 * packets, so keep the CPU port always tagged.
	 */
	ds->untag_vlan_aware_bridge_pvid = true;

	if (dev->chip_id == BCM53101_DEVICE_ID) {
		/* BCM53101 uses 0.5 second increments */
		ds->ageing_time_min = 1 * 500;
		ds->ageing_time_max = AGE_TIME_MAX * 500;
	} else {
		/* Everything else uses 1 second increments */
		ds->ageing_time_min = 1 * 1000;
		ds->ageing_time_max = AGE_TIME_MAX * 1000;
	}

	ret = b53_reset_switch(dev);
	if (ret) {
		dev_err(ds->dev, "failed to reset switch\n");
		return ret;
	}

	/* setup default vlan for filtering mode */
	pvid = b53_default_pvid(dev);
	vl = &dev->vlans[pvid];
	b53_for_each_port(dev, port) {
		vl->members |= BIT(port);
		if (!b53_vlan_port_needs_forced_tagged(ds, port))
			vl->untag |= BIT(port);
	}

	b53_reset_mib(dev);

	ret = b53_apply_config(dev);
	if (ret) {
		dev_err(ds->dev, "failed to apply configuration\n");
		return ret;
	}

	/* Configure IMP/CPU port, disable all other ports. Enabled
	 * ports will be configured with .port_enable
	 */
	for (port = 0; port < dev->num_ports; port++) {
		if (dsa_is_cpu_port(ds, port))
			b53_enable_cpu_port(dev, port);
		else
			b53_disable_port(ds, port);
	}

	return b53_setup_devlink_resources(ds);
}

static void b53_teardown(struct dsa_switch *ds)
{
	dsa_devlink_resources_unregister(ds);
}

static void b53_force_link(struct b53_device *dev, int port, int link)
{
	u8 reg, val, off;

	/* Override the port settings */
	if (port == dev->imp_port) {
		off = B53_PORT_OVERRIDE_CTRL;
		val = PORT_OVERRIDE_EN;
	} else if (is5325(dev)) {
		return;
	} else {
		off = B53_GMII_PORT_OVERRIDE_CTRL(port);
		val = GMII_PO_EN;
	}

	b53_read8(dev, B53_CTRL_PAGE, off, &reg);
	reg |= val;
	if (link)
		reg |= PORT_OVERRIDE_LINK;
	else
		reg &= ~PORT_OVERRIDE_LINK;
	b53_write8(dev, B53_CTRL_PAGE, off, reg);
}

static void b53_force_port_config(struct b53_device *dev, int port,
				  int speed, int duplex,
				  bool tx_pause, bool rx_pause)
{
	u8 reg, val, off;

	/* Override the port settings */
	if (port == dev->imp_port) {
		off = B53_PORT_OVERRIDE_CTRL;
		val = PORT_OVERRIDE_EN;
	} else if (is5325(dev)) {
		return;
	} else {
		off = B53_GMII_PORT_OVERRIDE_CTRL(port);
		val = GMII_PO_EN;
	}

	b53_read8(dev, B53_CTRL_PAGE, off, &reg);
	reg |= val;
	if (duplex == DUPLEX_FULL)
		reg |= PORT_OVERRIDE_FULL_DUPLEX;
	else
		reg &= ~PORT_OVERRIDE_FULL_DUPLEX;

	reg &= ~(0x3 << GMII_PO_SPEED_S);
	if (is5301x(dev) || is58xx(dev))
		reg &= ~PORT_OVERRIDE_SPEED_2000M;

	switch (speed) {
	case 2000:
		reg |= PORT_OVERRIDE_SPEED_2000M;
		fallthrough;
	case SPEED_1000:
		reg |= PORT_OVERRIDE_SPEED_1000M;
		break;
	case SPEED_100:
		reg |= PORT_OVERRIDE_SPEED_100M;
		break;
	case SPEED_10:
		reg |= PORT_OVERRIDE_SPEED_10M;
		break;
	default:
		dev_err(dev->dev, "unknown speed: %d\n", speed);
		return;
	}

	if (is5325(dev))
		reg &= ~PORT_OVERRIDE_LP_FLOW_25;
	else
		reg &= ~(PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW);

	if (rx_pause) {
		if (is5325(dev))
			reg |= PORT_OVERRIDE_LP_FLOW_25;
		else
			reg |= PORT_OVERRIDE_RX_FLOW;
	}

	if (tx_pause) {
		if (is5325(dev))
			reg |= PORT_OVERRIDE_LP_FLOW_25;
		else
			reg |= PORT_OVERRIDE_TX_FLOW;
	}

	b53_write8(dev, B53_CTRL_PAGE, off, reg);
}

static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port,
				  phy_interface_t interface)
{
	struct b53_device *dev = ds->priv;
	u8 rgmii_ctrl = 0;

	b53_read8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), &rgmii_ctrl);
	rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);

	if (is6318_268(dev))
		rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE;

	rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII;

	b53_write8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), rgmii_ctrl);

	dev_dbg(ds->dev, "Configured port %d for %s\n", port,
		phy_modes(interface));
}

static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port,
				   phy_interface_t interface)
{
	struct b53_device *dev = ds->priv;
	u8 rgmii_ctrl = 0, off;

	if (port == dev->imp_port)
		off = B53_RGMII_CTRL_IMP;
	else
		off = B53_RGMII_CTRL_P(port);

	/* Configure the port RGMII clock delay by DLL disabled and
	 * tx_clk aligned timing (restoring to reset defaults)
	 */
	b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
	rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);

	/* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
	 * sure that we enable the port TX clock internal delay to
	 * account for this internal delay that is inserted, otherwise
	 * the switch won't be able to receive correctly.
	 *
	 * PHY_INTERFACE_MODE_RGMII means that we are not introducing
	 * any delay neither on transmission nor reception, so the
	 * BCM53125 must also be configured accordingly to account for
	 * the lack of delay and introduce
	 *
	 * The BCM53125 switch has its RX clock and TX clock control
	 * swapped, hence the reason why we modify the TX clock path in
	 * the "RGMII" case
	 */
	if (interface == PHY_INTERFACE_MODE_RGMII_TXID)
		rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
	if (interface == PHY_INTERFACE_MODE_RGMII)
		rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;

	if (dev->chip_id != BCM53115_DEVICE_ID)
		rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;

	b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);

	dev_info(ds->dev, "Configured port %d for %s\n", port,
		 phy_modes(interface));
}

static void b53_adjust_5325_mii(struct dsa_switch *ds, int port)
{
	struct b53_device *dev = ds->priv;
	u8 reg = 0;

	b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
		  &reg);

	/* reverse mii needs to be enabled */
	if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
		b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
			   reg | PORT_OVERRIDE_RV_MII_25);
		b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
			  &reg);

		if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
			dev_err(ds->dev,
				"Failed to enable reverse MII mode\n");
			return;
		}
	}
}

void b53_port_event(struct dsa_switch *ds, int port)
{
	struct b53_device *dev = ds->priv;
	bool link;
	u16 sts;

	b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts);
	link = !!(sts & BIT(port));
	dsa_port_phylink_mac_change(ds, port, link);
}
EXPORT_SYMBOL(b53_port_event);

static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
				 struct phylink_config *config)
{
	struct b53_device *dev = ds->priv;

	/* Internal ports need GMII for PHYLIB */
	__set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces);

	/* These switches appear to support MII and RevMII too, but beyond
	 * this, the code gives very few clues. FIXME: We probably need more
	 * interface modes here.
	 *
	 * According to b53_srab_mux_init(), ports 3..5 can support:
	 * SGMII, MII, GMII, RGMII or INTERNAL depending on the MUX setting.
	 * However, the interface mode read from the MUX configuration is
	 * not passed back to DSA, so phylink uses NA.
	 * DT can specify RGMII for ports 0, 1.
	 * For MDIO, port 8 can be RGMII_TXID.
	 */
	__set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces);

	/* BCM63xx RGMII ports support RGMII */
	if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
		phy_interface_set_rgmii(config->supported_interfaces);

	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
		MAC_10 | MAC_100;

	/* 5325/5365 are not capable of gigabit speeds, everything else is.
	 * Note: the original code also excluded Gigabit for MII, RevMII
	 * and 802.3z modes. MII and RevMII are not able to work above 100M,
	 * so will be excluded by the generic validator implementation.
	 * However, the exclusion of Gigabit for 802.3z just seems wrong.
	 */
	if (!(is5325(dev) || is5365(dev)))
		config->mac_capabilities |= MAC_1000;

	/* Get the implementation specific capabilities */
	if (dev->ops->phylink_get_caps)
		dev->ops->phylink_get_caps(dev, port, config);
}

static struct phylink_pcs *b53_phylink_mac_select_pcs(struct phylink_config *config,
						      phy_interface_t interface)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct b53_device *dev = dp->ds->priv;

	if (!dev->ops->phylink_mac_select_pcs)
		return NULL;

	return dev->ops->phylink_mac_select_pcs(dev, dp->index, interface);
}

static void b53_phylink_mac_config(struct phylink_config *config,
				   unsigned int mode,
				   const struct phylink_link_state *state)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	phy_interface_t interface = state->interface;
	struct dsa_switch *ds = dp->ds;
	struct b53_device *dev = ds->priv;
	int port = dp->index;

	if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
		b53_adjust_63xx_rgmii(ds, port, interface);

	if (mode == MLO_AN_FIXED) {
		if (is531x5(dev) && phy_interface_mode_is_rgmii(interface))
			b53_adjust_531x5_rgmii(ds, port, interface);

		/* configure MII port if necessary */
		if (is5325(dev))
			b53_adjust_5325_mii(ds, port);
	}
}

static void b53_phylink_mac_link_down(struct phylink_config *config,
				      unsigned int mode,
				      phy_interface_t interface)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct b53_device *dev = dp->ds->priv;
	int port = dp->index;

	if (mode == MLO_AN_PHY) {
		if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
			b53_force_link(dev, port, false);
		return;
	}

	if (mode == MLO_AN_FIXED) {
		b53_force_link(dev, port, false);
		return;
	}

	if (phy_interface_mode_is_8023z(interface) &&
	    dev->ops->serdes_link_set)
		dev->ops->serdes_link_set(dev, port, mode, interface, false);
}

static void b53_phylink_mac_link_up(struct phylink_config *config,
*config, 1642 struct phy_device *phydev, 1643 unsigned int mode, 1644 phy_interface_t interface, 1645 int speed, int duplex, 1646 bool tx_pause, bool rx_pause) 1647 { 1648 struct dsa_port *dp = dsa_phylink_to_port(config); 1649 struct dsa_switch *ds = dp->ds; 1650 struct b53_device *dev = ds->priv; 1651 struct ethtool_keee *p = &dev->ports[dp->index].eee; 1652 int port = dp->index; 1653 1654 if (mode == MLO_AN_PHY) { 1655 /* Re-negotiate EEE if it was enabled already */ 1656 p->eee_enabled = b53_eee_init(ds, port, phydev); 1657 1658 if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4)) { 1659 b53_force_port_config(dev, port, speed, duplex, 1660 tx_pause, rx_pause); 1661 b53_force_link(dev, port, true); 1662 } 1663 1664 return; 1665 } 1666 1667 if (mode == MLO_AN_FIXED) { 1668 /* Force flow control on BCM5301x's CPU port */ 1669 if (is5301x(dev) && dsa_is_cpu_port(ds, port)) 1670 tx_pause = rx_pause = true; 1671 1672 b53_force_port_config(dev, port, speed, duplex, 1673 tx_pause, rx_pause); 1674 b53_force_link(dev, port, true); 1675 return; 1676 } 1677 1678 if (phy_interface_mode_is_8023z(interface) && 1679 dev->ops->serdes_link_set) 1680 dev->ops->serdes_link_set(dev, port, mode, interface, true); 1681 } 1682 1683 int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, 1684 struct netlink_ext_ack *extack) 1685 { 1686 struct b53_device *dev = ds->priv; 1687 1688 if (dev->vlan_filtering != vlan_filtering) { 1689 dev->vlan_filtering = vlan_filtering; 1690 b53_apply_config(dev); 1691 } 1692 1693 return 0; 1694 } 1695 EXPORT_SYMBOL(b53_vlan_filtering); 1696 1697 static int b53_vlan_prepare(struct dsa_switch *ds, int port, 1698 const struct switchdev_obj_port_vlan *vlan) 1699 { 1700 struct b53_device *dev = ds->priv; 1701 1702 if ((is5325(dev) || is5365(dev)) && vlan->vid == 0) 1703 return -EOPNOTSUPP; 1704 1705 /* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of 1706 * receiving VLAN tagged frames at all, we can still allow the port to 1707 * be configured for egress untagged. 
1708 */ 1709 if (dev->chip_id == BCM7278_DEVICE_ID && port == 7 && 1710 !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) 1711 return -EINVAL; 1712 1713 if (vlan->vid >= dev->num_vlans) 1714 return -ERANGE; 1715 1716 b53_enable_vlan(dev, port, true, dev->vlan_filtering); 1717 1718 return 0; 1719 } 1720 1721 int b53_vlan_add(struct dsa_switch *ds, int port, 1722 const struct switchdev_obj_port_vlan *vlan, 1723 struct netlink_ext_ack *extack) 1724 { 1725 struct b53_device *dev = ds->priv; 1726 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 1727 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; 1728 struct b53_vlan *vl; 1729 u16 old_pvid, new_pvid; 1730 int err; 1731 1732 err = b53_vlan_prepare(ds, port, vlan); 1733 if (err) 1734 return err; 1735 1736 if (vlan->vid == 0) 1737 return 0; 1738 1739 old_pvid = dev->ports[port].pvid; 1740 if (pvid) 1741 new_pvid = vlan->vid; 1742 else if (!pvid && vlan->vid == old_pvid) 1743 new_pvid = b53_default_pvid(dev); 1744 else 1745 new_pvid = old_pvid; 1746 dev->ports[port].pvid = new_pvid; 1747 1748 vl = &dev->vlans[vlan->vid]; 1749 1750 if (dsa_is_cpu_port(ds, port)) 1751 untagged = false; 1752 1753 vl->members |= BIT(port); 1754 if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port)) 1755 vl->untag |= BIT(port); 1756 else 1757 vl->untag &= ~BIT(port); 1758 1759 if (!dev->vlan_filtering) 1760 return 0; 1761 1762 b53_set_vlan_entry(dev, vlan->vid, vl); 1763 b53_fast_age_vlan(dev, vlan->vid); 1764 1765 if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) { 1766 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), 1767 new_pvid); 1768 b53_fast_age_vlan(dev, old_pvid); 1769 } 1770 1771 return 0; 1772 } 1773 EXPORT_SYMBOL(b53_vlan_add); 1774 1775 int b53_vlan_del(struct dsa_switch *ds, int port, 1776 const struct switchdev_obj_port_vlan *vlan) 1777 { 1778 struct b53_device *dev = ds->priv; 1779 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 1780 struct b53_vlan *vl; 1781 u16 pvid; 1782 1783 if (vlan->vid == 0) 1784 return 0; 1785 1786 pvid = dev->ports[port].pvid; 1787 1788 vl = &dev->vlans[vlan->vid]; 1789 1790 vl->members &= ~BIT(port); 1791 1792 if (pvid == vlan->vid) 1793 pvid = b53_default_pvid(dev); 1794 dev->ports[port].pvid = pvid; 1795 1796 if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port)) 1797 vl->untag &= ~(BIT(port)); 1798 1799 if (!dev->vlan_filtering) 1800 return 0; 1801 1802 b53_set_vlan_entry(dev, vlan->vid, vl); 1803 b53_fast_age_vlan(dev, vlan->vid); 1804 1805 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid); 1806 b53_fast_age_vlan(dev, pvid); 1807 1808 return 0; 1809 } 1810 EXPORT_SYMBOL(b53_vlan_del); 1811 1812 /* Address Resolution Logic routines. Caller must hold &dev->arl_mutex. 
*/ 1813 static int b53_arl_op_wait(struct b53_device *dev) 1814 { 1815 unsigned int timeout = 10; 1816 u8 reg; 1817 1818 do { 1819 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); 1820 if (!(reg & ARLTBL_START_DONE)) 1821 return 0; 1822 1823 usleep_range(1000, 2000); 1824 } while (timeout--); 1825 1826 dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg); 1827 1828 return -ETIMEDOUT; 1829 } 1830 1831 static int b53_arl_rw_op(struct b53_device *dev, unsigned int op) 1832 { 1833 u8 reg; 1834 1835 if (op > ARLTBL_RW) 1836 return -EINVAL; 1837 1838 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); 1839 reg |= ARLTBL_START_DONE; 1840 if (op) 1841 reg |= ARLTBL_RW; 1842 else 1843 reg &= ~ARLTBL_RW; 1844 if (dev->vlan_enabled) 1845 reg &= ~ARLTBL_IVL_SVL_SELECT; 1846 else 1847 reg |= ARLTBL_IVL_SVL_SELECT; 1848 b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg); 1849 1850 return b53_arl_op_wait(dev); 1851 } 1852 1853 static void b53_arl_read_entry_25(struct b53_device *dev, 1854 struct b53_arl_entry *ent, u8 idx) 1855 { 1856 u64 mac_vid; 1857 1858 b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx), 1859 &mac_vid); 1860 b53_arl_to_entry_25(ent, mac_vid); 1861 } 1862 1863 static void b53_arl_write_entry_25(struct b53_device *dev, 1864 const struct b53_arl_entry *ent, u8 idx) 1865 { 1866 u64 mac_vid; 1867 1868 b53_arl_from_entry_25(&mac_vid, ent); 1869 b53_write64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx), 1870 mac_vid); 1871 } 1872 1873 static void b53_arl_read_entry_89(struct b53_device *dev, 1874 struct b53_arl_entry *ent, u8 idx) 1875 { 1876 u64 mac_vid; 1877 u16 fwd_entry; 1878 1879 b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx), 1880 &mac_vid); 1881 b53_read16(dev, B53_ARLIO_PAGE, B53_ARLTBL_DATA_ENTRY(idx), &fwd_entry); 1882 b53_arl_to_entry_89(ent, mac_vid, fwd_entry); 1883 } 1884 1885 static void b53_arl_write_entry_89(struct b53_device *dev, 1886 const struct b53_arl_entry *ent, u8 idx) 1887 { 1888 u32 fwd_entry; 1889 u64 mac_vid; 1890 1891 b53_arl_from_entry_89(&mac_vid, &fwd_entry, ent); 1892 b53_write64(dev, B53_ARLIO_PAGE, 1893 B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid); 1894 b53_write16(dev, B53_ARLIO_PAGE, 1895 B53_ARLTBL_DATA_ENTRY(idx), fwd_entry); 1896 } 1897 1898 static void b53_arl_read_entry_95(struct b53_device *dev, 1899 struct b53_arl_entry *ent, u8 idx) 1900 { 1901 u32 fwd_entry; 1902 u64 mac_vid; 1903 1904 b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx), 1905 &mac_vid); 1906 b53_read32(dev, B53_ARLIO_PAGE, B53_ARLTBL_DATA_ENTRY(idx), &fwd_entry); 1907 b53_arl_to_entry(ent, mac_vid, fwd_entry); 1908 } 1909 1910 static void b53_arl_write_entry_95(struct b53_device *dev, 1911 const struct b53_arl_entry *ent, u8 idx) 1912 { 1913 u32 fwd_entry; 1914 u64 mac_vid; 1915 1916 b53_arl_from_entry(&mac_vid, &fwd_entry, ent); 1917 b53_write64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx), 1918 mac_vid); 1919 b53_write32(dev, B53_ARLIO_PAGE, B53_ARLTBL_DATA_ENTRY(idx), 1920 fwd_entry); 1921 } 1922 1923 static int b53_arl_read(struct b53_device *dev, const u8 *mac, 1924 u16 vid, struct b53_arl_entry *ent, u8 *idx) 1925 { 1926 DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES); 1927 unsigned int i; 1928 int ret; 1929 1930 ret = b53_arl_op_wait(dev); 1931 if (ret) 1932 return ret; 1933 1934 bitmap_zero(free_bins, dev->num_arl_bins); 1935 1936 /* Read the bins */ 1937 for (i = 0; i < dev->num_arl_bins; i++) { 1938 b53_arl_read_entry(dev, ent, i); 1939 1940 if (!ent->is_valid) { 1941 set_bit(i, 
free_bins); 1942 continue; 1943 } 1944 if (!ether_addr_equal(ent->mac, mac)) 1945 continue; 1946 if (dev->vlan_enabled && ent->vid != vid) 1947 continue; 1948 *idx = i; 1949 return 0; 1950 } 1951 1952 *idx = find_first_bit(free_bins, dev->num_arl_bins); 1953 return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT; 1954 } 1955 1956 static int b53_arl_op(struct b53_device *dev, int op, int port, 1957 const unsigned char *addr, u16 vid, bool is_valid) 1958 { 1959 struct b53_arl_entry ent; 1960 u8 idx = 0; 1961 u64 mac; 1962 int ret; 1963 1964 /* Convert the array into a 64-bit MAC */ 1965 mac = ether_addr_to_u64(addr); 1966 1967 /* Perform a read for the given MAC and VID */ 1968 b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac); 1969 if (!is5325m(dev)) 1970 b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid); 1971 1972 /* Issue a read operation for this MAC */ 1973 ret = b53_arl_rw_op(dev, 1); 1974 if (ret) 1975 return ret; 1976 1977 ret = b53_arl_read(dev, addr, vid, &ent, &idx); 1978 1979 /* If this is a read, just finish now */ 1980 if (op) 1981 return ret; 1982 1983 switch (ret) { 1984 case -ETIMEDOUT: 1985 return ret; 1986 case -ENOSPC: 1987 dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n", 1988 addr, vid); 1989 return is_valid ? ret : 0; 1990 case -ENOENT: 1991 /* We could not find a matching MAC, so reset to a new entry */ 1992 dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n", 1993 addr, vid, idx); 1994 break; 1995 default: 1996 dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n", 1997 addr, vid, idx); 1998 break; 1999 } 2000 2001 /* For multicast address, the port is a bitmask and the validity 2002 * is determined by having at least one port being still active 2003 */ 2004 if (!is_multicast_ether_addr(addr)) { 2005 ent.port = port; 2006 ent.is_valid = is_valid; 2007 } else { 2008 if (is_valid) 2009 ent.port |= BIT(port); 2010 else 2011 ent.port &= ~BIT(port); 2012 2013 ent.is_valid = !!(ent.port); 2014 } 2015 2016 ent.vid = vid; 2017 ent.is_static = true; 2018 ent.is_age = false; 2019 memcpy(ent.mac, addr, ETH_ALEN); 2020 b53_arl_write_entry(dev, &ent, idx); 2021 2022 return b53_arl_rw_op(dev, 0); 2023 } 2024 2025 int b53_fdb_add(struct dsa_switch *ds, int port, 2026 const unsigned char *addr, u16 vid, 2027 struct dsa_db db) 2028 { 2029 struct b53_device *priv = ds->priv; 2030 int ret; 2031 2032 mutex_lock(&priv->arl_mutex); 2033 ret = b53_arl_op(priv, 0, port, addr, vid, true); 2034 mutex_unlock(&priv->arl_mutex); 2035 2036 return ret; 2037 } 2038 EXPORT_SYMBOL(b53_fdb_add); 2039 2040 int b53_fdb_del(struct dsa_switch *ds, int port, 2041 const unsigned char *addr, u16 vid, 2042 struct dsa_db db) 2043 { 2044 struct b53_device *priv = ds->priv; 2045 int ret; 2046 2047 mutex_lock(&priv->arl_mutex); 2048 ret = b53_arl_op(priv, 0, port, addr, vid, false); 2049 mutex_unlock(&priv->arl_mutex); 2050 2051 return ret; 2052 } 2053 EXPORT_SYMBOL(b53_fdb_del); 2054 2055 static void b53_read_arl_srch_ctl(struct b53_device *dev, u8 *val) 2056 { 2057 u8 offset; 2058 2059 if (is5325(dev) || is5365(dev)) 2060 offset = B53_ARL_SRCH_CTL_25; 2061 else if (dev->chip_id == BCM5389_DEVICE_ID || is5397_98(dev) || 2062 is63xx(dev)) 2063 offset = B53_ARL_SRCH_CTL_89; 2064 else 2065 offset = B53_ARL_SRCH_CTL; 2066 2067 if (is63xx(dev)) { 2068 u16 val16; 2069 2070 b53_read16(dev, B53_ARLIO_PAGE, offset, &val16); 2071 *val = val16 & 0xff; 2072 } else { 2073 b53_read8(dev, B53_ARLIO_PAGE, offset, val); 2074 } 2075 } 2076 2077 static void b53_write_arl_srch_ctl(struct b53_device *dev, u8 val) 
static void b53_write_arl_srch_ctl(struct b53_device *dev, u8 val)
{
	u8 offset;

	if (is5325(dev) || is5365(dev))
		offset = B53_ARL_SRCH_CTL_25;
	else if (dev->chip_id == BCM5389_DEVICE_ID || is5397_98(dev) ||
		 is63xx(dev))
		offset = B53_ARL_SRCH_CTL_89;
	else
		offset = B53_ARL_SRCH_CTL;

	if (is63xx(dev))
		b53_write16(dev, B53_ARLIO_PAGE, offset, val);
	else
		b53_write8(dev, B53_ARLIO_PAGE, offset, val);
}

static int b53_arl_search_wait(struct b53_device *dev)
{
	unsigned int timeout = 1000;
	u8 reg;

	do {
		b53_read_arl_srch_ctl(dev, &reg);
		if (!(reg & ARL_SRCH_STDN))
			return -ENOENT;

		if (reg & ARL_SRCH_VLID)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout--);

	return -ETIMEDOUT;
}

static void b53_arl_search_read_25(struct b53_device *dev, u8 idx,
				   struct b53_arl_entry *ent)
{
	u64 mac_vid;

	b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_25,
		   &mac_vid);
	b53_arl_to_entry_25(ent, mac_vid);
}

static void b53_arl_search_read_65(struct b53_device *dev, u8 idx,
				   struct b53_arl_entry *ent)
{
	u64 mac_vid;

	b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_65,
		   &mac_vid);
	b53_arl_to_entry_25(ent, mac_vid);
}

static void b53_arl_search_read_89(struct b53_device *dev, u8 idx,
				   struct b53_arl_entry *ent)
{
	u16 fwd_entry;
	u64 mac_vid;

	b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_MACVID_89,
		   &mac_vid);
	b53_read16(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_89, &fwd_entry);
	b53_arl_to_entry_89(ent, mac_vid, fwd_entry);
}

static void b53_arl_search_read_63xx(struct b53_device *dev, u8 idx,
				     struct b53_arl_entry *ent)
{
	u16 fwd_entry;
	u64 mac_vid;

	b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_MACVID_63XX,
		   &mac_vid);
	b53_read16(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_63XX, &fwd_entry);
	b53_arl_search_to_entry_63xx(ent, mac_vid, fwd_entry);
}

static void b53_arl_search_read_95(struct b53_device *dev, u8 idx,
				   struct b53_arl_entry *ent)
{
	u32 fwd_entry;
	u64 mac_vid;

	b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_MACVID(idx),
		   &mac_vid);
	b53_read32(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL(idx),
		   &fwd_entry);
	b53_arl_to_entry(ent, mac_vid, fwd_entry);
}

static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
			dsa_fdb_dump_cb_t *cb, void *data)
{
	if (!ent->is_valid)
		return 0;

	if (port != ent->port)
		return 0;

	return cb(ent->mac, ent->vid, ent->is_static, data);
}

int b53_fdb_dump(struct dsa_switch *ds, int port,
		 dsa_fdb_dump_cb_t *cb, void *data)
{
	unsigned int count = 0, results_per_hit = 1;
	struct b53_device *priv = ds->priv;
	struct b53_arl_entry results[2];
	int ret;

	if (priv->num_arl_bins > 2)
		results_per_hit = 2;

	mutex_lock(&priv->arl_mutex);

	/* Start search operation */
	b53_write_arl_srch_ctl(priv, ARL_SRCH_STDN);

	do {
		ret = b53_arl_search_wait(priv);
		if (ret)
			break;

		b53_arl_search_read(priv, 0, &results[0]);
		ret = b53_fdb_copy(port, &results[0], cb, data);
		if (ret)
			break;

		if (results_per_hit == 2) {
			b53_arl_search_read(priv, 1, &results[1]);
			ret = b53_fdb_copy(port, &results[1], cb, data);
			if (ret)
				break;

			if (!results[0].is_valid && !results[1].is_valid)
				break;
		}

	} while (count++ < b53_max_arl_entries(priv) / results_per_hit);

	mutex_unlock(&priv->arl_mutex);

	return 0;
}
EXPORT_SYMBOL(b53_fdb_dump);

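/* Multicast (MDB) entries share the ARL with unicast entries; for a
 * multicast address b53_arl_op() treats the port field as a bitmask, so
 * adding or deleting an entry only sets or clears this port's bit.
 */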
int b53_mdb_add(struct dsa_switch *ds, int port,
		const struct switchdev_obj_port_mdb *mdb,
		struct dsa_db db)
{
	struct b53_device *priv = ds->priv;
	int ret;

	/* 5325 and 5365 require some more massaging, but could
	 * be supported eventually
	 */
	if (is5325(priv) || is5365(priv))
		return -EOPNOTSUPP;

	mutex_lock(&priv->arl_mutex);
	ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
	mutex_unlock(&priv->arl_mutex);

	return ret;
}
EXPORT_SYMBOL(b53_mdb_add);

int b53_mdb_del(struct dsa_switch *ds, int port,
		const struct switchdev_obj_port_mdb *mdb,
		struct dsa_db db)
{
	struct b53_device *priv = ds->priv;
	int ret;

	mutex_lock(&priv->arl_mutex);
	ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false);
	mutex_unlock(&priv->arl_mutex);
	if (ret)
		dev_err(ds->dev, "failed to delete MDB entry\n");

	return ret;
}
EXPORT_SYMBOL(b53_mdb_del);

int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
		bool *tx_fwd_offload, struct netlink_ext_ack *extack)
{
	struct b53_device *dev = ds->priv;
	struct b53_vlan *vl;
	s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
	u16 pvlan, reg, pvid;
	unsigned int i;

	/* On 7278, port 7 which connects to the ASP should only receive
	 * traffic from matching CFP rules.
	 */
	if (dev->chip_id == BCM7278_DEVICE_ID && port == 7)
		return -EINVAL;

	pvid = b53_default_pvid(dev);
	vl = &dev->vlans[pvid];

	if (dev->vlan_filtering) {
		/* Make this port leave the "join all VLANs" group since we
		 * will have proper VLAN entries from now on
		 */
		if (is58xx(dev)) {
			b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN,
				   &reg);
			reg &= ~BIT(port);
			if ((reg & BIT(cpu_port)) == BIT(cpu_port))
				reg &= ~BIT(cpu_port);
			b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN,
				    reg);
		}

		b53_get_vlan_entry(dev, pvid, vl);
		vl->members &= ~BIT(port);
		b53_set_vlan_entry(dev, pvid, vl);
	}

	b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);

	b53_for_each_port(dev, i) {
		if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
			continue;

		/* Add this local port to the remote port VLAN control
		 * membership and update the remote port bitmask
		 */
		b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
		reg |= BIT(port);
		b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
		dev->ports[i].vlan_ctl_mask = reg;

		pvlan |= BIT(i);
	}

	/* Disable redirection of unknown SA to the CPU port */
	b53_set_eap_mode(dev, port, EAP_MODE_BASIC);

	/* Configure the local port VLAN control membership to include
	 * remote ports and update the local port bitmask
	 */
	b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
	dev->ports[port].vlan_ctl_mask = pvlan;

	return 0;
}
EXPORT_SYMBOL(b53_br_join);

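/* Undo what b53_br_join() did: remove this port from the other bridge
 * members' port-based VLAN masks and, when VLAN filtering is enabled, put
 * it back into the default PVID VLAN (and the join-all-VLANs group on 58xx).
 */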
void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
{
	struct b53_device *dev = ds->priv;
	struct b53_vlan *vl;
	s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
	unsigned int i;
	u16 pvlan, reg, pvid;

	b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);

	b53_for_each_port(dev, i) {
		/* Don't touch the remaining ports */
		if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
			continue;

		b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
		reg &= ~BIT(port);
		b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
		dev->ports[port].vlan_ctl_mask = reg;

		/* Prevent self removal to preserve isolation */
		if (port != i)
			pvlan &= ~BIT(i);
	}

	/* Enable redirection of unknown SA to the CPU port */
	b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);

	b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
	dev->ports[port].vlan_ctl_mask = pvlan;

	pvid = b53_default_pvid(dev);
	vl = &dev->vlans[pvid];

	if (dev->vlan_filtering) {
		/* Make this port join all VLANs without VLAN entries */
		if (is58xx(dev)) {
			b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
			reg |= BIT(port);
			if (!(reg & BIT(cpu_port)))
				reg |= BIT(cpu_port);
			b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
		}

		b53_get_vlan_entry(dev, pvid, vl);
		vl->members |= BIT(port);
		b53_set_vlan_entry(dev, pvid, vl);
	}
}
EXPORT_SYMBOL(b53_br_leave);

void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
{
	struct b53_device *dev = ds->priv;
	u8 hw_state;
	u8 reg;

	switch (state) {
	case BR_STATE_DISABLED:
		hw_state = PORT_CTRL_DIS_STATE;
		break;
	case BR_STATE_LISTENING:
		hw_state = PORT_CTRL_LISTEN_STATE;
		break;
	case BR_STATE_LEARNING:
		hw_state = PORT_CTRL_LEARN_STATE;
		break;
	case BR_STATE_FORWARDING:
		hw_state = PORT_CTRL_FWD_STATE;
		break;
	case BR_STATE_BLOCKING:
		hw_state = PORT_CTRL_BLOCK_STATE;
		break;
	default:
		dev_err(ds->dev, "invalid STP state: %d\n", state);
		return;
	}

	b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
	reg &= ~PORT_CTRL_STP_STATE_MASK;
	reg |= hw_state;
	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
}
EXPORT_SYMBOL(b53_br_set_stp_state);

void b53_br_fast_age(struct dsa_switch *ds, int port)
{
	struct b53_device *dev = ds->priv;

	if (b53_fast_age_port(dev, port))
		dev_err(ds->dev, "fast ageing failed\n");
}
EXPORT_SYMBOL(b53_br_fast_age);

int b53_br_flags_pre(struct dsa_switch *ds, int port,
		     struct switchdev_brport_flags flags,
		     struct netlink_ext_ack *extack)
{
	struct b53_device *dev = ds->priv;
	unsigned long mask = (BR_FLOOD | BR_MCAST_FLOOD | BR_ISOLATED);

	if (!is5325(dev))
		mask |= BR_LEARNING;

	if (flags.mask & ~mask)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(b53_br_flags_pre);

int b53_br_flags(struct dsa_switch *ds, int port,
		 struct switchdev_brport_flags flags,
		 struct netlink_ext_ack *extack)
{
	if (flags.mask & BR_FLOOD)
		b53_port_set_ucast_flood(ds->priv, port,
					 !!(flags.val & BR_FLOOD));
	if (flags.mask & BR_MCAST_FLOOD)
		b53_port_set_mcast_flood(ds->priv, port,
					 !!(flags.val & BR_MCAST_FLOOD));
	if (flags.mask & BR_LEARNING)
		b53_port_set_learning(ds->priv, port,
				      !!(flags.val & BR_LEARNING));
	if (flags.mask & BR_ISOLATED)
		b53_port_set_isolated(ds->priv, port,
				      !!(flags.val & BR_ISOLATED));

	return 0;
}
EXPORT_SYMBOL(b53_br_flags);

static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
{
	/* Broadcom switches will accept enabling Broadcom tags on the
	 * following ports: 5, 7 and 8; any other port is not supported
	 */
	switch (port) {
	case B53_CPU_PORT_25:
	case 7:
	case B53_CPU_PORT:
		return true;
	}

	return false;
}

static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port,
				     enum dsa_tag_protocol tag_protocol)
{
	bool ret = b53_possible_cpu_port(ds, port);

	if (!ret) {
		dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n",
			 port);
		return ret;
	}

	switch (tag_protocol) {
	case DSA_TAG_PROTO_BRCM:
	case DSA_TAG_PROTO_BRCM_PREPEND:
		dev_warn(ds->dev,
			 "Port %d is stacked to Broadcom tag switch\n", port);
		ret = false;
		break;
	default:
		ret = true;
		break;
	}

	return ret;
}

enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
					   enum dsa_tag_protocol mprot)
{
	struct b53_device *dev = ds->priv;

	if (!b53_can_enable_brcm_tags(ds, port, mprot)) {
		dev->tag_protocol = DSA_TAG_PROTO_NONE;
		goto out;
	}

	/* Older models require different 6 byte tags */
	if (is5325(dev) || is5365(dev)) {
		dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY_FCS;
		goto out;
	} else if (is63xx(dev)) {
		dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY;
		goto out;
	}

	/* Broadcom BCM58xx chips have a flow accelerator on Port 8
	 * which requires us to use the prepended Broadcom tag type
	 */
	if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT) {
		dev->tag_protocol = DSA_TAG_PROTO_BRCM_PREPEND;
		goto out;
	}

	dev->tag_protocol = DSA_TAG_PROTO_BRCM;
out:
	return dev->tag_protocol;
}
EXPORT_SYMBOL(b53_get_tag_protocol);

int b53_mirror_add(struct dsa_switch *ds, int port,
		   struct dsa_mall_mirror_tc_entry *mirror, bool ingress,
		   struct netlink_ext_ack *extack)
{
	struct b53_device *dev = ds->priv;
	u16 reg, loc;

	if (ingress)
		loc = B53_IG_MIR_CTL;
	else
		loc = B53_EG_MIR_CTL;

	b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
	reg |= BIT(port);
	b53_write16(dev, B53_MGMT_PAGE, loc, reg);

	b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
	reg &= ~CAP_PORT_MASK;
	reg |= mirror->to_local_port;
	reg |= MIRROR_EN;
	b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);

	return 0;
}
EXPORT_SYMBOL(b53_mirror_add);

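/* Tearing down a mirror rule only clears this port from the relevant
 * capture mask; MIRROR_EN is dropped once neither the ingress nor the
 * egress mask has any ports left.
 */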
void b53_mirror_del(struct dsa_switch *ds, int port,
		    struct dsa_mall_mirror_tc_entry *mirror)
{
	struct b53_device *dev = ds->priv;
	bool loc_disable = false, other_loc_disable = false;
	u16 reg, loc;

	if (mirror->ingress)
		loc = B53_IG_MIR_CTL;
	else
		loc = B53_EG_MIR_CTL;

	/* Update the desired ingress/egress register */
	b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
	reg &= ~BIT(port);
	if (!(reg & MIRROR_MASK))
		loc_disable = true;
	b53_write16(dev, B53_MGMT_PAGE, loc, reg);

	/* Now look at the other one to know if we can disable mirroring
	 * entirely
	 */
	if (mirror->ingress)
		b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, &reg);
	else
		b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, &reg);
	if (!(reg & MIRROR_MASK))
		other_loc_disable = true;

	b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
	/* Both no longer have ports, let's disable mirroring */
	if (loc_disable && other_loc_disable) {
		reg &= ~MIRROR_EN;
		reg &= ~mirror->to_local_port;
	}
	b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
}
EXPORT_SYMBOL(b53_mirror_del);

/* Returns 0 if EEE was not enabled, or 1 otherwise */
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
{
	int ret;

	if (!b53_support_eee(ds, port))
		return 0;

	ret = phy_init_eee(phy, false);
	if (ret)
		return 0;

	b53_eee_enable_set(ds, port, true);

	return 1;
}
EXPORT_SYMBOL(b53_eee_init);

bool b53_support_eee(struct dsa_switch *ds, int port)
{
	struct b53_device *dev = ds->priv;

	return !is5325(dev) && !is5365(dev) && !is63xx(dev);
}
EXPORT_SYMBOL(b53_support_eee);

int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
{
	struct b53_device *dev = ds->priv;
	struct ethtool_keee *p = &dev->ports[port].eee;

	p->eee_enabled = e->eee_enabled;
	b53_eee_enable_set(ds, port, e->eee_enabled);

	return 0;
}
EXPORT_SYMBOL(b53_set_mac_eee);

static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu)
{
	struct b53_device *dev = ds->priv;
	bool enable_jumbo;
	bool allow_10_100;

	if (is5325(dev) || is5365(dev))
		return 0;

	if (!dsa_is_cpu_port(ds, port))
		return 0;

	enable_jumbo = (mtu > ETH_DATA_LEN);
	allow_10_100 = !is63xx(dev);

	return b53_set_jumbo(dev, enable_jumbo, allow_10_100);
}

static int b53_get_max_mtu(struct dsa_switch *ds, int port)
{
	struct b53_device *dev = ds->priv;

	if (is5325(dev) || is5365(dev))
		return B53_MAX_MTU_25;

	return B53_MAX_MTU;
}

/* The ageing time register takes 1 second units; BCM53101 uses 0.5 second
 * units instead, hence the different divisor.
 */
int b53_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
	struct b53_device *dev = ds->priv;
	u32 atc;
	int reg;

	if (is63xx(dev))
		reg = B53_AGING_TIME_CONTROL_63XX;
	else
		reg = B53_AGING_TIME_CONTROL;

	if (dev->chip_id == BCM53101_DEVICE_ID)
		atc = DIV_ROUND_CLOSEST(msecs, 500);
	else
		atc = DIV_ROUND_CLOSEST(msecs, 1000);

	if (!is5325(dev) && !is5365(dev))
		atc |= AGE_CHANGE;

	b53_write32(dev, B53_MGMT_PAGE, reg, atc);

	return 0;
}
EXPORT_SYMBOL_GPL(b53_set_ageing_time);

static const struct phylink_mac_ops b53_phylink_mac_ops = {
	.mac_select_pcs = b53_phylink_mac_select_pcs,
	.mac_config = b53_phylink_mac_config,
	.mac_link_down = b53_phylink_mac_link_down,
	.mac_link_up = b53_phylink_mac_link_up,
};

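/* dsa_switch_ops shared by all B53 devices; b53_switch_alloc() installs
 * this table on the dsa_switch it allocates for the bus front-end.
 */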
static const struct dsa_switch_ops b53_switch_ops = {
	.get_tag_protocol = b53_get_tag_protocol,
	.setup = b53_setup,
	.teardown = b53_teardown,
	.get_strings = b53_get_strings,
	.get_ethtool_stats = b53_get_ethtool_stats,
	.get_sset_count = b53_get_sset_count,
	.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
	.phy_read = b53_phy_read16,
	.phy_write = b53_phy_write16,
	.phylink_get_caps = b53_phylink_get_caps,
	.port_setup = b53_setup_port,
	.port_enable = b53_enable_port,
	.port_disable = b53_disable_port,
	.support_eee = b53_support_eee,
	.set_mac_eee = b53_set_mac_eee,
	.set_ageing_time = b53_set_ageing_time,
	.port_bridge_join = b53_br_join,
	.port_bridge_leave = b53_br_leave,
	.port_pre_bridge_flags = b53_br_flags_pre,
	.port_bridge_flags = b53_br_flags,
	.port_stp_state_set = b53_br_set_stp_state,
	.port_fast_age = b53_br_fast_age,
	.port_vlan_filtering = b53_vlan_filtering,
	.port_vlan_add = b53_vlan_add,
	.port_vlan_del = b53_vlan_del,
	.port_fdb_dump = b53_fdb_dump,
	.port_fdb_add = b53_fdb_add,
	.port_fdb_del = b53_fdb_del,
	.port_mirror_add = b53_mirror_add,
	.port_mirror_del = b53_mirror_del,
	.port_mdb_add = b53_mdb_add,
	.port_mdb_del = b53_mdb_del,
	.port_max_mtu = b53_get_max_mtu,
	.port_change_mtu = b53_change_mtu,
};

static const struct b53_arl_ops b53_arl_ops_25 = {
	.arl_read_entry = b53_arl_read_entry_25,
	.arl_write_entry = b53_arl_write_entry_25,
	.arl_search_read = b53_arl_search_read_25,
};

static const struct b53_arl_ops b53_arl_ops_65 = {
	.arl_read_entry = b53_arl_read_entry_25,
	.arl_write_entry = b53_arl_write_entry_25,
	.arl_search_read = b53_arl_search_read_65,
};

static const struct b53_arl_ops b53_arl_ops_89 = {
	.arl_read_entry = b53_arl_read_entry_89,
	.arl_write_entry = b53_arl_write_entry_89,
	.arl_search_read = b53_arl_search_read_89,
};

static const struct b53_arl_ops b53_arl_ops_63xx = {
	.arl_read_entry = b53_arl_read_entry_89,
	.arl_write_entry = b53_arl_write_entry_89,
	.arl_search_read = b53_arl_search_read_63xx,
};

static const struct b53_arl_ops b53_arl_ops_95 = {
	.arl_read_entry = b53_arl_read_entry_95,
	.arl_write_entry = b53_arl_write_entry_95,
	.arl_search_read = b53_arl_search_read_95,
};

struct b53_chip_data {
	u32 chip_id;
	const char *dev_name;
	u16 vlans;
	u16 enabled_ports;
	u8 imp_port;
	u8 cpu_port;
	u8 vta_regs[3];
	u8 arl_bins;
	u16 arl_buckets;
	u8 duplex_reg;
	u8 jumbo_pm_reg;
	u8 jumbo_size_reg;
	const struct b53_arl_ops *arl_ops;
};

#define B53_VTA_REGS	\
	{ B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY }
#define B53_VTA_REGS_9798 \
	{ B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 }
#define B53_VTA_REGS_63XX \
	{ B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX }

static const struct b53_chip_data b53_switch_chips[] = {
	{
		.chip_id = BCM5325_DEVICE_ID,
		.dev_name = "BCM5325",
		.vlans = 16,
		.enabled_ports = 0x3f,
		.arl_bins = 2,
		.arl_buckets = 1024,
		.imp_port = 5,
		.duplex_reg = B53_DUPLEX_STAT_FE,
		.arl_ops = &b53_arl_ops_25,
	},
	{
		.chip_id = BCM5365_DEVICE_ID,
		.dev_name = "BCM5365",
		.vlans = 256,
		.enabled_ports = 0x3f,
		.arl_bins = 2,
		.arl_buckets = 1024,
		.imp_port = 5,
		.duplex_reg = B53_DUPLEX_STAT_FE,
		.arl_ops = &b53_arl_ops_65,
	},
	{
		.chip_id = BCM5389_DEVICE_ID,
		.dev_name = "BCM5389",
		.vlans = 4096,
		.enabled_ports = 0x11f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
		.arl_ops = &b53_arl_ops_89,
	},
	{
		.chip_id = BCM5395_DEVICE_ID,
		.dev_name = "BCM5395",
		.vlans = 4096,
		.enabled_ports = 0x11f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
		.arl_ops = &b53_arl_ops_95,
	},
	{
		.chip_id = BCM5397_DEVICE_ID,
		.dev_name = "BCM5397",
		.vlans = 4096,
		.enabled_ports = 0x11f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS_9798,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
		.arl_ops = &b53_arl_ops_89,
	},
	{
		.chip_id = BCM5398_DEVICE_ID,
		.dev_name = "BCM5398",
		.vlans = 4096,
		.enabled_ports = 0x17f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS_9798,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
		.arl_ops = &b53_arl_ops_89,
	},
	{
		.chip_id = BCM53101_DEVICE_ID,
		.dev_name = "BCM53101",
		.vlans = 4096,
		.enabled_ports = 0x11f,
		.arl_bins = 4,
		.arl_buckets = 512,
		.vta_regs = B53_VTA_REGS,
		.imp_port = 8,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
		.arl_ops = &b53_arl_ops_95,
	},
	{
		.chip_id = BCM53115_DEVICE_ID,
		.dev_name = "BCM53115",
		.vlans = 4096,
		.enabled_ports = 0x11f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.vta_regs = B53_VTA_REGS,
		.imp_port = 8,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
		.arl_ops = &b53_arl_ops_95,
	},
	{
		.chip_id = BCM53125_DEVICE_ID,
		.dev_name = "BCM53125",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
		.arl_ops = &b53_arl_ops_95,
	},
	{
		.chip_id = BCM53128_DEVICE_ID,
		.dev_name = "BCM53128",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
		.arl_ops = &b53_arl_ops_95,
	},
	{
		.chip_id = BCM63XX_DEVICE_ID,
		.dev_name = "BCM63xx",
		.vlans = 4096,
		.enabled_ports = 0, /* pdata must provide them */
		.arl_bins = 1,
		.arl_buckets = 4096,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS_63XX,
		.duplex_reg = B53_DUPLEX_STAT_63XX,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
		.arl_ops = &b53_arl_ops_63xx,
	},
	{
		.chip_id = BCM53010_DEVICE_ID,
		.dev_name = "BCM53010",
		.vlans = 4096,
		.enabled_ports = 0x1bf,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
		.arl_ops = &b53_arl_ops_95,
	},
	{
		.chip_id = BCM53011_DEVICE_ID,
		.dev_name = "BCM53011",
		.vlans = 4096,
		.enabled_ports = 0x1bf,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
		.arl_ops = &b53_arl_ops_95,
	},
	{
		.chip_id = BCM53012_DEVICE_ID,
		.dev_name = "BCM53012",
		.vlans = 4096,
		.enabled_ports = 0x1bf,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
		.arl_ops = &b53_arl_ops_95,
	},
	{
		.chip_id = BCM53018_DEVICE_ID,
		.dev_name = "BCM53018",
		.vlans = 4096,
		.enabled_ports = 0x1bf,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
		.arl_ops = &b53_arl_ops_95,
	},
	{
		.chip_id = BCM53019_DEVICE_ID,
		.dev_name = "BCM53019",
		.vlans = 4096,
		.enabled_ports = 0x1bf,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
		.arl_ops = &b53_arl_ops_95,
	},
	{
		.chip_id = BCM58XX_DEVICE_ID,
		.dev_name = "BCM585xx/586xx/88312",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
		.arl_ops = &b53_arl_ops_95,
	},
	{
		.chip_id = BCM583XX_DEVICE_ID,
		.dev_name = "BCM583xx/11360",
		.vlans = 4096,
		.enabled_ports = 0x103,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
		.arl_ops = &b53_arl_ops_95,
	},
	/* Starfighter 2 */
	{
		.chip_id = BCM4908_DEVICE_ID,
		.dev_name = "BCM4908",
		.vlans = 4096,
		.enabled_ports = 0x1bf,
		.arl_bins = 4,
		.arl_buckets = 256,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
		.arl_ops = &b53_arl_ops_95,
	},
	{
		.chip_id = BCM7445_DEVICE_ID,
		.dev_name = "BCM7445",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
		.arl_ops = &b53_arl_ops_95,
	},
	{
		.chip_id = BCM7278_DEVICE_ID,
		.dev_name = "BCM7278",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_bins = 4,
		.arl_buckets = 256,
		.imp_port = 8,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
		.arl_ops = &b53_arl_ops_95,
	},
	{
		.chip_id = BCM53134_DEVICE_ID,
		.dev_name = "BCM53134",
		.vlans = 4096,
		.enabled_ports = 0x12f,
		.imp_port = 8,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
		.arl_ops = &b53_arl_ops_95,
	},
};

static int b53_switch_init(struct b53_device *dev)
{
	u32 chip_id = dev->chip_id;
	unsigned int i;
	int ret;

	if (is63xx(dev))
		chip_id = BCM63XX_DEVICE_ID;

	for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
		const struct b53_chip_data *chip = &b53_switch_chips[i];

		if (chip->chip_id == chip_id) {
			if (!dev->enabled_ports)
				dev->enabled_ports = chip->enabled_ports;
			dev->name = chip->dev_name;
			dev->duplex_reg = chip->duplex_reg;
			dev->vta_regs[0] = chip->vta_regs[0];
			dev->vta_regs[1] = chip->vta_regs[1];
			dev->vta_regs[2] = chip->vta_regs[2];
			dev->jumbo_pm_reg = chip->jumbo_pm_reg;
			dev->imp_port = chip->imp_port;
			dev->num_vlans = chip->vlans;
			dev->num_arl_bins = chip->arl_bins;
			dev->num_arl_buckets = chip->arl_buckets;
			dev->arl_ops = chip->arl_ops;
			break;
		}
	}

	/* check which BCM5325x version we have */
	if (is5325(dev)) {
		u8 vc4;

		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);

		/* check reserved bits */
		switch (vc4 & 3) {
		case 1:
			/* BCM5325E */
			break;
		case 3:
			/* BCM5325F - do not use port 4 */
			dev->enabled_ports &= ~BIT(4);
			break;
		default:
/* On the BCM47XX SoCs this is the supported internal switch. */
#ifndef CONFIG_BCM47XX
			/* BCM5325M */
			return -EINVAL;
#else
			break;
#endif
		}
	}

	if (is5325e(dev))
		dev->num_arl_buckets = 512;

	dev->num_ports = fls(dev->enabled_ports);

	dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS);

	/* Include non-standard CPU port built-in PHYs to be probed */
	if (is539x(dev) || is531x5(dev)) {
		for (i = 0; i < dev->num_ports; i++) {
			if (!(dev->ds->phys_mii_mask & BIT(i)) &&
			    !b53_possible_cpu_port(dev->ds, i))
				dev->ds->phys_mii_mask |= BIT(i);
		}
	}

	dev->ports = devm_kcalloc(dev->dev,
				  dev->num_ports, sizeof(struct b53_port),
				  GFP_KERNEL);
	if (!dev->ports)
		return -ENOMEM;

	dev->vlans = devm_kcalloc(dev->dev,
				  dev->num_vlans, sizeof(struct b53_vlan),
				  GFP_KERNEL);
	if (!dev->vlans)
		return -ENOMEM;

	dev->reset_gpio = b53_switch_get_reset_gpio(dev);
	if (dev->reset_gpio >= 0) {
		ret = devm_gpio_request_one(dev->dev, dev->reset_gpio,
					    GPIOF_OUT_INIT_HIGH, "robo_reset");
		if (ret)
			return ret;
	}

	return 0;
}

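/* Allocate a dsa_switch and its b53_device context on behalf of a bus
 * driver. The caller provides its register access ops and private data;
 * registration is completed later through b53_switch_register().
 */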
struct b53_device *b53_switch_alloc(struct device *base,
				    const struct b53_io_ops *ops,
				    void *priv)
{
	struct dsa_switch *ds;
	struct b53_device *dev;

	ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	ds->dev = base;

	dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	ds->priv = dev;
	dev->dev = base;

	dev->ds = ds;
	dev->priv = priv;
	dev->ops = ops;
	ds->ops = &b53_switch_ops;
	ds->phylink_mac_ops = &b53_phylink_mac_ops;
	dev->vlan_enabled = true;
	dev->vlan_filtering = false;
	/* Let DSA handle the case where multiple bridges span the same switch
	 * device and different VLAN awareness settings are requested, which
	 * would break filtering semantics for any of the other bridge
	 * devices. (not hardware supported)
	 */
	ds->vlan_filtering_is_global = true;

	mutex_init(&dev->reg_mutex);
	mutex_init(&dev->stats_mutex);
	mutex_init(&dev->arl_mutex);

	return dev;
}
EXPORT_SYMBOL(b53_switch_alloc);

int b53_switch_detect(struct b53_device *dev)
{
	u32 id32;
	u16 tmp;
	u8 id8;
	int ret;

	ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8);
	if (ret)
		return ret;

	switch (id8) {
	case 0:
		/* BCM5325 and BCM5365 do not have this register so reads
		 * return 0. But the read operation did succeed, so assume this
		 * is one of them.
		 *
		 * Next check if we can write to the 5325's VTA register; for
		 * 5365 it is read only.
		 */
		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
		b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);

		if (tmp == 0xf) {
			u32 phy_id;
			int val;

			dev->chip_id = BCM5325_DEVICE_ID;

			val = b53_phy_read16(dev->ds, 0, MII_PHYSID1);
			phy_id = (val & 0xffff) << 16;
			val = b53_phy_read16(dev->ds, 0, MII_PHYSID2);
			phy_id |= (val & 0xfff0);

			if (phy_id == 0x00406330)
				dev->variant_id = B53_VARIANT_5325M;
			else if (phy_id == 0x0143bc30)
				dev->variant_id = B53_VARIANT_5325E;
		} else {
			dev->chip_id = BCM5365_DEVICE_ID;
		}
		break;
	case BCM5389_DEVICE_ID:
	case BCM5395_DEVICE_ID:
	case BCM5397_DEVICE_ID:
	case BCM5398_DEVICE_ID:
		dev->chip_id = id8;
		break;
	default:
		ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32);
		if (ret)
			return ret;

		switch (id32) {
		case BCM53101_DEVICE_ID:
		case BCM53115_DEVICE_ID:
		case BCM53125_DEVICE_ID:
		case BCM53128_DEVICE_ID:
		case BCM53010_DEVICE_ID:
		case BCM53011_DEVICE_ID:
		case BCM53012_DEVICE_ID:
		case BCM53018_DEVICE_ID:
		case BCM53019_DEVICE_ID:
		case BCM53134_DEVICE_ID:
			dev->chip_id = id32;
			break;
		default:
			dev_err(dev->dev,
				"unsupported switch detected (BCM53%02x/BCM%x)\n",
				id8, id32);
			return -ENODEV;
		}
	}

	if (dev->chip_id == BCM5325_DEVICE_ID)
		return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25,
				 &dev->core_rev);
	else
		return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID,
				 &dev->core_rev);
}
EXPORT_SYMBOL(b53_switch_detect);

int b53_switch_register(struct b53_device *dev)
{
	int ret;

	if (dev->pdata) {
		dev->chip_id = dev->pdata->chip_id;
		dev->enabled_ports = dev->pdata->enabled_ports;
	}

	if (!dev->chip_id && b53_switch_detect(dev))
		return -EINVAL;

	ret = b53_switch_init(dev);
	if (ret)
		return ret;

	dev_info(dev->dev, "found switch: %s, rev %i\n",
		 dev->name, dev->core_rev);

	return dsa_register_switch(dev->ds);
}
EXPORT_SYMBOL(b53_switch_register);

MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
MODULE_DESCRIPTION("B53 switch library");
MODULE_LICENSE("Dual BSD/GPL");