1 /* 2 * B53 switch driver main logic 3 * 4 * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org> 5 * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com> 6 * 7 * Permission to use, copy, modify, and/or distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include <linux/delay.h> 21 #include <linux/export.h> 22 #include <linux/gpio.h> 23 #include <linux/kernel.h> 24 #include <linux/math.h> 25 #include <linux/minmax.h> 26 #include <linux/module.h> 27 #include <linux/platform_data/b53.h> 28 #include <linux/phy.h> 29 #include <linux/phylink.h> 30 #include <linux/etherdevice.h> 31 #include <linux/if_bridge.h> 32 #include <linux/if_vlan.h> 33 #include <net/dsa.h> 34 35 #include "b53_regs.h" 36 #include "b53_priv.h" 37 38 struct b53_mib_desc { 39 u8 size; 40 u8 offset; 41 const char *name; 42 }; 43 44 /* BCM5365 MIB counters */ 45 static const struct b53_mib_desc b53_mibs_65[] = { 46 { 8, 0x00, "TxOctets" }, 47 { 4, 0x08, "TxDropPkts" }, 48 { 4, 0x10, "TxBroadcastPkts" }, 49 { 4, 0x14, "TxMulticastPkts" }, 50 { 4, 0x18, "TxUnicastPkts" }, 51 { 4, 0x1c, "TxCollisions" }, 52 { 4, 0x20, "TxSingleCollision" }, 53 { 4, 0x24, "TxMultipleCollision" }, 54 { 4, 0x28, "TxDeferredTransmit" }, 55 { 4, 0x2c, "TxLateCollision" }, 56 { 4, 0x30, "TxExcessiveCollision" }, 57 { 4, 0x38, "TxPausePkts" }, 58 { 8, 0x44, "RxOctets" }, 59 { 4, 0x4c, "RxUndersizePkts" }, 60 { 4, 0x50, "RxPausePkts" }, 61 { 4, 0x54, "Pkts64Octets" }, 62 { 4, 0x58, "Pkts65to127Octets" }, 63 { 4, 0x5c, "Pkts128to255Octets" }, 64 { 4, 0x60, "Pkts256to511Octets" }, 65 { 4, 0x64, "Pkts512to1023Octets" }, 66 { 4, 0x68, "Pkts1024to1522Octets" }, 67 { 4, 0x6c, "RxOversizePkts" }, 68 { 4, 0x70, "RxJabbers" }, 69 { 4, 0x74, "RxAlignmentErrors" }, 70 { 4, 0x78, "RxFCSErrors" }, 71 { 8, 0x7c, "RxGoodOctets" }, 72 { 4, 0x84, "RxDropPkts" }, 73 { 4, 0x88, "RxUnicastPkts" }, 74 { 4, 0x8c, "RxMulticastPkts" }, 75 { 4, 0x90, "RxBroadcastPkts" }, 76 { 4, 0x94, "RxSAChanges" }, 77 { 4, 0x98, "RxFragments" }, 78 }; 79 80 #define B53_MIBS_65_SIZE ARRAY_SIZE(b53_mibs_65) 81 82 /* BCM63xx MIB counters */ 83 static const struct b53_mib_desc b53_mibs_63xx[] = { 84 { 8, 0x00, "TxOctets" }, 85 { 4, 0x08, "TxDropPkts" }, 86 { 4, 0x0c, "TxQoSPkts" }, 87 { 4, 0x10, "TxBroadcastPkts" }, 88 { 4, 0x14, "TxMulticastPkts" }, 89 { 4, 0x18, "TxUnicastPkts" }, 90 { 4, 0x1c, "TxCollisions" }, 91 { 4, 0x20, "TxSingleCollision" }, 92 { 4, 0x24, "TxMultipleCollision" }, 93 { 4, 0x28, "TxDeferredTransmit" }, 94 { 4, 0x2c, "TxLateCollision" }, 95 { 4, 0x30, "TxExcessiveCollision" }, 96 { 4, 0x38, "TxPausePkts" }, 97 { 8, 0x3c, "TxQoSOctets" }, 98 { 8, 0x44, "RxOctets" }, 99 { 4, 0x4c, "RxUndersizePkts" }, 100 { 4, 0x50, "RxPausePkts" }, 101 { 4, 0x54, "Pkts64Octets" }, 102 { 4, 0x58, "Pkts65to127Octets" }, 103 { 4, 0x5c, "Pkts128to255Octets" }, 104 { 4, 0x60, "Pkts256to511Octets" }, 105 { 4, 0x64, "Pkts512to1023Octets" 
}, 106 { 4, 0x68, "Pkts1024to1522Octets" }, 107 { 4, 0x6c, "RxOversizePkts" }, 108 { 4, 0x70, "RxJabbers" }, 109 { 4, 0x74, "RxAlignmentErrors" }, 110 { 4, 0x78, "RxFCSErrors" }, 111 { 8, 0x7c, "RxGoodOctets" }, 112 { 4, 0x84, "RxDropPkts" }, 113 { 4, 0x88, "RxUnicastPkts" }, 114 { 4, 0x8c, "RxMulticastPkts" }, 115 { 4, 0x90, "RxBroadcastPkts" }, 116 { 4, 0x94, "RxSAChanges" }, 117 { 4, 0x98, "RxFragments" }, 118 { 4, 0xa0, "RxSymbolErrors" }, 119 { 4, 0xa4, "RxQoSPkts" }, 120 { 8, 0xa8, "RxQoSOctets" }, 121 { 4, 0xb0, "Pkts1523to2047Octets" }, 122 { 4, 0xb4, "Pkts2048to4095Octets" }, 123 { 4, 0xb8, "Pkts4096to8191Octets" }, 124 { 4, 0xbc, "Pkts8192to9728Octets" }, 125 { 4, 0xc0, "RxDiscarded" }, 126 }; 127 128 #define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx) 129 130 /* MIB counters */ 131 static const struct b53_mib_desc b53_mibs[] = { 132 { 8, 0x00, "TxOctets" }, 133 { 4, 0x08, "TxDropPkts" }, 134 { 4, 0x10, "TxBroadcastPkts" }, 135 { 4, 0x14, "TxMulticastPkts" }, 136 { 4, 0x18, "TxUnicastPkts" }, 137 { 4, 0x1c, "TxCollisions" }, 138 { 4, 0x20, "TxSingleCollision" }, 139 { 4, 0x24, "TxMultipleCollision" }, 140 { 4, 0x28, "TxDeferredTransmit" }, 141 { 4, 0x2c, "TxLateCollision" }, 142 { 4, 0x30, "TxExcessiveCollision" }, 143 { 4, 0x38, "TxPausePkts" }, 144 { 8, 0x50, "RxOctets" }, 145 { 4, 0x58, "RxUndersizePkts" }, 146 { 4, 0x5c, "RxPausePkts" }, 147 { 4, 0x60, "Pkts64Octets" }, 148 { 4, 0x64, "Pkts65to127Octets" }, 149 { 4, 0x68, "Pkts128to255Octets" }, 150 { 4, 0x6c, "Pkts256to511Octets" }, 151 { 4, 0x70, "Pkts512to1023Octets" }, 152 { 4, 0x74, "Pkts1024to1522Octets" }, 153 { 4, 0x78, "RxOversizePkts" }, 154 { 4, 0x7c, "RxJabbers" }, 155 { 4, 0x80, "RxAlignmentErrors" }, 156 { 4, 0x84, "RxFCSErrors" }, 157 { 8, 0x88, "RxGoodOctets" }, 158 { 4, 0x90, "RxDropPkts" }, 159 { 4, 0x94, "RxUnicastPkts" }, 160 { 4, 0x98, "RxMulticastPkts" }, 161 { 4, 0x9c, "RxBroadcastPkts" }, 162 { 4, 0xa0, "RxSAChanges" }, 163 { 4, 0xa4, "RxFragments" }, 164 { 4, 0xa8, "RxJumboPkts" }, 165 { 4, 0xac, "RxSymbolErrors" }, 166 { 4, 0xc0, "RxDiscarded" }, 167 }; 168 169 #define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs) 170 171 static const struct b53_mib_desc b53_mibs_58xx[] = { 172 { 8, 0x00, "TxOctets" }, 173 { 4, 0x08, "TxDropPkts" }, 174 { 4, 0x0c, "TxQPKTQ0" }, 175 { 4, 0x10, "TxBroadcastPkts" }, 176 { 4, 0x14, "TxMulticastPkts" }, 177 { 4, 0x18, "TxUnicastPKts" }, 178 { 4, 0x1c, "TxCollisions" }, 179 { 4, 0x20, "TxSingleCollision" }, 180 { 4, 0x24, "TxMultipleCollision" }, 181 { 4, 0x28, "TxDeferredCollision" }, 182 { 4, 0x2c, "TxLateCollision" }, 183 { 4, 0x30, "TxExcessiveCollision" }, 184 { 4, 0x34, "TxFrameInDisc" }, 185 { 4, 0x38, "TxPausePkts" }, 186 { 4, 0x3c, "TxQPKTQ1" }, 187 { 4, 0x40, "TxQPKTQ2" }, 188 { 4, 0x44, "TxQPKTQ3" }, 189 { 4, 0x48, "TxQPKTQ4" }, 190 { 4, 0x4c, "TxQPKTQ5" }, 191 { 8, 0x50, "RxOctets" }, 192 { 4, 0x58, "RxUndersizePkts" }, 193 { 4, 0x5c, "RxPausePkts" }, 194 { 4, 0x60, "RxPkts64Octets" }, 195 { 4, 0x64, "RxPkts65to127Octets" }, 196 { 4, 0x68, "RxPkts128to255Octets" }, 197 { 4, 0x6c, "RxPkts256to511Octets" }, 198 { 4, 0x70, "RxPkts512to1023Octets" }, 199 { 4, 0x74, "RxPkts1024toMaxPktsOctets" }, 200 { 4, 0x78, "RxOversizePkts" }, 201 { 4, 0x7c, "RxJabbers" }, 202 { 4, 0x80, "RxAlignmentErrors" }, 203 { 4, 0x84, "RxFCSErrors" }, 204 { 8, 0x88, "RxGoodOctets" }, 205 { 4, 0x90, "RxDropPkts" }, 206 { 4, 0x94, "RxUnicastPkts" }, 207 { 4, 0x98, "RxMulticastPkts" }, 208 { 4, 0x9c, "RxBroadcastPkts" }, 209 { 4, 0xa0, "RxSAChanges" }, 210 { 4, 0xa4, "RxFragments" }, 211 { 4, 0xa8, 
"RxJumboPkt" }, 212 { 4, 0xac, "RxSymblErr" }, 213 { 4, 0xb0, "InRangeErrCount" }, 214 { 4, 0xb4, "OutRangeErrCount" }, 215 { 4, 0xb8, "EEELpiEvent" }, 216 { 4, 0xbc, "EEELpiDuration" }, 217 { 4, 0xc0, "RxDiscard" }, 218 { 4, 0xc8, "TxQPKTQ6" }, 219 { 4, 0xcc, "TxQPKTQ7" }, 220 { 4, 0xd0, "TxPkts64Octets" }, 221 { 4, 0xd4, "TxPkts65to127Octets" }, 222 { 4, 0xd8, "TxPkts128to255Octets" }, 223 { 4, 0xdc, "TxPkts256to511Ocets" }, 224 { 4, 0xe0, "TxPkts512to1023Ocets" }, 225 { 4, 0xe4, "TxPkts1024toMaxPktOcets" }, 226 }; 227 228 #define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx) 229 230 #define B53_MAX_MTU_25 (1536 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) 231 #define B53_MAX_MTU (9720 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) 232 233 static int b53_do_vlan_op(struct b53_device *dev, u8 op) 234 { 235 unsigned int i; 236 237 b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op); 238 239 for (i = 0; i < 10; i++) { 240 u8 vta; 241 242 b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta); 243 if (!(vta & VTA_START_CMD)) 244 return 0; 245 246 usleep_range(100, 200); 247 } 248 249 return -EIO; 250 } 251 252 static void b53_set_vlan_entry(struct b53_device *dev, u16 vid, 253 struct b53_vlan *vlan) 254 { 255 if (is5325(dev)) { 256 u32 entry = 0; 257 258 if (vlan->members) { 259 entry = ((vlan->untag & VA_UNTAG_MASK_25) << 260 VA_UNTAG_S_25) | vlan->members; 261 if (dev->core_rev >= 3) 262 entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S; 263 else 264 entry |= VA_VALID_25; 265 } 266 267 b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry); 268 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid | 269 VTA_RW_STATE_WR | VTA_RW_OP_EN); 270 } else if (is5365(dev)) { 271 u16 entry = 0; 272 273 if (vlan->members) 274 entry = ((vlan->untag & VA_UNTAG_MASK_65) << 275 VA_UNTAG_S_65) | vlan->members | VA_VALID_65; 276 277 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry); 278 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid | 279 VTA_RW_STATE_WR | VTA_RW_OP_EN); 280 } else { 281 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid); 282 b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], 283 (vlan->untag << VTE_UNTAG_S) | vlan->members); 284 285 b53_do_vlan_op(dev, VTA_CMD_WRITE); 286 } 287 288 dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n", 289 vid, vlan->members, vlan->untag); 290 } 291 292 static void b53_get_vlan_entry(struct b53_device *dev, u16 vid, 293 struct b53_vlan *vlan) 294 { 295 if (is5325(dev)) { 296 u32 entry = 0; 297 298 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid | 299 VTA_RW_STATE_RD | VTA_RW_OP_EN); 300 b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry); 301 302 if (dev->core_rev >= 3) 303 vlan->valid = !!(entry & VA_VALID_25_R4); 304 else 305 vlan->valid = !!(entry & VA_VALID_25); 306 vlan->members = entry & VA_MEMBER_MASK; 307 vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25; 308 309 } else if (is5365(dev)) { 310 u16 entry = 0; 311 312 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid | 313 VTA_RW_STATE_WR | VTA_RW_OP_EN); 314 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry); 315 316 vlan->valid = !!(entry & VA_VALID_65); 317 vlan->members = entry & VA_MEMBER_MASK; 318 vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65; 319 } else { 320 u32 entry = 0; 321 322 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid); 323 b53_do_vlan_op(dev, VTA_CMD_READ); 324 b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry); 325 vlan->members = entry & VTE_MEMBERS; 326 
vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS; 327 vlan->valid = true; 328 } 329 } 330 331 static void b53_set_eap_mode(struct b53_device *dev, int port, int mode) 332 { 333 u64 eap_conf; 334 335 if (is5325(dev) || is5365(dev) || dev->chip_id == BCM5389_DEVICE_ID) 336 return; 337 338 b53_read64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), &eap_conf); 339 340 if (is63xx(dev)) { 341 eap_conf &= ~EAP_MODE_MASK_63XX; 342 eap_conf |= (u64)mode << EAP_MODE_SHIFT_63XX; 343 } else { 344 eap_conf &= ~EAP_MODE_MASK; 345 eap_conf |= (u64)mode << EAP_MODE_SHIFT; 346 } 347 348 b53_write64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), eap_conf); 349 } 350 351 static void b53_set_forwarding(struct b53_device *dev, int enable) 352 { 353 u8 mgmt; 354 355 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 356 357 if (enable) 358 mgmt |= SM_SW_FWD_EN; 359 else 360 mgmt &= ~SM_SW_FWD_EN; 361 362 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 363 364 if (!is5325(dev)) { 365 /* Include IMP port in dumb forwarding mode */ 366 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt); 367 mgmt |= B53_MII_DUMB_FWDG_EN; 368 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); 369 370 /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether 371 * frames should be flooded or not. 372 */ 373 b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); 374 mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN; 375 b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); 376 } else { 377 b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); 378 mgmt |= B53_IP_MCAST_25; 379 b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); 380 } 381 } 382 383 static void b53_enable_vlan(struct b53_device *dev, int port, bool enable, 384 bool enable_filtering) 385 { 386 u8 mgmt, vc0, vc1, vc4 = 0, vc5; 387 388 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 389 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0); 390 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1); 391 392 if (is5325(dev) || is5365(dev)) { 393 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4); 394 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5); 395 } else if (is63xx(dev)) { 396 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4); 397 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5); 398 } else { 399 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4); 400 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5); 401 } 402 403 vc1 &= ~VC1_RX_MCST_FWD_EN; 404 405 if (enable) { 406 vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; 407 vc1 |= VC1_RX_MCST_UNTAG_EN; 408 vc4 &= ~VC4_ING_VID_CHECK_MASK; 409 if (enable_filtering) { 410 vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; 411 vc5 |= VC5_DROP_VTABLE_MISS; 412 } else { 413 vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S; 414 vc5 &= ~VC5_DROP_VTABLE_MISS; 415 } 416 417 if (is5325(dev)) 418 vc0 &= ~VC0_RESERVED_1; 419 420 if (is5325(dev) || is5365(dev)) 421 vc1 |= VC1_RX_MCST_TAG_EN; 422 423 } else { 424 vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID); 425 vc1 &= ~VC1_RX_MCST_UNTAG_EN; 426 vc4 &= ~VC4_ING_VID_CHECK_MASK; 427 vc5 &= ~VC5_DROP_VTABLE_MISS; 428 429 if (is5325(dev) || is5365(dev)) 430 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; 431 else 432 vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S; 433 434 if (is5325(dev) || is5365(dev)) 435 vc1 &= ~VC1_RX_MCST_TAG_EN; 436 } 437 438 if (!is5325(dev) && !is5365(dev)) 439 vc5 &= ~VC5_VID_FFF_EN; 440 441 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0); 442 
b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1); 443 444 if (is5325(dev) || is5365(dev)) { 445 /* enable the high 8 bit vid check on 5325 */ 446 if (is5325(dev) && enable) 447 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 448 VC3_HIGH_8BIT_EN); 449 else 450 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0); 451 452 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4); 453 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5); 454 } else if (is63xx(dev)) { 455 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0); 456 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4); 457 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5); 458 } else { 459 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0); 460 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4); 461 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5); 462 } 463 464 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 465 466 dev->vlan_enabled = enable; 467 468 dev_dbg(dev->dev, "Port %d VLAN enabled: %d, filtering: %d\n", 469 port, enable, enable_filtering); 470 } 471 472 static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) 473 { 474 u32 port_mask = 0; 475 u16 max_size = JMS_MIN_SIZE; 476 477 if (is5325(dev) || is5365(dev)) 478 return -EINVAL; 479 480 if (enable) { 481 port_mask = dev->enabled_ports; 482 max_size = JMS_MAX_SIZE; 483 if (allow_10_100) 484 port_mask |= JPM_10_100_JUMBO_EN; 485 } 486 487 b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask); 488 return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size); 489 } 490 491 static int b53_flush_arl(struct b53_device *dev, u8 mask) 492 { 493 unsigned int i; 494 495 if (is5325(dev)) 496 return 0; 497 498 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, 499 FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask); 500 501 for (i = 0; i < 10; i++) { 502 u8 fast_age_ctrl; 503 504 b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, 505 &fast_age_ctrl); 506 507 if (!(fast_age_ctrl & FAST_AGE_DONE)) 508 goto out; 509 510 msleep(1); 511 } 512 513 return -ETIMEDOUT; 514 out: 515 /* Only age dynamic entries (default behavior) */ 516 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC); 517 return 0; 518 } 519 520 static int b53_fast_age_port(struct b53_device *dev, int port) 521 { 522 if (is5325(dev)) 523 return 0; 524 525 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port); 526 527 return b53_flush_arl(dev, FAST_AGE_PORT); 528 } 529 530 static int b53_fast_age_vlan(struct b53_device *dev, u16 vid) 531 { 532 if (is5325(dev)) 533 return 0; 534 535 b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid); 536 537 return b53_flush_arl(dev, FAST_AGE_VLAN); 538 } 539 540 void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) 541 { 542 struct b53_device *dev = ds->priv; 543 unsigned int i; 544 u16 pvlan; 545 546 /* BCM5325 CPU port is at 8 */ 547 if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25) 548 cpu_port = B53_CPU_PORT; 549 550 /* Enable the IMP port to be in the same VLAN as the other ports 551 * on a per-port basis such that we only have Port i and IMP in 552 * the same VLAN. 
553 */ 554 b53_for_each_port(dev, i) { 555 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan); 556 pvlan |= BIT(cpu_port); 557 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan); 558 } 559 } 560 EXPORT_SYMBOL(b53_imp_vlan_setup); 561 562 static void b53_port_set_ucast_flood(struct b53_device *dev, int port, 563 bool unicast) 564 { 565 u16 uc; 566 567 if (is5325(dev)) { 568 if (port == B53_CPU_PORT_25) 569 port = B53_CPU_PORT; 570 571 b53_read16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, &uc); 572 if (unicast) 573 uc |= BIT(port) | B53_IEEE_UCAST_DROP_EN; 574 else 575 uc &= ~BIT(port); 576 b53_write16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, uc); 577 } else { 578 b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc); 579 if (unicast) 580 uc |= BIT(port); 581 else 582 uc &= ~BIT(port); 583 b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc); 584 } 585 } 586 587 static void b53_port_set_mcast_flood(struct b53_device *dev, int port, 588 bool multicast) 589 { 590 u16 mc; 591 592 if (is5325(dev)) { 593 if (port == B53_CPU_PORT_25) 594 port = B53_CPU_PORT; 595 596 b53_read16(dev, B53_IEEE_PAGE, B53_IEEE_MCAST_DLF, &mc); 597 if (multicast) 598 mc |= BIT(port) | B53_IEEE_MCAST_DROP_EN; 599 else 600 mc &= ~BIT(port); 601 b53_write16(dev, B53_IEEE_PAGE, B53_IEEE_MCAST_DLF, mc); 602 } else { 603 b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc); 604 if (multicast) 605 mc |= BIT(port); 606 else 607 mc &= ~BIT(port); 608 b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc); 609 610 b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc); 611 if (multicast) 612 mc |= BIT(port); 613 else 614 mc &= ~BIT(port); 615 b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc); 616 } 617 } 618 619 static void b53_port_set_learning(struct b53_device *dev, int port, 620 bool learning) 621 { 622 u16 reg; 623 624 if (is5325(dev)) 625 return; 626 627 b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, ®); 628 if (learning) 629 reg &= ~BIT(port); 630 else 631 reg |= BIT(port); 632 b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg); 633 } 634 635 static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable) 636 { 637 struct b53_device *dev = ds->priv; 638 u16 reg; 639 640 b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, ®); 641 if (enable) 642 reg |= BIT(port); 643 else 644 reg &= ~BIT(port); 645 b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg); 646 } 647 648 int b53_setup_port(struct dsa_switch *ds, int port) 649 { 650 struct b53_device *dev = ds->priv; 651 652 b53_port_set_ucast_flood(dev, port, true); 653 b53_port_set_mcast_flood(dev, port, true); 654 b53_port_set_learning(dev, port, false); 655 656 /* Force all traffic to go to the CPU port to prevent the ASIC from 657 * trying to forward to bridged ports on matching FDB entries, then 658 * dropping frames because it isn't allowed to forward there. 
659 */ 660 if (dsa_is_user_port(ds, port)) 661 b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED); 662 663 if (is5325(dev) && 664 in_range(port, 1, 4)) { 665 u8 reg; 666 667 b53_read8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, ®); 668 reg &= ~PD_MODE_POWER_DOWN_PORT(0); 669 if (dsa_is_unused_port(ds, port)) 670 reg |= PD_MODE_POWER_DOWN_PORT(port); 671 else 672 reg &= ~PD_MODE_POWER_DOWN_PORT(port); 673 b53_write8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, reg); 674 } 675 676 return 0; 677 } 678 EXPORT_SYMBOL(b53_setup_port); 679 680 int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) 681 { 682 struct b53_device *dev = ds->priv; 683 unsigned int cpu_port; 684 int ret = 0; 685 u16 pvlan; 686 687 if (!dsa_is_user_port(ds, port)) 688 return 0; 689 690 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; 691 692 if (dev->ops->irq_enable) 693 ret = dev->ops->irq_enable(dev, port); 694 if (ret) 695 return ret; 696 697 /* Clear the Rx and Tx disable bits and set to no spanning tree */ 698 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0); 699 700 /* Set this port, and only this one to be in the default VLAN, 701 * if member of a bridge, restore its membership prior to 702 * bringing down this port. 703 */ 704 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 705 pvlan &= ~0x1ff; 706 pvlan |= BIT(port); 707 pvlan |= dev->ports[port].vlan_ctl_mask; 708 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 709 710 b53_imp_vlan_setup(ds, cpu_port); 711 712 /* If EEE was enabled, restore it */ 713 if (dev->ports[port].eee.eee_enabled) 714 b53_eee_enable_set(ds, port, true); 715 716 return 0; 717 } 718 EXPORT_SYMBOL(b53_enable_port); 719 720 void b53_disable_port(struct dsa_switch *ds, int port) 721 { 722 struct b53_device *dev = ds->priv; 723 u8 reg; 724 725 /* Disable Tx/Rx for the port */ 726 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), ®); 727 reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE; 728 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); 729 730 if (dev->ops->irq_disable) 731 dev->ops->irq_disable(dev, port); 732 } 733 EXPORT_SYMBOL(b53_disable_port); 734 735 void b53_brcm_hdr_setup(struct dsa_switch *ds, int port) 736 { 737 struct b53_device *dev = ds->priv; 738 bool tag_en = !(dev->tag_protocol == DSA_TAG_PROTO_NONE); 739 u8 hdr_ctl, val; 740 u16 reg; 741 742 /* Resolve which bit controls the Broadcom tag */ 743 switch (port) { 744 case 8: 745 val = BRCM_HDR_P8_EN; 746 break; 747 case 7: 748 val = BRCM_HDR_P7_EN; 749 break; 750 case 5: 751 val = BRCM_HDR_P5_EN; 752 break; 753 default: 754 val = 0; 755 break; 756 } 757 758 /* Enable management mode if tagging is requested */ 759 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &hdr_ctl); 760 if (tag_en) 761 hdr_ctl |= SM_SW_FWD_MODE; 762 else 763 hdr_ctl &= ~SM_SW_FWD_MODE; 764 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, hdr_ctl); 765 766 /* Configure the appropriate IMP port */ 767 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &hdr_ctl); 768 if (port == 8) 769 hdr_ctl |= GC_FRM_MGMT_PORT_MII; 770 else if (port == 5) 771 hdr_ctl |= GC_FRM_MGMT_PORT_M; 772 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl); 773 774 /* B53_BRCM_HDR not present on devices with legacy tags */ 775 if (dev->tag_protocol == DSA_TAG_PROTO_BRCM_LEGACY || 776 dev->tag_protocol == DSA_TAG_PROTO_BRCM_LEGACY_FCS) 777 return; 778 779 /* Enable Broadcom tags for IMP port */ 780 b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl); 781 if (tag_en) 782 hdr_ctl |= val; 783 else 784 hdr_ctl &= 
~val; 785 b53_write8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, hdr_ctl); 786 787 /* Registers below are only accessible on newer devices */ 788 if (!is58xx(dev)) 789 return; 790 791 /* Enable reception Broadcom tag for CPU TX (switch RX) to 792 * allow us to tag outgoing frames 793 */ 794 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, ®); 795 if (tag_en) 796 reg &= ~BIT(port); 797 else 798 reg |= BIT(port); 799 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, reg); 800 801 /* Enable transmission of Broadcom tags from the switch (CPU RX) to 802 * allow delivering frames to the per-port net_devices 803 */ 804 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, ®); 805 if (tag_en) 806 reg &= ~BIT(port); 807 else 808 reg |= BIT(port); 809 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, reg); 810 } 811 EXPORT_SYMBOL(b53_brcm_hdr_setup); 812 813 static void b53_enable_cpu_port(struct b53_device *dev, int port) 814 { 815 u8 port_ctrl; 816 817 /* BCM5325 CPU port is at 8 */ 818 if ((is5325(dev) || is5365(dev)) && port == B53_CPU_PORT_25) 819 port = B53_CPU_PORT; 820 821 port_ctrl = PORT_CTRL_RX_BCST_EN | 822 PORT_CTRL_RX_MCST_EN | 823 PORT_CTRL_RX_UCST_EN; 824 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl); 825 826 b53_brcm_hdr_setup(dev->ds, port); 827 } 828 829 static void b53_enable_mib(struct b53_device *dev) 830 { 831 u8 gc; 832 833 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); 834 gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN); 835 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); 836 } 837 838 static void b53_enable_stp(struct b53_device *dev) 839 { 840 u8 gc; 841 842 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); 843 gc |= GC_RX_BPDU_EN; 844 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); 845 } 846 847 static u16 b53_default_pvid(struct b53_device *dev) 848 { 849 if (is5325(dev) || is5365(dev)) 850 return 1; 851 else 852 return 0; 853 } 854 855 static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port) 856 { 857 struct b53_device *dev = ds->priv; 858 859 return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port); 860 } 861 862 static bool b53_vlan_port_may_join_untagged(struct dsa_switch *ds, int port) 863 { 864 struct b53_device *dev = ds->priv; 865 struct dsa_port *dp; 866 867 if (!dev->vlan_filtering) 868 return true; 869 870 dp = dsa_to_port(ds, port); 871 872 if (dsa_port_is_cpu(dp)) 873 return true; 874 875 return dp->bridge == NULL; 876 } 877 878 int b53_configure_vlan(struct dsa_switch *ds) 879 { 880 struct b53_device *dev = ds->priv; 881 struct b53_vlan vl = { 0 }; 882 struct b53_vlan *v; 883 int i, def_vid; 884 u16 vid; 885 886 def_vid = b53_default_pvid(dev); 887 888 /* clear all vlan entries */ 889 if (is5325(dev) || is5365(dev)) { 890 for (i = def_vid; i < dev->num_vlans; i++) 891 b53_set_vlan_entry(dev, i, &vl); 892 } else { 893 b53_do_vlan_op(dev, VTA_CMD_CLEAR); 894 } 895 896 b53_enable_vlan(dev, -1, dev->vlan_enabled, dev->vlan_filtering); 897 898 /* Create an untagged VLAN entry for the default PVID in case 899 * CONFIG_VLAN_8021Q is disabled and there are no calls to 900 * dsa_user_vlan_rx_add_vid() to create the default VLAN 901 * entry. 
Do this only when the tagging protocol is not 902 * DSA_TAG_PROTO_NONE 903 */ 904 v = &dev->vlans[def_vid]; 905 b53_for_each_port(dev, i) { 906 if (!b53_vlan_port_may_join_untagged(ds, i)) 907 continue; 908 909 vl.members |= BIT(i); 910 if (!b53_vlan_port_needs_forced_tagged(ds, i)) 911 vl.untag = vl.members; 912 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(i), 913 def_vid); 914 } 915 b53_set_vlan_entry(dev, def_vid, &vl); 916 917 if (dev->vlan_filtering) { 918 /* Upon initial call we have not set-up any VLANs, but upon 919 * system resume, we need to restore all VLAN entries. 920 */ 921 for (vid = def_vid + 1; vid < dev->num_vlans; vid++) { 922 v = &dev->vlans[vid]; 923 924 if (!v->members) 925 continue; 926 927 b53_set_vlan_entry(dev, vid, v); 928 b53_fast_age_vlan(dev, vid); 929 } 930 931 b53_for_each_port(dev, i) { 932 if (!dsa_is_cpu_port(ds, i)) 933 b53_write16(dev, B53_VLAN_PAGE, 934 B53_VLAN_PORT_DEF_TAG(i), 935 dev->ports[i].pvid); 936 } 937 } 938 939 return 0; 940 } 941 EXPORT_SYMBOL(b53_configure_vlan); 942 943 static void b53_switch_reset_gpio(struct b53_device *dev) 944 { 945 int gpio = dev->reset_gpio; 946 947 if (gpio < 0) 948 return; 949 950 /* Reset sequence: RESET low(50ms)->high(20ms) 951 */ 952 gpio_set_value(gpio, 0); 953 mdelay(50); 954 955 gpio_set_value(gpio, 1); 956 mdelay(20); 957 958 dev->current_page = 0xff; 959 } 960 961 static int b53_switch_reset(struct b53_device *dev) 962 { 963 unsigned int timeout = 1000; 964 u8 mgmt, reg; 965 966 b53_switch_reset_gpio(dev); 967 968 if (is539x(dev)) { 969 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83); 970 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00); 971 } 972 973 /* This is specific to 58xx devices here, do not use is58xx() which 974 * covers the larger Starfigther 2 family, including 7445/7278 which 975 * still use this driver as a library and need to perform the reset 976 * earlier. 977 */ 978 if (dev->chip_id == BCM58XX_DEVICE_ID || 979 dev->chip_id == BCM583XX_DEVICE_ID) { 980 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, ®); 981 reg |= SW_RST | EN_SW_RST | EN_CH_RST; 982 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg); 983 984 do { 985 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, ®); 986 if (!(reg & SW_RST)) 987 break; 988 989 usleep_range(1000, 2000); 990 } while (timeout-- > 0); 991 992 if (timeout == 0) { 993 dev_err(dev->dev, 994 "Timeout waiting for SW_RST to clear!\n"); 995 return -ETIMEDOUT; 996 } 997 } 998 999 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 1000 1001 if (!(mgmt & SM_SW_FWD_EN)) { 1002 mgmt &= ~SM_SW_FWD_MODE; 1003 mgmt |= SM_SW_FWD_EN; 1004 1005 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 1006 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 1007 1008 if (!(mgmt & SM_SW_FWD_EN)) { 1009 dev_err(dev->dev, "Failed to enable switch!\n"); 1010 return -EINVAL; 1011 } 1012 } 1013 1014 b53_enable_mib(dev); 1015 b53_enable_stp(dev); 1016 1017 return b53_flush_arl(dev, FAST_AGE_STATIC); 1018 } 1019 1020 static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg) 1021 { 1022 struct b53_device *priv = ds->priv; 1023 u16 value = 0; 1024 int ret; 1025 1026 if (priv->ops->phy_read16) 1027 ret = priv->ops->phy_read16(priv, addr, reg, &value); 1028 else 1029 ret = b53_read16(priv, B53_PORT_MII_PAGE(addr), 1030 reg * 2, &value); 1031 1032 return ret ? 
ret : value; 1033 } 1034 1035 static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val) 1036 { 1037 struct b53_device *priv = ds->priv; 1038 1039 if (priv->ops->phy_write16) 1040 return priv->ops->phy_write16(priv, addr, reg, val); 1041 1042 return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val); 1043 } 1044 1045 static int b53_reset_switch(struct b53_device *priv) 1046 { 1047 /* reset vlans */ 1048 memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans); 1049 memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports); 1050 1051 priv->serdes_lane = B53_INVALID_LANE; 1052 1053 return b53_switch_reset(priv); 1054 } 1055 1056 static int b53_apply_config(struct b53_device *priv) 1057 { 1058 /* disable switching */ 1059 b53_set_forwarding(priv, 0); 1060 1061 b53_configure_vlan(priv->ds); 1062 1063 /* enable switching */ 1064 b53_set_forwarding(priv, 1); 1065 1066 return 0; 1067 } 1068 1069 static void b53_reset_mib(struct b53_device *priv) 1070 { 1071 u8 gc; 1072 1073 b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); 1074 1075 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB); 1076 msleep(1); 1077 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB); 1078 msleep(1); 1079 } 1080 1081 static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev) 1082 { 1083 if (is5365(dev)) 1084 return b53_mibs_65; 1085 else if (is63xx(dev)) 1086 return b53_mibs_63xx; 1087 else if (is58xx(dev)) 1088 return b53_mibs_58xx; 1089 else 1090 return b53_mibs; 1091 } 1092 1093 static unsigned int b53_get_mib_size(struct b53_device *dev) 1094 { 1095 if (is5365(dev)) 1096 return B53_MIBS_65_SIZE; 1097 else if (is63xx(dev)) 1098 return B53_MIBS_63XX_SIZE; 1099 else if (is58xx(dev)) 1100 return B53_MIBS_58XX_SIZE; 1101 else 1102 return B53_MIBS_SIZE; 1103 } 1104 1105 static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port) 1106 { 1107 /* These ports typically do not have built-in PHYs */ 1108 switch (port) { 1109 case B53_CPU_PORT_25: 1110 case 7: 1111 case B53_CPU_PORT: 1112 return NULL; 1113 } 1114 1115 return mdiobus_get_phy(ds->user_mii_bus, port); 1116 } 1117 1118 void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset, 1119 uint8_t *data) 1120 { 1121 struct b53_device *dev = ds->priv; 1122 const struct b53_mib_desc *mibs = b53_get_mib(dev); 1123 unsigned int mib_size = b53_get_mib_size(dev); 1124 struct phy_device *phydev; 1125 unsigned int i; 1126 1127 if (stringset == ETH_SS_STATS) { 1128 for (i = 0; i < mib_size; i++) 1129 ethtool_puts(&data, mibs[i].name); 1130 } else if (stringset == ETH_SS_PHY_STATS) { 1131 phydev = b53_get_phy_device(ds, port); 1132 if (!phydev) 1133 return; 1134 1135 phy_ethtool_get_strings(phydev, data); 1136 } 1137 } 1138 EXPORT_SYMBOL(b53_get_strings); 1139 1140 void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) 1141 { 1142 struct b53_device *dev = ds->priv; 1143 const struct b53_mib_desc *mibs = b53_get_mib(dev); 1144 unsigned int mib_size = b53_get_mib_size(dev); 1145 const struct b53_mib_desc *s; 1146 unsigned int i; 1147 u64 val = 0; 1148 1149 if (is5365(dev) && port == 5) 1150 port = 8; 1151 1152 mutex_lock(&dev->stats_mutex); 1153 1154 for (i = 0; i < mib_size; i++) { 1155 s = &mibs[i]; 1156 1157 if (s->size == 8) { 1158 b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val); 1159 } else { 1160 u32 val32; 1161 1162 b53_read32(dev, B53_MIB_PAGE(port), s->offset, 1163 &val32); 1164 val = val32; 1165 } 1166 data[i] = (u64)val; 1167 } 1168 1169 
mutex_unlock(&dev->stats_mutex); 1170 } 1171 EXPORT_SYMBOL(b53_get_ethtool_stats); 1172 1173 void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data) 1174 { 1175 struct phy_device *phydev; 1176 1177 phydev = b53_get_phy_device(ds, port); 1178 if (!phydev) 1179 return; 1180 1181 phy_ethtool_get_stats(phydev, NULL, data); 1182 } 1183 EXPORT_SYMBOL(b53_get_ethtool_phy_stats); 1184 1185 int b53_get_sset_count(struct dsa_switch *ds, int port, int sset) 1186 { 1187 struct b53_device *dev = ds->priv; 1188 struct phy_device *phydev; 1189 1190 if (sset == ETH_SS_STATS) { 1191 return b53_get_mib_size(dev); 1192 } else if (sset == ETH_SS_PHY_STATS) { 1193 phydev = b53_get_phy_device(ds, port); 1194 if (!phydev) 1195 return 0; 1196 1197 return phy_ethtool_get_sset_count(phydev); 1198 } 1199 1200 return 0; 1201 } 1202 EXPORT_SYMBOL(b53_get_sset_count); 1203 1204 enum b53_devlink_resource_id { 1205 B53_DEVLINK_PARAM_ID_VLAN_TABLE, 1206 }; 1207 1208 static u64 b53_devlink_vlan_table_get(void *priv) 1209 { 1210 struct b53_device *dev = priv; 1211 struct b53_vlan *vl; 1212 unsigned int i; 1213 u64 count = 0; 1214 1215 for (i = 0; i < dev->num_vlans; i++) { 1216 vl = &dev->vlans[i]; 1217 if (vl->members) 1218 count++; 1219 } 1220 1221 return count; 1222 } 1223 1224 int b53_setup_devlink_resources(struct dsa_switch *ds) 1225 { 1226 struct devlink_resource_size_params size_params; 1227 struct b53_device *dev = ds->priv; 1228 int err; 1229 1230 devlink_resource_size_params_init(&size_params, dev->num_vlans, 1231 dev->num_vlans, 1232 1, DEVLINK_RESOURCE_UNIT_ENTRY); 1233 1234 err = dsa_devlink_resource_register(ds, "VLAN", dev->num_vlans, 1235 B53_DEVLINK_PARAM_ID_VLAN_TABLE, 1236 DEVLINK_RESOURCE_ID_PARENT_TOP, 1237 &size_params); 1238 if (err) 1239 goto out; 1240 1241 dsa_devlink_resource_occ_get_register(ds, 1242 B53_DEVLINK_PARAM_ID_VLAN_TABLE, 1243 b53_devlink_vlan_table_get, dev); 1244 1245 return 0; 1246 out: 1247 dsa_devlink_resources_unregister(ds); 1248 return err; 1249 } 1250 EXPORT_SYMBOL(b53_setup_devlink_resources); 1251 1252 static int b53_setup(struct dsa_switch *ds) 1253 { 1254 struct b53_device *dev = ds->priv; 1255 struct b53_vlan *vl; 1256 unsigned int port; 1257 u16 pvid; 1258 int ret; 1259 1260 /* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set 1261 * which forces the CPU port to be tagged in all VLANs. 1262 */ 1263 ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE; 1264 1265 /* The switch does not tell us the original VLAN for untagged 1266 * packets, so keep the CPU port always tagged. 1267 */ 1268 ds->untag_vlan_aware_bridge_pvid = true; 1269 1270 /* Ageing time is set in seconds */ 1271 ds->ageing_time_min = 1 * 1000; 1272 ds->ageing_time_max = AGE_TIME_MAX * 1000; 1273 1274 ret = b53_reset_switch(dev); 1275 if (ret) { 1276 dev_err(ds->dev, "failed to reset switch\n"); 1277 return ret; 1278 } 1279 1280 /* setup default vlan for filtering mode */ 1281 pvid = b53_default_pvid(dev); 1282 vl = &dev->vlans[pvid]; 1283 b53_for_each_port(dev, port) { 1284 vl->members |= BIT(port); 1285 if (!b53_vlan_port_needs_forced_tagged(ds, port)) 1286 vl->untag |= BIT(port); 1287 } 1288 1289 b53_reset_mib(dev); 1290 1291 ret = b53_apply_config(dev); 1292 if (ret) { 1293 dev_err(ds->dev, "failed to apply configuration\n"); 1294 return ret; 1295 } 1296 1297 /* Configure IMP/CPU port, disable all other ports. 
Enabled 1298 * ports will be configured with .port_enable 1299 */ 1300 for (port = 0; port < dev->num_ports; port++) { 1301 if (dsa_is_cpu_port(ds, port)) 1302 b53_enable_cpu_port(dev, port); 1303 else 1304 b53_disable_port(ds, port); 1305 } 1306 1307 return b53_setup_devlink_resources(ds); 1308 } 1309 1310 static void b53_teardown(struct dsa_switch *ds) 1311 { 1312 dsa_devlink_resources_unregister(ds); 1313 } 1314 1315 static void b53_force_link(struct b53_device *dev, int port, int link) 1316 { 1317 u8 reg, val, off; 1318 1319 /* Override the port settings */ 1320 if (port == dev->imp_port) { 1321 off = B53_PORT_OVERRIDE_CTRL; 1322 val = PORT_OVERRIDE_EN; 1323 } else if (is5325(dev)) { 1324 return; 1325 } else { 1326 off = B53_GMII_PORT_OVERRIDE_CTRL(port); 1327 val = GMII_PO_EN; 1328 } 1329 1330 b53_read8(dev, B53_CTRL_PAGE, off, ®); 1331 reg |= val; 1332 if (link) 1333 reg |= PORT_OVERRIDE_LINK; 1334 else 1335 reg &= ~PORT_OVERRIDE_LINK; 1336 b53_write8(dev, B53_CTRL_PAGE, off, reg); 1337 } 1338 1339 static void b53_force_port_config(struct b53_device *dev, int port, 1340 int speed, int duplex, 1341 bool tx_pause, bool rx_pause) 1342 { 1343 u8 reg, val, off; 1344 1345 /* Override the port settings */ 1346 if (port == dev->imp_port) { 1347 off = B53_PORT_OVERRIDE_CTRL; 1348 val = PORT_OVERRIDE_EN; 1349 } else if (is5325(dev)) { 1350 return; 1351 } else { 1352 off = B53_GMII_PORT_OVERRIDE_CTRL(port); 1353 val = GMII_PO_EN; 1354 } 1355 1356 b53_read8(dev, B53_CTRL_PAGE, off, ®); 1357 reg |= val; 1358 if (duplex == DUPLEX_FULL) 1359 reg |= PORT_OVERRIDE_FULL_DUPLEX; 1360 else 1361 reg &= ~PORT_OVERRIDE_FULL_DUPLEX; 1362 1363 switch (speed) { 1364 case 2000: 1365 reg |= PORT_OVERRIDE_SPEED_2000M; 1366 fallthrough; 1367 case SPEED_1000: 1368 reg |= PORT_OVERRIDE_SPEED_1000M; 1369 break; 1370 case SPEED_100: 1371 reg |= PORT_OVERRIDE_SPEED_100M; 1372 break; 1373 case SPEED_10: 1374 reg |= PORT_OVERRIDE_SPEED_10M; 1375 break; 1376 default: 1377 dev_err(dev->dev, "unknown speed: %d\n", speed); 1378 return; 1379 } 1380 1381 if (rx_pause) { 1382 if (is5325(dev)) 1383 reg |= PORT_OVERRIDE_LP_FLOW_25; 1384 else 1385 reg |= PORT_OVERRIDE_RX_FLOW; 1386 } 1387 1388 if (tx_pause) { 1389 if (is5325(dev)) 1390 reg |= PORT_OVERRIDE_LP_FLOW_25; 1391 else 1392 reg |= PORT_OVERRIDE_TX_FLOW; 1393 } 1394 1395 b53_write8(dev, B53_CTRL_PAGE, off, reg); 1396 } 1397 1398 static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port, 1399 phy_interface_t interface) 1400 { 1401 struct b53_device *dev = ds->priv; 1402 u8 rgmii_ctrl = 0; 1403 1404 b53_read8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), &rgmii_ctrl); 1405 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC); 1406 1407 if (is63268(dev)) 1408 rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE; 1409 1410 rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII; 1411 1412 b53_write8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), rgmii_ctrl); 1413 1414 dev_dbg(ds->dev, "Configured port %d for %s\n", port, 1415 phy_modes(interface)); 1416 } 1417 1418 static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port, 1419 phy_interface_t interface) 1420 { 1421 struct b53_device *dev = ds->priv; 1422 u8 rgmii_ctrl = 0, off; 1423 1424 if (port == dev->imp_port) 1425 off = B53_RGMII_CTRL_IMP; 1426 else 1427 off = B53_RGMII_CTRL_P(port); 1428 1429 /* Configure the port RGMII clock delay by DLL disabled and 1430 * tx_clk aligned timing (restoring to reset defaults) 1431 */ 1432 b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl); 1433 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC); 1434 
1435 /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make 1436 * sure that we enable the port TX clock internal delay to 1437 * account for this internal delay that is inserted, otherwise 1438 * the switch won't be able to receive correctly. 1439 * 1440 * PHY_INTERFACE_MODE_RGMII means that we are not introducing 1441 * any delay neither on transmission nor reception, so the 1442 * BCM53125 must also be configured accordingly to account for 1443 * the lack of delay and introduce 1444 * 1445 * The BCM53125 switch has its RX clock and TX clock control 1446 * swapped, hence the reason why we modify the TX clock path in 1447 * the "RGMII" case 1448 */ 1449 if (interface == PHY_INTERFACE_MODE_RGMII_TXID) 1450 rgmii_ctrl |= RGMII_CTRL_DLL_TXC; 1451 if (interface == PHY_INTERFACE_MODE_RGMII) 1452 rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC; 1453 1454 if (dev->chip_id != BCM53115_DEVICE_ID) 1455 rgmii_ctrl |= RGMII_CTRL_TIMING_SEL; 1456 1457 b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl); 1458 1459 dev_info(ds->dev, "Configured port %d for %s\n", port, 1460 phy_modes(interface)); 1461 } 1462 1463 static void b53_adjust_5325_mii(struct dsa_switch *ds, int port) 1464 { 1465 struct b53_device *dev = ds->priv; 1466 u8 reg = 0; 1467 1468 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 1469 ®); 1470 1471 /* reverse mii needs to be enabled */ 1472 if (!(reg & PORT_OVERRIDE_RV_MII_25)) { 1473 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 1474 reg | PORT_OVERRIDE_RV_MII_25); 1475 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 1476 ®); 1477 1478 if (!(reg & PORT_OVERRIDE_RV_MII_25)) { 1479 dev_err(ds->dev, 1480 "Failed to enable reverse MII mode\n"); 1481 return; 1482 } 1483 } 1484 } 1485 1486 void b53_port_event(struct dsa_switch *ds, int port) 1487 { 1488 struct b53_device *dev = ds->priv; 1489 bool link; 1490 u16 sts; 1491 1492 b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts); 1493 link = !!(sts & BIT(port)); 1494 dsa_port_phylink_mac_change(ds, port, link); 1495 } 1496 EXPORT_SYMBOL(b53_port_event); 1497 1498 static void b53_phylink_get_caps(struct dsa_switch *ds, int port, 1499 struct phylink_config *config) 1500 { 1501 struct b53_device *dev = ds->priv; 1502 1503 /* Internal ports need GMII for PHYLIB */ 1504 __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces); 1505 1506 /* These switches appear to support MII and RevMII too, but beyond 1507 * this, the code gives very few clues. FIXME: We probably need more 1508 * interface modes here. 1509 * 1510 * According to b53_srab_mux_init(), ports 3..5 can support: 1511 * SGMII, MII, GMII, RGMII or INTERNAL depending on the MUX setting. 1512 * However, the interface mode read from the MUX configuration is 1513 * not passed back to DSA, so phylink uses NA. 1514 * DT can specify RGMII for ports 0, 1. 1515 * For MDIO, port 8 can be RGMII_TXID. 1516 */ 1517 __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces); 1518 __set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces); 1519 1520 /* BCM63xx RGMII ports support RGMII */ 1521 if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4)) 1522 phy_interface_set_rgmii(config->supported_interfaces); 1523 1524 config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | 1525 MAC_10 | MAC_100; 1526 1527 /* 5325/5365 are not capable of gigabit speeds, everything else is. 1528 * Note: the original code also exclulded Gigagbit for MII, RevMII 1529 * and 802.3z modes. 
MII and RevMII are not able to work above 100M, 1530 * so will be excluded by the generic validator implementation. 1531 * However, the exclusion of Gigabit for 802.3z just seems wrong. 1532 */ 1533 if (!(is5325(dev) || is5365(dev))) 1534 config->mac_capabilities |= MAC_1000; 1535 1536 /* Get the implementation specific capabilities */ 1537 if (dev->ops->phylink_get_caps) 1538 dev->ops->phylink_get_caps(dev, port, config); 1539 } 1540 1541 static struct phylink_pcs *b53_phylink_mac_select_pcs(struct phylink_config *config, 1542 phy_interface_t interface) 1543 { 1544 struct dsa_port *dp = dsa_phylink_to_port(config); 1545 struct b53_device *dev = dp->ds->priv; 1546 1547 if (!dev->ops->phylink_mac_select_pcs) 1548 return NULL; 1549 1550 return dev->ops->phylink_mac_select_pcs(dev, dp->index, interface); 1551 } 1552 1553 static void b53_phylink_mac_config(struct phylink_config *config, 1554 unsigned int mode, 1555 const struct phylink_link_state *state) 1556 { 1557 struct dsa_port *dp = dsa_phylink_to_port(config); 1558 phy_interface_t interface = state->interface; 1559 struct dsa_switch *ds = dp->ds; 1560 struct b53_device *dev = ds->priv; 1561 int port = dp->index; 1562 1563 if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4)) 1564 b53_adjust_63xx_rgmii(ds, port, interface); 1565 1566 if (mode == MLO_AN_FIXED) { 1567 if (is531x5(dev) && phy_interface_mode_is_rgmii(interface)) 1568 b53_adjust_531x5_rgmii(ds, port, interface); 1569 1570 /* configure MII port if necessary */ 1571 if (is5325(dev)) 1572 b53_adjust_5325_mii(ds, port); 1573 } 1574 } 1575 1576 static void b53_phylink_mac_link_down(struct phylink_config *config, 1577 unsigned int mode, 1578 phy_interface_t interface) 1579 { 1580 struct dsa_port *dp = dsa_phylink_to_port(config); 1581 struct b53_device *dev = dp->ds->priv; 1582 int port = dp->index; 1583 1584 if (mode == MLO_AN_PHY) 1585 return; 1586 1587 if (mode == MLO_AN_FIXED) { 1588 b53_force_link(dev, port, false); 1589 return; 1590 } 1591 1592 if (phy_interface_mode_is_8023z(interface) && 1593 dev->ops->serdes_link_set) 1594 dev->ops->serdes_link_set(dev, port, mode, interface, false); 1595 } 1596 1597 static void b53_phylink_mac_link_up(struct phylink_config *config, 1598 struct phy_device *phydev, 1599 unsigned int mode, 1600 phy_interface_t interface, 1601 int speed, int duplex, 1602 bool tx_pause, bool rx_pause) 1603 { 1604 struct dsa_port *dp = dsa_phylink_to_port(config); 1605 struct dsa_switch *ds = dp->ds; 1606 struct b53_device *dev = ds->priv; 1607 struct ethtool_keee *p = &dev->ports[dp->index].eee; 1608 int port = dp->index; 1609 1610 if (mode == MLO_AN_PHY) { 1611 /* Re-negotiate EEE if it was enabled already */ 1612 p->eee_enabled = b53_eee_init(ds, port, phydev); 1613 return; 1614 } 1615 1616 if (mode == MLO_AN_FIXED) { 1617 /* Force flow control on BCM5301x's CPU port */ 1618 if (is5301x(dev) && dsa_is_cpu_port(ds, port)) 1619 tx_pause = rx_pause = true; 1620 1621 b53_force_port_config(dev, port, speed, duplex, 1622 tx_pause, rx_pause); 1623 b53_force_link(dev, port, true); 1624 return; 1625 } 1626 1627 if (phy_interface_mode_is_8023z(interface) && 1628 dev->ops->serdes_link_set) 1629 dev->ops->serdes_link_set(dev, port, mode, interface, true); 1630 } 1631 1632 int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, 1633 struct netlink_ext_ack *extack) 1634 { 1635 struct b53_device *dev = ds->priv; 1636 1637 if (dev->vlan_filtering != vlan_filtering) { 1638 dev->vlan_filtering = vlan_filtering; 1639 b53_apply_config(dev); 1640 } 1641 1642 
return 0; 1643 } 1644 EXPORT_SYMBOL(b53_vlan_filtering); 1645 1646 static int b53_vlan_prepare(struct dsa_switch *ds, int port, 1647 const struct switchdev_obj_port_vlan *vlan) 1648 { 1649 struct b53_device *dev = ds->priv; 1650 1651 if ((is5325(dev) || is5365(dev)) && vlan->vid == 0) 1652 return -EOPNOTSUPP; 1653 1654 /* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of 1655 * receiving VLAN tagged frames at all, we can still allow the port to 1656 * be configured for egress untagged. 1657 */ 1658 if (dev->chip_id == BCM7278_DEVICE_ID && port == 7 && 1659 !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) 1660 return -EINVAL; 1661 1662 if (vlan->vid >= dev->num_vlans) 1663 return -ERANGE; 1664 1665 b53_enable_vlan(dev, port, true, dev->vlan_filtering); 1666 1667 return 0; 1668 } 1669 1670 int b53_vlan_add(struct dsa_switch *ds, int port, 1671 const struct switchdev_obj_port_vlan *vlan, 1672 struct netlink_ext_ack *extack) 1673 { 1674 struct b53_device *dev = ds->priv; 1675 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 1676 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; 1677 struct b53_vlan *vl; 1678 u16 old_pvid, new_pvid; 1679 int err; 1680 1681 err = b53_vlan_prepare(ds, port, vlan); 1682 if (err) 1683 return err; 1684 1685 if (vlan->vid == 0) 1686 return 0; 1687 1688 old_pvid = dev->ports[port].pvid; 1689 if (pvid) 1690 new_pvid = vlan->vid; 1691 else if (!pvid && vlan->vid == old_pvid) 1692 new_pvid = b53_default_pvid(dev); 1693 else 1694 new_pvid = old_pvid; 1695 dev->ports[port].pvid = new_pvid; 1696 1697 vl = &dev->vlans[vlan->vid]; 1698 1699 if (dsa_is_cpu_port(ds, port)) 1700 untagged = false; 1701 1702 vl->members |= BIT(port); 1703 if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port)) 1704 vl->untag |= BIT(port); 1705 else 1706 vl->untag &= ~BIT(port); 1707 1708 if (!dev->vlan_filtering) 1709 return 0; 1710 1711 b53_set_vlan_entry(dev, vlan->vid, vl); 1712 b53_fast_age_vlan(dev, vlan->vid); 1713 1714 if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) { 1715 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), 1716 new_pvid); 1717 b53_fast_age_vlan(dev, old_pvid); 1718 } 1719 1720 return 0; 1721 } 1722 EXPORT_SYMBOL(b53_vlan_add); 1723 1724 int b53_vlan_del(struct dsa_switch *ds, int port, 1725 const struct switchdev_obj_port_vlan *vlan) 1726 { 1727 struct b53_device *dev = ds->priv; 1728 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 1729 struct b53_vlan *vl; 1730 u16 pvid; 1731 1732 if (vlan->vid == 0) 1733 return 0; 1734 1735 pvid = dev->ports[port].pvid; 1736 1737 vl = &dev->vlans[vlan->vid]; 1738 1739 vl->members &= ~BIT(port); 1740 1741 if (pvid == vlan->vid) 1742 pvid = b53_default_pvid(dev); 1743 dev->ports[port].pvid = pvid; 1744 1745 if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port)) 1746 vl->untag &= ~(BIT(port)); 1747 1748 if (!dev->vlan_filtering) 1749 return 0; 1750 1751 b53_set_vlan_entry(dev, vlan->vid, vl); 1752 b53_fast_age_vlan(dev, vlan->vid); 1753 1754 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid); 1755 b53_fast_age_vlan(dev, pvid); 1756 1757 return 0; 1758 } 1759 EXPORT_SYMBOL(b53_vlan_del); 1760 1761 /* Address Resolution Logic routines. Caller must hold &dev->arl_mutex. 
*/ 1762 static int b53_arl_op_wait(struct b53_device *dev) 1763 { 1764 unsigned int timeout = 10; 1765 u8 reg; 1766 1767 do { 1768 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); 1769 if (!(reg & ARLTBL_START_DONE)) 1770 return 0; 1771 1772 usleep_range(1000, 2000); 1773 } while (timeout--); 1774 1775 dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg); 1776 1777 return -ETIMEDOUT; 1778 } 1779 1780 static int b53_arl_rw_op(struct b53_device *dev, unsigned int op) 1781 { 1782 u8 reg; 1783 1784 if (op > ARLTBL_RW) 1785 return -EINVAL; 1786 1787 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); 1788 reg |= ARLTBL_START_DONE; 1789 if (op) 1790 reg |= ARLTBL_RW; 1791 else 1792 reg &= ~ARLTBL_RW; 1793 if (dev->vlan_enabled) 1794 reg &= ~ARLTBL_IVL_SVL_SELECT; 1795 else 1796 reg |= ARLTBL_IVL_SVL_SELECT; 1797 b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg); 1798 1799 return b53_arl_op_wait(dev); 1800 } 1801 1802 static int b53_arl_read(struct b53_device *dev, u64 mac, 1803 u16 vid, struct b53_arl_entry *ent, u8 *idx) 1804 { 1805 DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES); 1806 unsigned int i; 1807 int ret; 1808 1809 ret = b53_arl_op_wait(dev); 1810 if (ret) 1811 return ret; 1812 1813 bitmap_zero(free_bins, dev->num_arl_bins); 1814 1815 /* Read the bins */ 1816 for (i = 0; i < dev->num_arl_bins; i++) { 1817 u64 mac_vid; 1818 u32 fwd_entry; 1819 1820 b53_read64(dev, B53_ARLIO_PAGE, 1821 B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid); 1822 b53_read32(dev, B53_ARLIO_PAGE, 1823 B53_ARLTBL_DATA_ENTRY(i), &fwd_entry); 1824 b53_arl_to_entry(ent, mac_vid, fwd_entry); 1825 1826 if (!(fwd_entry & ARLTBL_VALID)) { 1827 set_bit(i, free_bins); 1828 continue; 1829 } 1830 if ((mac_vid & ARLTBL_MAC_MASK) != mac) 1831 continue; 1832 if (dev->vlan_enabled && 1833 ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid) 1834 continue; 1835 *idx = i; 1836 return 0; 1837 } 1838 1839 *idx = find_first_bit(free_bins, dev->num_arl_bins); 1840 return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT; 1841 } 1842 1843 static int b53_arl_read_25(struct b53_device *dev, u64 mac, 1844 u16 vid, struct b53_arl_entry *ent, u8 *idx) 1845 { 1846 DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES); 1847 unsigned int i; 1848 int ret; 1849 1850 ret = b53_arl_op_wait(dev); 1851 if (ret) 1852 return ret; 1853 1854 bitmap_zero(free_bins, dev->num_arl_bins); 1855 1856 /* Read the bins */ 1857 for (i = 0; i < dev->num_arl_bins; i++) { 1858 u64 mac_vid; 1859 1860 b53_read64(dev, B53_ARLIO_PAGE, 1861 B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid); 1862 1863 b53_arl_to_entry_25(ent, mac_vid); 1864 1865 if (!(mac_vid & ARLTBL_VALID_25)) { 1866 set_bit(i, free_bins); 1867 continue; 1868 } 1869 if ((mac_vid & ARLTBL_MAC_MASK) != mac) 1870 continue; 1871 if (dev->vlan_enabled && 1872 ((mac_vid >> ARLTBL_VID_S_65) & ARLTBL_VID_MASK_25) != vid) 1873 continue; 1874 *idx = i; 1875 return 0; 1876 } 1877 1878 *idx = find_first_bit(free_bins, dev->num_arl_bins); 1879 return *idx >= dev->num_arl_bins ? 
-ENOSPC : -ENOENT; 1880 } 1881 1882 static int b53_arl_op(struct b53_device *dev, int op, int port, 1883 const unsigned char *addr, u16 vid, bool is_valid) 1884 { 1885 struct b53_arl_entry ent; 1886 u32 fwd_entry; 1887 u64 mac, mac_vid = 0; 1888 u8 idx = 0; 1889 int ret; 1890 1891 /* Convert the array into a 64-bit MAC */ 1892 mac = ether_addr_to_u64(addr); 1893 1894 /* Perform a read for the given MAC and VID */ 1895 b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac); 1896 if (!is5325m(dev)) 1897 b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid); 1898 1899 /* Issue a read operation for this MAC */ 1900 ret = b53_arl_rw_op(dev, 1); 1901 if (ret) 1902 return ret; 1903 1904 if (is5325(dev) || is5365(dev)) 1905 ret = b53_arl_read_25(dev, mac, vid, &ent, &idx); 1906 else 1907 ret = b53_arl_read(dev, mac, vid, &ent, &idx); 1908 1909 /* If this is a read, just finish now */ 1910 if (op) 1911 return ret; 1912 1913 switch (ret) { 1914 case -ETIMEDOUT: 1915 return ret; 1916 case -ENOSPC: 1917 dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n", 1918 addr, vid); 1919 return is_valid ? ret : 0; 1920 case -ENOENT: 1921 /* We could not find a matching MAC, so reset to a new entry */ 1922 dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n", 1923 addr, vid, idx); 1924 fwd_entry = 0; 1925 break; 1926 default: 1927 dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n", 1928 addr, vid, idx); 1929 break; 1930 } 1931 1932 /* For multicast address, the port is a bitmask and the validity 1933 * is determined by having at least one port being still active 1934 */ 1935 if (!is_multicast_ether_addr(addr)) { 1936 ent.port = port; 1937 ent.is_valid = is_valid; 1938 } else { 1939 if (is_valid) 1940 ent.port |= BIT(port); 1941 else 1942 ent.port &= ~BIT(port); 1943 1944 ent.is_valid = !!(ent.port); 1945 } 1946 1947 ent.vid = vid; 1948 ent.is_static = true; 1949 ent.is_age = false; 1950 memcpy(ent.mac, addr, ETH_ALEN); 1951 if (is5325(dev) || is5365(dev)) 1952 b53_arl_from_entry_25(&mac_vid, &ent); 1953 else 1954 b53_arl_from_entry(&mac_vid, &fwd_entry, &ent); 1955 1956 b53_write64(dev, B53_ARLIO_PAGE, 1957 B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid); 1958 1959 if (!is5325(dev) && !is5365(dev)) 1960 b53_write32(dev, B53_ARLIO_PAGE, 1961 B53_ARLTBL_DATA_ENTRY(idx), fwd_entry); 1962 1963 return b53_arl_rw_op(dev, 0); 1964 } 1965 1966 int b53_fdb_add(struct dsa_switch *ds, int port, 1967 const unsigned char *addr, u16 vid, 1968 struct dsa_db db) 1969 { 1970 struct b53_device *priv = ds->priv; 1971 int ret; 1972 1973 mutex_lock(&priv->arl_mutex); 1974 ret = b53_arl_op(priv, 0, port, addr, vid, true); 1975 mutex_unlock(&priv->arl_mutex); 1976 1977 return ret; 1978 } 1979 EXPORT_SYMBOL(b53_fdb_add); 1980 1981 int b53_fdb_del(struct dsa_switch *ds, int port, 1982 const unsigned char *addr, u16 vid, 1983 struct dsa_db db) 1984 { 1985 struct b53_device *priv = ds->priv; 1986 int ret; 1987 1988 mutex_lock(&priv->arl_mutex); 1989 ret = b53_arl_op(priv, 0, port, addr, vid, false); 1990 mutex_unlock(&priv->arl_mutex); 1991 1992 return ret; 1993 } 1994 EXPORT_SYMBOL(b53_fdb_del); 1995 1996 static int b53_arl_search_wait(struct b53_device *dev) 1997 { 1998 unsigned int timeout = 1000; 1999 u8 reg, offset; 2000 2001 if (is5325(dev) || is5365(dev)) 2002 offset = B53_ARL_SRCH_CTL_25; 2003 else 2004 offset = B53_ARL_SRCH_CTL; 2005 2006 do { 2007 b53_read8(dev, B53_ARLIO_PAGE, offset, ®); 2008 if (!(reg & ARL_SRCH_STDN)) 2009 return 0; 2010 2011 if (reg & ARL_SRCH_VLID) 2012 return 0; 2013 2014 usleep_range(1000, 2000); 2015 
} while (timeout--); 2016 2017 return -ETIMEDOUT; 2018 } 2019 2020 static void b53_arl_search_rd(struct b53_device *dev, u8 idx, 2021 struct b53_arl_entry *ent) 2022 { 2023 u64 mac_vid; 2024 2025 if (is5325(dev)) { 2026 b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_25, 2027 &mac_vid); 2028 b53_arl_to_entry_25(ent, mac_vid); 2029 } else if (is5365(dev)) { 2030 b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_65, 2031 &mac_vid); 2032 b53_arl_to_entry_25(ent, mac_vid); 2033 } else { 2034 u32 fwd_entry; 2035 2036 b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_MACVID(idx), 2037 &mac_vid); 2038 b53_read32(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL(idx), 2039 &fwd_entry); 2040 b53_arl_to_entry(ent, mac_vid, fwd_entry); 2041 } 2042 } 2043 2044 static int b53_fdb_copy(int port, const struct b53_arl_entry *ent, 2045 dsa_fdb_dump_cb_t *cb, void *data) 2046 { 2047 if (!ent->is_valid) 2048 return 0; 2049 2050 if (port != ent->port) 2051 return 0; 2052 2053 return cb(ent->mac, ent->vid, ent->is_static, data); 2054 } 2055 2056 int b53_fdb_dump(struct dsa_switch *ds, int port, 2057 dsa_fdb_dump_cb_t *cb, void *data) 2058 { 2059 struct b53_device *priv = ds->priv; 2060 struct b53_arl_entry results[2]; 2061 unsigned int count = 0; 2062 u8 offset; 2063 int ret; 2064 u8 reg; 2065 2066 mutex_lock(&priv->arl_mutex); 2067 2068 if (is5325(priv) || is5365(priv)) 2069 offset = B53_ARL_SRCH_CTL_25; 2070 else 2071 offset = B53_ARL_SRCH_CTL; 2072 2073 /* Start search operation */ 2074 reg = ARL_SRCH_STDN; 2075 b53_write8(priv, B53_ARLIO_PAGE, offset, reg); 2076 2077 do { 2078 ret = b53_arl_search_wait(priv); 2079 if (ret) 2080 break; 2081 2082 b53_arl_search_rd(priv, 0, &results[0]); 2083 ret = b53_fdb_copy(port, &results[0], cb, data); 2084 if (ret) 2085 break; 2086 2087 if (priv->num_arl_bins > 2) { 2088 b53_arl_search_rd(priv, 1, &results[1]); 2089 ret = b53_fdb_copy(port, &results[1], cb, data); 2090 if (ret) 2091 break; 2092 2093 if (!results[0].is_valid && !results[1].is_valid) 2094 break; 2095 } 2096 2097 } while (count++ < b53_max_arl_entries(priv) / 2); 2098 2099 mutex_unlock(&priv->arl_mutex); 2100 2101 return 0; 2102 } 2103 EXPORT_SYMBOL(b53_fdb_dump); 2104 2105 int b53_mdb_add(struct dsa_switch *ds, int port, 2106 const struct switchdev_obj_port_mdb *mdb, 2107 struct dsa_db db) 2108 { 2109 struct b53_device *priv = ds->priv; 2110 int ret; 2111 2112 /* 5325 and 5365 require some more massaging, but could 2113 * be supported eventually 2114 */ 2115 if (is5325(priv) || is5365(priv)) 2116 return -EOPNOTSUPP; 2117 2118 mutex_lock(&priv->arl_mutex); 2119 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true); 2120 mutex_unlock(&priv->arl_mutex); 2121 2122 return ret; 2123 } 2124 EXPORT_SYMBOL(b53_mdb_add); 2125 2126 int b53_mdb_del(struct dsa_switch *ds, int port, 2127 const struct switchdev_obj_port_mdb *mdb, 2128 struct dsa_db db) 2129 { 2130 struct b53_device *priv = ds->priv; 2131 int ret; 2132 2133 mutex_lock(&priv->arl_mutex); 2134 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false); 2135 mutex_unlock(&priv->arl_mutex); 2136 if (ret) 2137 dev_err(ds->dev, "failed to delete MDB entry\n"); 2138 2139 return ret; 2140 } 2141 EXPORT_SYMBOL(b53_mdb_del); 2142 2143 int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, 2144 bool *tx_fwd_offload, struct netlink_ext_ack *extack) 2145 { 2146 struct b53_device *dev = ds->priv; 2147 struct b53_vlan *vl; 2148 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; 2149 u16 pvlan, reg, pvid; 2150 unsigned int i; 2151
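/* Bridge join: if VLAN filtering is active, the port first leaves the default PVID / join-all-VLANs membership; it is then cross-enabled with every other port that offloads the same bridge in the port-based VLAN (PVLAN) masks so forwarding between the bridge members becomes possible. */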
2152 /* On 7278, port 7 which connects to the ASP should only receive 2153 * traffic from matching CFP rules. 2154 */ 2155 if (dev->chip_id == BCM7278_DEVICE_ID && port == 7) 2156 return -EINVAL; 2157 2158 pvid = b53_default_pvid(dev); 2159 vl = &dev->vlans[pvid]; 2160 2161 if (dev->vlan_filtering) { 2162 /* Make this port leave the all VLANs join since we will have 2163 * proper VLAN entries from now on 2164 */ 2165 if (is58xx(dev)) { 2166 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, 2167 &reg); 2168 reg &= ~BIT(port); 2169 if ((reg & BIT(cpu_port)) == BIT(cpu_port)) 2170 reg &= ~BIT(cpu_port); 2171 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, 2172 reg); 2173 } 2174 2175 b53_get_vlan_entry(dev, pvid, vl); 2176 vl->members &= ~BIT(port); 2177 b53_set_vlan_entry(dev, pvid, vl); 2178 } 2179 2180 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 2181 2182 b53_for_each_port(dev, i) { 2183 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) 2184 continue; 2185 2186 /* Add this local port to the remote port VLAN control 2187 * membership and update the remote port bitmask 2188 */ 2189 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg); 2190 reg |= BIT(port); 2191 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg); 2192 dev->ports[i].vlan_ctl_mask = reg; 2193 2194 pvlan |= BIT(i); 2195 } 2196 2197 /* Disable redirection of unknown SA to the CPU port */ 2198 b53_set_eap_mode(dev, port, EAP_MODE_BASIC); 2199 2200 /* Configure the local port VLAN control membership to include 2201 * remote ports and update the local port bitmask 2202 */ 2203 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 2204 dev->ports[port].vlan_ctl_mask = pvlan; 2205 2206 return 0; 2207 } 2208 EXPORT_SYMBOL(b53_br_join); 2209 2210 void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) 2211 { 2212 struct b53_device *dev = ds->priv; 2213 struct b53_vlan *vl; 2214 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; 2215 unsigned int i; 2216 u16 pvlan, reg, pvid; 2217 2218 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 2219 2220 b53_for_each_port(dev, i) { 2221 /* Don't touch the remaining ports */ 2222 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) 2223 continue; 2224 2225 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg); 2226 reg &= ~BIT(port); 2227 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg); 2228 dev->ports[i].vlan_ctl_mask = reg; 2229 2230 /* Prevent self removal to preserve isolation */ 2231 if (port != i) 2232 pvlan &= ~BIT(i); 2233 } 2234 2235 /* Enable redirection of unknown SA to the CPU port */ 2236 b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED); 2237 2238 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 2239 dev->ports[port].vlan_ctl_mask = pvlan; 2240 2241 pvid = b53_default_pvid(dev); 2242 vl = &dev->vlans[pvid]; 2243 2244 if (dev->vlan_filtering) { 2245 /* Make this port join all VLANs without VLAN entries */ 2246 if (is58xx(dev)) { 2247 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg); 2248 reg |= BIT(port); 2249 if (!(reg & BIT(cpu_port))) 2250 reg |= BIT(cpu_port); 2251 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); 2252 } 2253 2254 b53_get_vlan_entry(dev, pvid, vl); 2255 vl->members |= BIT(port); 2256 b53_set_vlan_entry(dev, pvid, vl); 2257 } 2258 } 2259 EXPORT_SYMBOL(b53_br_leave); 2260 2261 void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state) 2262 { 2263 struct b53_device *dev = ds->priv; 2264
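/* Translate the bridge STP state into the STP state field of this port's control register. */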
u8 hw_state; 2265 u8 reg; 2266 2267 switch (state) { 2268 case BR_STATE_DISABLED: 2269 hw_state = PORT_CTRL_DIS_STATE; 2270 break; 2271 case BR_STATE_LISTENING: 2272 hw_state = PORT_CTRL_LISTEN_STATE; 2273 break; 2274 case BR_STATE_LEARNING: 2275 hw_state = PORT_CTRL_LEARN_STATE; 2276 break; 2277 case BR_STATE_FORWARDING: 2278 hw_state = PORT_CTRL_FWD_STATE; 2279 break; 2280 case BR_STATE_BLOCKING: 2281 hw_state = PORT_CTRL_BLOCK_STATE; 2282 break; 2283 default: 2284 dev_err(ds->dev, "invalid STP state: %d\n", state); 2285 return; 2286 } 2287 2288 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg); 2289 reg &= ~PORT_CTRL_STP_STATE_MASK; 2290 reg |= hw_state; 2291 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); 2292 } 2293 EXPORT_SYMBOL(b53_br_set_stp_state); 2294 2295 void b53_br_fast_age(struct dsa_switch *ds, int port) 2296 { 2297 struct b53_device *dev = ds->priv; 2298 2299 if (b53_fast_age_port(dev, port)) 2300 dev_err(ds->dev, "fast ageing failed\n"); 2301 } 2302 EXPORT_SYMBOL(b53_br_fast_age); 2303 2304 int b53_br_flags_pre(struct dsa_switch *ds, int port, 2305 struct switchdev_brport_flags flags, 2306 struct netlink_ext_ack *extack) 2307 { 2308 struct b53_device *dev = ds->priv; 2309 unsigned long mask = (BR_FLOOD | BR_MCAST_FLOOD); 2310 2311 if (!is5325(dev)) 2312 mask |= BR_LEARNING; 2313 2314 if (flags.mask & ~mask) 2315 return -EINVAL; 2316 2317 return 0; 2318 } 2319 EXPORT_SYMBOL(b53_br_flags_pre); 2320 2321 int b53_br_flags(struct dsa_switch *ds, int port, 2322 struct switchdev_brport_flags flags, 2323 struct netlink_ext_ack *extack) 2324 { 2325 if (flags.mask & BR_FLOOD) 2326 b53_port_set_ucast_flood(ds->priv, port, 2327 !!(flags.val & BR_FLOOD)); 2328 if (flags.mask & BR_MCAST_FLOOD) 2329 b53_port_set_mcast_flood(ds->priv, port, 2330 !!(flags.val & BR_MCAST_FLOOD)); 2331 if (flags.mask & BR_LEARNING) 2332 b53_port_set_learning(ds->priv, port, 2333 !!(flags.val & BR_LEARNING)); 2334 2335 return 0; 2336 } 2337 EXPORT_SYMBOL(b53_br_flags); 2338 2339 static bool b53_possible_cpu_port(struct dsa_switch *ds, int port) 2340 { 2341 /* Broadcom switches will accept enabling Broadcom tags on the 2342 * following ports: 5, 7 and 8, any other port is not supported 2343 */ 2344 switch (port) { 2345 case B53_CPU_PORT_25: 2346 case 7: 2347 case B53_CPU_PORT: 2348 return true; 2349 } 2350 2351 return false; 2352 } 2353 2354 static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port, 2355 enum dsa_tag_protocol tag_protocol) 2356 { 2357 bool ret = b53_possible_cpu_port(ds, port); 2358 2359 if (!ret) { 2360 dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n", 2361 port); 2362 return ret; 2363 } 2364 2365 switch (tag_protocol) { 2366 case DSA_TAG_PROTO_BRCM: 2367 case DSA_TAG_PROTO_BRCM_PREPEND: 2368 dev_warn(ds->dev, 2369 "Port %d is stacked to Broadcom tag switch\n", port); 2370 ret = false; 2371 break; 2372 default: 2373 ret = true; 2374 break; 2375 } 2376 2377 return ret; 2378 } 2379 2380 enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port, 2381 enum dsa_tag_protocol mprot) 2382 { 2383 struct b53_device *dev = ds->priv; 2384 2385 if (!b53_can_enable_brcm_tags(ds, port, mprot)) { 2386 dev->tag_protocol = DSA_TAG_PROTO_NONE; 2387 goto out; 2388 } 2389 2390 /* Older models require different 6 byte tags */ 2391 if (is5325(dev) || is5365(dev)) { 2392 dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY_FCS; 2393 goto out; 2394 } else if (is63xx(dev)) { 2395 dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY; 2396 goto out; 2397 } 2398 2399 /* Broadcom
BCM58xx chips have a flow accelerator on Port 8 2400 * which requires us to use the prepended Broadcom tag type 2401 */ 2402 if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT) { 2403 dev->tag_protocol = DSA_TAG_PROTO_BRCM_PREPEND; 2404 goto out; 2405 } 2406 2407 dev->tag_protocol = DSA_TAG_PROTO_BRCM; 2408 out: 2409 return dev->tag_protocol; 2410 } 2411 EXPORT_SYMBOL(b53_get_tag_protocol); 2412 2413 int b53_mirror_add(struct dsa_switch *ds, int port, 2414 struct dsa_mall_mirror_tc_entry *mirror, bool ingress, 2415 struct netlink_ext_ack *extack) 2416 { 2417 struct b53_device *dev = ds->priv; 2418 u16 reg, loc; 2419 2420 if (ingress) 2421 loc = B53_IG_MIR_CTL; 2422 else 2423 loc = B53_EG_MIR_CTL; 2424 2425 b53_read16(dev, B53_MGMT_PAGE, loc, &reg); 2426 reg |= BIT(port); 2427 b53_write16(dev, B53_MGMT_PAGE, loc, reg); 2428 2429 b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg); 2430 reg &= ~CAP_PORT_MASK; 2431 reg |= mirror->to_local_port; 2432 reg |= MIRROR_EN; 2433 b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg); 2434 2435 return 0; 2436 } 2437 EXPORT_SYMBOL(b53_mirror_add); 2438 2439 void b53_mirror_del(struct dsa_switch *ds, int port, 2440 struct dsa_mall_mirror_tc_entry *mirror) 2441 { 2442 struct b53_device *dev = ds->priv; 2443 bool loc_disable = false, other_loc_disable = false; 2444 u16 reg, loc; 2445 2446 if (mirror->ingress) 2447 loc = B53_IG_MIR_CTL; 2448 else 2449 loc = B53_EG_MIR_CTL; 2450 2451 /* Update the desired ingress/egress register */ 2452 b53_read16(dev, B53_MGMT_PAGE, loc, &reg); 2453 reg &= ~BIT(port); 2454 if (!(reg & MIRROR_MASK)) 2455 loc_disable = true; 2456 b53_write16(dev, B53_MGMT_PAGE, loc, reg); 2457 2458 /* Now look at the other one to know if we can disable mirroring 2459 * entirely 2460 */ 2461 if (mirror->ingress) 2462 b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, &reg); 2463 else 2464 b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, &reg); 2465 if (!(reg & MIRROR_MASK)) 2466 other_loc_disable = true; 2467 2468 b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg); 2469 /* Both no longer have ports, let's disable mirroring */ 2470 if (loc_disable && other_loc_disable) { 2471 reg &= ~MIRROR_EN; 2472 reg &= ~mirror->to_local_port; 2473 } 2474 b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg); 2475 } 2476 EXPORT_SYMBOL(b53_mirror_del); 2477 2478 /* Returns 0 if EEE was not enabled, or 1 otherwise 2479 */ 2480 int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy) 2481 { 2482 int ret; 2483 2484 if (!b53_support_eee(ds, port)) 2485 return 0; 2486 2487 ret = phy_init_eee(phy, false); 2488 if (ret) 2489 return 0; 2490 2491 b53_eee_enable_set(ds, port, true); 2492 2493 return 1; 2494 } 2495 EXPORT_SYMBOL(b53_eee_init); 2496 2497 bool b53_support_eee(struct dsa_switch *ds, int port) 2498 { 2499 struct b53_device *dev = ds->priv; 2500 2501 return !is5325(dev) && !is5365(dev) && !is63xx(dev); 2502 } 2503 EXPORT_SYMBOL(b53_support_eee); 2504 2505 int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e) 2506 { 2507 struct b53_device *dev = ds->priv; 2508 struct ethtool_keee *p = &dev->ports[port].eee; 2509 2510 p->eee_enabled = e->eee_enabled; 2511 b53_eee_enable_set(ds, port, e->eee_enabled); 2512 2513 return 0; 2514 } 2515 EXPORT_SYMBOL(b53_set_mac_eee); 2516 2517 static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu) 2518 { 2519 struct b53_device *dev = ds->priv; 2520 bool enable_jumbo; 2521 bool allow_10_100; 2522 2523 if (is5325(dev) || is5365(dev)) 2524 return 0; 2525 2526 if (!dsa_is_cpu_port(ds,
port)) 2527 return 0; 2528 2529 enable_jumbo = (mtu > ETH_DATA_LEN); 2530 allow_10_100 = !is63xx(dev); 2531 2532 return b53_set_jumbo(dev, enable_jumbo, allow_10_100); 2533 } 2534 2535 static int b53_get_max_mtu(struct dsa_switch *ds, int port) 2536 { 2537 struct b53_device *dev = ds->priv; 2538 2539 if (is5325(dev) || is5365(dev)) 2540 return B53_MAX_MTU_25; 2541 2542 return B53_MAX_MTU; 2543 } 2544 2545 int b53_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) 2546 { 2547 struct b53_device *dev = ds->priv; 2548 u32 atc; 2549 int reg; 2550 2551 if (is63xx(dev)) 2552 reg = B53_AGING_TIME_CONTROL_63XX; 2553 else 2554 reg = B53_AGING_TIME_CONTROL; 2555 2556 atc = DIV_ROUND_CLOSEST(msecs, 1000); 2557 2558 if (!is5325(dev) && !is5365(dev)) 2559 atc |= AGE_CHANGE; 2560 2561 b53_write32(dev, B53_MGMT_PAGE, reg, atc); 2562 2563 return 0; 2564 } 2565 EXPORT_SYMBOL_GPL(b53_set_ageing_time); 2566 2567 static const struct phylink_mac_ops b53_phylink_mac_ops = { 2568 .mac_select_pcs = b53_phylink_mac_select_pcs, 2569 .mac_config = b53_phylink_mac_config, 2570 .mac_link_down = b53_phylink_mac_link_down, 2571 .mac_link_up = b53_phylink_mac_link_up, 2572 }; 2573 2574 static const struct dsa_switch_ops b53_switch_ops = { 2575 .get_tag_protocol = b53_get_tag_protocol, 2576 .setup = b53_setup, 2577 .teardown = b53_teardown, 2578 .get_strings = b53_get_strings, 2579 .get_ethtool_stats = b53_get_ethtool_stats, 2580 .get_sset_count = b53_get_sset_count, 2581 .get_ethtool_phy_stats = b53_get_ethtool_phy_stats, 2582 .phy_read = b53_phy_read16, 2583 .phy_write = b53_phy_write16, 2584 .phylink_get_caps = b53_phylink_get_caps, 2585 .port_setup = b53_setup_port, 2586 .port_enable = b53_enable_port, 2587 .port_disable = b53_disable_port, 2588 .support_eee = b53_support_eee, 2589 .set_mac_eee = b53_set_mac_eee, 2590 .set_ageing_time = b53_set_ageing_time, 2591 .port_bridge_join = b53_br_join, 2592 .port_bridge_leave = b53_br_leave, 2593 .port_pre_bridge_flags = b53_br_flags_pre, 2594 .port_bridge_flags = b53_br_flags, 2595 .port_stp_state_set = b53_br_set_stp_state, 2596 .port_fast_age = b53_br_fast_age, 2597 .port_vlan_filtering = b53_vlan_filtering, 2598 .port_vlan_add = b53_vlan_add, 2599 .port_vlan_del = b53_vlan_del, 2600 .port_fdb_dump = b53_fdb_dump, 2601 .port_fdb_add = b53_fdb_add, 2602 .port_fdb_del = b53_fdb_del, 2603 .port_mirror_add = b53_mirror_add, 2604 .port_mirror_del = b53_mirror_del, 2605 .port_mdb_add = b53_mdb_add, 2606 .port_mdb_del = b53_mdb_del, 2607 .port_max_mtu = b53_get_max_mtu, 2608 .port_change_mtu = b53_change_mtu, 2609 }; 2610 2611 struct b53_chip_data { 2612 u32 chip_id; 2613 const char *dev_name; 2614 u16 vlans; 2615 u16 enabled_ports; 2616 u8 imp_port; 2617 u8 cpu_port; 2618 u8 vta_regs[3]; 2619 u8 arl_bins; 2620 u16 arl_buckets; 2621 u8 duplex_reg; 2622 u8 jumbo_pm_reg; 2623 u8 jumbo_size_reg; 2624 }; 2625 2626 #define B53_VTA_REGS \ 2627 { B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY } 2628 #define B53_VTA_REGS_9798 \ 2629 { B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 } 2630 #define B53_VTA_REGS_63XX \ 2631 { B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX } 2632 2633 static const struct b53_chip_data b53_switch_chips[] = { 2634 { 2635 .chip_id = BCM5325_DEVICE_ID, 2636 .dev_name = "BCM5325", 2637 .vlans = 16, 2638 .enabled_ports = 0x3f, 2639 .arl_bins = 2, 2640 .arl_buckets = 1024, 2641 .imp_port = 5, 2642 .duplex_reg = B53_DUPLEX_STAT_FE, 2643 }, 2644 { 2645 .chip_id = BCM5365_DEVICE_ID, 2646 .dev_name = "BCM5365", 2647 .vlans = 256, 2648 
.enabled_ports = 0x3f, 2649 .arl_bins = 2, 2650 .arl_buckets = 1024, 2651 .imp_port = 5, 2652 .duplex_reg = B53_DUPLEX_STAT_FE, 2653 }, 2654 { 2655 .chip_id = BCM5389_DEVICE_ID, 2656 .dev_name = "BCM5389", 2657 .vlans = 4096, 2658 .enabled_ports = 0x11f, 2659 .arl_bins = 4, 2660 .arl_buckets = 1024, 2661 .imp_port = 8, 2662 .vta_regs = B53_VTA_REGS, 2663 .duplex_reg = B53_DUPLEX_STAT_GE, 2664 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2665 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2666 }, 2667 { 2668 .chip_id = BCM5395_DEVICE_ID, 2669 .dev_name = "BCM5395", 2670 .vlans = 4096, 2671 .enabled_ports = 0x11f, 2672 .arl_bins = 4, 2673 .arl_buckets = 1024, 2674 .imp_port = 8, 2675 .vta_regs = B53_VTA_REGS, 2676 .duplex_reg = B53_DUPLEX_STAT_GE, 2677 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2678 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2679 }, 2680 { 2681 .chip_id = BCM5397_DEVICE_ID, 2682 .dev_name = "BCM5397", 2683 .vlans = 4096, 2684 .enabled_ports = 0x11f, 2685 .arl_bins = 4, 2686 .arl_buckets = 1024, 2687 .imp_port = 8, 2688 .vta_regs = B53_VTA_REGS_9798, 2689 .duplex_reg = B53_DUPLEX_STAT_GE, 2690 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2691 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2692 }, 2693 { 2694 .chip_id = BCM5398_DEVICE_ID, 2695 .dev_name = "BCM5398", 2696 .vlans = 4096, 2697 .enabled_ports = 0x17f, 2698 .arl_bins = 4, 2699 .arl_buckets = 1024, 2700 .imp_port = 8, 2701 .vta_regs = B53_VTA_REGS_9798, 2702 .duplex_reg = B53_DUPLEX_STAT_GE, 2703 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2704 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2705 }, 2706 { 2707 .chip_id = BCM53101_DEVICE_ID, 2708 .dev_name = "BCM53101", 2709 .vlans = 4096, 2710 .enabled_ports = 0x11f, 2711 .arl_bins = 4, 2712 .arl_buckets = 512, 2713 .vta_regs = B53_VTA_REGS, 2714 .imp_port = 8, 2715 .duplex_reg = B53_DUPLEX_STAT_GE, 2716 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2717 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2718 }, 2719 { 2720 .chip_id = BCM53115_DEVICE_ID, 2721 .dev_name = "BCM53115", 2722 .vlans = 4096, 2723 .enabled_ports = 0x11f, 2724 .arl_bins = 4, 2725 .arl_buckets = 1024, 2726 .vta_regs = B53_VTA_REGS, 2727 .imp_port = 8, 2728 .duplex_reg = B53_DUPLEX_STAT_GE, 2729 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2730 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2731 }, 2732 { 2733 .chip_id = BCM53125_DEVICE_ID, 2734 .dev_name = "BCM53125", 2735 .vlans = 4096, 2736 .enabled_ports = 0x1ff, 2737 .arl_bins = 4, 2738 .arl_buckets = 1024, 2739 .imp_port = 8, 2740 .vta_regs = B53_VTA_REGS, 2741 .duplex_reg = B53_DUPLEX_STAT_GE, 2742 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2743 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2744 }, 2745 { 2746 .chip_id = BCM53128_DEVICE_ID, 2747 .dev_name = "BCM53128", 2748 .vlans = 4096, 2749 .enabled_ports = 0x1ff, 2750 .arl_bins = 4, 2751 .arl_buckets = 1024, 2752 .imp_port = 8, 2753 .vta_regs = B53_VTA_REGS, 2754 .duplex_reg = B53_DUPLEX_STAT_GE, 2755 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2756 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2757 }, 2758 { 2759 .chip_id = BCM63XX_DEVICE_ID, 2760 .dev_name = "BCM63xx", 2761 .vlans = 4096, 2762 .enabled_ports = 0, /* pdata must provide them */ 2763 .arl_bins = 4, 2764 .arl_buckets = 1024, 2765 .imp_port = 8, 2766 .vta_regs = B53_VTA_REGS_63XX, 2767 .duplex_reg = B53_DUPLEX_STAT_63XX, 2768 .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX, 2769 .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX, 2770 }, 2771 { 2772 .chip_id = BCM63268_DEVICE_ID, 2773 .dev_name = "BCM63268", 2774 .vlans = 4096, 2775 .enabled_ports = 0, /* pdata must provide them */ 2776 .arl_bins = 4, 2777 .arl_buckets = 1024, 2778 .imp_port = 8, 2779 
.vta_regs = B53_VTA_REGS_63XX, 2780 .duplex_reg = B53_DUPLEX_STAT_63XX, 2781 .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX, 2782 .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX, 2783 }, 2784 { 2785 .chip_id = BCM53010_DEVICE_ID, 2786 .dev_name = "BCM53010", 2787 .vlans = 4096, 2788 .enabled_ports = 0x1bf, 2789 .arl_bins = 4, 2790 .arl_buckets = 1024, 2791 .imp_port = 8, 2792 .vta_regs = B53_VTA_REGS, 2793 .duplex_reg = B53_DUPLEX_STAT_GE, 2794 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2795 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2796 }, 2797 { 2798 .chip_id = BCM53011_DEVICE_ID, 2799 .dev_name = "BCM53011", 2800 .vlans = 4096, 2801 .enabled_ports = 0x1bf, 2802 .arl_bins = 4, 2803 .arl_buckets = 1024, 2804 .imp_port = 8, 2805 .vta_regs = B53_VTA_REGS, 2806 .duplex_reg = B53_DUPLEX_STAT_GE, 2807 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2808 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2809 }, 2810 { 2811 .chip_id = BCM53012_DEVICE_ID, 2812 .dev_name = "BCM53012", 2813 .vlans = 4096, 2814 .enabled_ports = 0x1bf, 2815 .arl_bins = 4, 2816 .arl_buckets = 1024, 2817 .imp_port = 8, 2818 .vta_regs = B53_VTA_REGS, 2819 .duplex_reg = B53_DUPLEX_STAT_GE, 2820 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2821 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2822 }, 2823 { 2824 .chip_id = BCM53018_DEVICE_ID, 2825 .dev_name = "BCM53018", 2826 .vlans = 4096, 2827 .enabled_ports = 0x1bf, 2828 .arl_bins = 4, 2829 .arl_buckets = 1024, 2830 .imp_port = 8, 2831 .vta_regs = B53_VTA_REGS, 2832 .duplex_reg = B53_DUPLEX_STAT_GE, 2833 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2834 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2835 }, 2836 { 2837 .chip_id = BCM53019_DEVICE_ID, 2838 .dev_name = "BCM53019", 2839 .vlans = 4096, 2840 .enabled_ports = 0x1bf, 2841 .arl_bins = 4, 2842 .arl_buckets = 1024, 2843 .imp_port = 8, 2844 .vta_regs = B53_VTA_REGS, 2845 .duplex_reg = B53_DUPLEX_STAT_GE, 2846 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2847 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2848 }, 2849 { 2850 .chip_id = BCM58XX_DEVICE_ID, 2851 .dev_name = "BCM585xx/586xx/88312", 2852 .vlans = 4096, 2853 .enabled_ports = 0x1ff, 2854 .arl_bins = 4, 2855 .arl_buckets = 1024, 2856 .imp_port = 8, 2857 .vta_regs = B53_VTA_REGS, 2858 .duplex_reg = B53_DUPLEX_STAT_GE, 2859 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2860 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2861 }, 2862 { 2863 .chip_id = BCM583XX_DEVICE_ID, 2864 .dev_name = "BCM583xx/11360", 2865 .vlans = 4096, 2866 .enabled_ports = 0x103, 2867 .arl_bins = 4, 2868 .arl_buckets = 1024, 2869 .imp_port = 8, 2870 .vta_regs = B53_VTA_REGS, 2871 .duplex_reg = B53_DUPLEX_STAT_GE, 2872 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2873 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2874 }, 2875 /* Starfighter 2 */ 2876 { 2877 .chip_id = BCM4908_DEVICE_ID, 2878 .dev_name = "BCM4908", 2879 .vlans = 4096, 2880 .enabled_ports = 0x1bf, 2881 .arl_bins = 4, 2882 .arl_buckets = 256, 2883 .imp_port = 8, 2884 .vta_regs = B53_VTA_REGS, 2885 .duplex_reg = B53_DUPLEX_STAT_GE, 2886 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2887 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2888 }, 2889 { 2890 .chip_id = BCM7445_DEVICE_ID, 2891 .dev_name = "BCM7445", 2892 .vlans = 4096, 2893 .enabled_ports = 0x1ff, 2894 .arl_bins = 4, 2895 .arl_buckets = 1024, 2896 .imp_port = 8, 2897 .vta_regs = B53_VTA_REGS, 2898 .duplex_reg = B53_DUPLEX_STAT_GE, 2899 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2900 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2901 }, 2902 { 2903 .chip_id = BCM7278_DEVICE_ID, 2904 .dev_name = "BCM7278", 2905 .vlans = 4096, 2906 .enabled_ports = 0x1ff, 2907 .arl_bins = 4, 2908 .arl_buckets = 256, 2909 .imp_port = 8, 2910 
.vta_regs = B53_VTA_REGS, 2911 .duplex_reg = B53_DUPLEX_STAT_GE, 2912 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2913 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2914 }, 2915 { 2916 .chip_id = BCM53134_DEVICE_ID, 2917 .dev_name = "BCM53134", 2918 .vlans = 4096, 2919 .enabled_ports = 0x12f, 2920 .imp_port = 8, 2921 .cpu_port = B53_CPU_PORT, 2922 .vta_regs = B53_VTA_REGS, 2923 .arl_bins = 4, 2924 .arl_buckets = 1024, 2925 .duplex_reg = B53_DUPLEX_STAT_GE, 2926 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2927 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2928 }, 2929 }; 2930 2931 static int b53_switch_init(struct b53_device *dev) 2932 { 2933 unsigned int i; 2934 int ret; 2935 2936 for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) { 2937 const struct b53_chip_data *chip = &b53_switch_chips[i]; 2938 2939 if (chip->chip_id == dev->chip_id) { 2940 if (!dev->enabled_ports) 2941 dev->enabled_ports = chip->enabled_ports; 2942 dev->name = chip->dev_name; 2943 dev->duplex_reg = chip->duplex_reg; 2944 dev->vta_regs[0] = chip->vta_regs[0]; 2945 dev->vta_regs[1] = chip->vta_regs[1]; 2946 dev->vta_regs[2] = chip->vta_regs[2]; 2947 dev->jumbo_pm_reg = chip->jumbo_pm_reg; dev->jumbo_size_reg = chip->jumbo_size_reg; 2948 dev->imp_port = chip->imp_port; 2949 dev->num_vlans = chip->vlans; 2950 dev->num_arl_bins = chip->arl_bins; 2951 dev->num_arl_buckets = chip->arl_buckets; 2952 break; 2953 } 2954 } 2955 2956 /* check which BCM5325x version we have */ 2957 if (is5325(dev)) { 2958 u8 vc4; 2959 2960 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4); 2961 2962 /* check reserved bits */ 2963 switch (vc4 & 3) { 2964 case 1: 2965 /* BCM5325E */ 2966 break; 2967 case 3: 2968 /* BCM5325F - do not use port 4 */ 2969 dev->enabled_ports &= ~BIT(4); 2970 break; 2971 default: 2972 /* On the BCM47XX SoCs this is the supported internal switch.*/ 2973 #ifndef CONFIG_BCM47XX 2974 /* BCM5325M */ 2975 return -EINVAL; 2976 #else 2977 break; 2978 #endif 2979 } 2980 } 2981 2982 if (is5325e(dev)) 2983 dev->num_arl_buckets = 512; 2984 2985 dev->num_ports = fls(dev->enabled_ports); 2986 2987 dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS); 2988 2989 /* Include non standard CPU port built-in PHYs to be probed */ 2990 if (is539x(dev) || is531x5(dev)) { 2991 for (i = 0; i < dev->num_ports; i++) { 2992 if (!(dev->ds->phys_mii_mask & BIT(i)) && 2993 !b53_possible_cpu_port(dev->ds, i)) 2994 dev->ds->phys_mii_mask |= BIT(i); 2995 } 2996 } 2997 2998 dev->ports = devm_kcalloc(dev->dev, 2999 dev->num_ports, sizeof(struct b53_port), 3000 GFP_KERNEL); 3001 if (!dev->ports) 3002 return -ENOMEM; 3003 3004 dev->vlans = devm_kcalloc(dev->dev, 3005 dev->num_vlans, sizeof(struct b53_vlan), 3006 GFP_KERNEL); 3007 if (!dev->vlans) 3008 return -ENOMEM; 3009 3010 dev->reset_gpio = b53_switch_get_reset_gpio(dev); 3011 if (dev->reset_gpio >= 0) { 3012 ret = devm_gpio_request_one(dev->dev, dev->reset_gpio, 3013 GPIOF_OUT_INIT_HIGH, "robo_reset"); 3014 if (ret) 3015 return ret; 3016 } 3017 3018 return 0; 3019 } 3020 3021 struct b53_device *b53_switch_alloc(struct device *base, 3022 const struct b53_io_ops *ops, 3023 void *priv) 3024 { 3025 struct dsa_switch *ds; 3026 struct b53_device *dev; 3027 3028 ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL); 3029 if (!ds) 3030 return NULL; 3031 3032 ds->dev = base; 3033 3034 dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL); 3035 if (!dev) 3036 return NULL; 3037 3038 ds->priv = dev; 3039 dev->dev = base; 3040 3041 dev->ds = ds; 3042 dev->priv = priv; 3043 dev->ops = ops; 3044 ds->ops = &b53_switch_ops; 3045 ds->phylink_mac_ops = &b53_phylink_mac_ops; 3046
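/* Default to VLAN-enabled, non-filtering operation until the DSA core requests otherwise. */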
dev->vlan_enabled = true; 3047 dev->vlan_filtering = false; 3048 /* Let DSA handle the case where multiple bridges span the same switch 3049 * device and different VLAN awareness settings are requested, which 3050 * would be breaking filtering semantics for any of the other bridge 3051 * devices. (not hardware supported) 3052 */ 3053 ds->vlan_filtering_is_global = true; 3054 3055 mutex_init(&dev->reg_mutex); 3056 mutex_init(&dev->stats_mutex); 3057 mutex_init(&dev->arl_mutex); 3058 3059 return dev; 3060 } 3061 EXPORT_SYMBOL(b53_switch_alloc); 3062 3063 int b53_switch_detect(struct b53_device *dev) 3064 { 3065 u32 id32; 3066 u16 tmp; 3067 u8 id8; 3068 int ret; 3069 3070 ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8); 3071 if (ret) 3072 return ret; 3073 3074 switch (id8) { 3075 case 0: 3076 /* BCM5325 and BCM5365 do not have this register so reads 3077 * return 0. But the read operation did succeed, so assume this 3078 * is one of them. 3079 * 3080 * Next check if we can write to the 5325's VTA register; for 3081 * 5365 it is read only. 3082 */ 3083 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf); 3084 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp); 3085 3086 if (tmp == 0xf) { 3087 u32 phy_id; 3088 int val; 3089 3090 dev->chip_id = BCM5325_DEVICE_ID; 3091 3092 val = b53_phy_read16(dev->ds, 0, MII_PHYSID1); 3093 phy_id = (val & 0xffff) << 16; 3094 val = b53_phy_read16(dev->ds, 0, MII_PHYSID2); 3095 phy_id |= (val & 0xfff0); 3096 3097 if (phy_id == 0x00406330) 3098 dev->variant_id = B53_VARIANT_5325M; 3099 else if (phy_id == 0x0143bc30) 3100 dev->variant_id = B53_VARIANT_5325E; 3101 } else { 3102 dev->chip_id = BCM5365_DEVICE_ID; 3103 } 3104 break; 3105 case BCM5389_DEVICE_ID: 3106 case BCM5395_DEVICE_ID: 3107 case BCM5397_DEVICE_ID: 3108 case BCM5398_DEVICE_ID: 3109 dev->chip_id = id8; 3110 break; 3111 default: 3112 ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32); 3113 if (ret) 3114 return ret; 3115 3116 switch (id32) { 3117 case BCM53101_DEVICE_ID: 3118 case BCM53115_DEVICE_ID: 3119 case BCM53125_DEVICE_ID: 3120 case BCM53128_DEVICE_ID: 3121 case BCM53010_DEVICE_ID: 3122 case BCM53011_DEVICE_ID: 3123 case BCM53012_DEVICE_ID: 3124 case BCM53018_DEVICE_ID: 3125 case BCM53019_DEVICE_ID: 3126 case BCM53134_DEVICE_ID: 3127 dev->chip_id = id32; 3128 break; 3129 default: 3130 dev_err(dev->dev, 3131 "unsupported switch detected (BCM53%02x/BCM%x)\n", 3132 id8, id32); 3133 return -ENODEV; 3134 } 3135 } 3136 3137 if (dev->chip_id == BCM5325_DEVICE_ID) 3138 return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25, 3139 &dev->core_rev); 3140 else 3141 return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID, 3142 &dev->core_rev); 3143 } 3144 EXPORT_SYMBOL(b53_switch_detect); 3145 3146 int b53_switch_register(struct b53_device *dev) 3147 { 3148 int ret; 3149 3150 if (dev->pdata) { 3151 dev->chip_id = dev->pdata->chip_id; 3152 dev->enabled_ports = dev->pdata->enabled_ports; 3153 } 3154 3155 if (!dev->chip_id && b53_switch_detect(dev)) 3156 return -EINVAL; 3157 3158 ret = b53_switch_init(dev); 3159 if (ret) 3160 return ret; 3161 3162 dev_info(dev->dev, "found switch: %s, rev %i\n", 3163 dev->name, dev->core_rev); 3164 3165 return dsa_register_switch(dev->ds); 3166 } 3167 EXPORT_SYMBOL(b53_switch_register); 3168 3169 MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>"); 3170 MODULE_DESCRIPTION("B53 switch library"); 3171 MODULE_LICENSE("Dual BSD/GPL"); 3172
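/* Usage sketch (illustrative only, not part of the driver): a bus glue
 * driver provides its register accessors through a struct b53_io_ops and
 * hands them to this library via b53_switch_alloc() and
 * b53_switch_register(); the foo_* names below are placeholders for the
 * bus-specific implementation.
 *
 *	static const struct b53_io_ops foo_io_ops = {
 *		.read8	= foo_read8,
 *		.write8	= foo_write8,
 *		...
 *	};
 *
 *	dev = b53_switch_alloc(&pdev->dev, &foo_io_ops, foo_priv);
 *	if (!dev)
 *		return -ENOMEM;
 *
 *	return b53_switch_register(dev);
 */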