1 /* 2 * B53 switch driver main logic 3 * 4 * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org> 5 * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com> 6 * 7 * Permission to use, copy, modify, and/or distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include <linux/delay.h> 21 #include <linux/export.h> 22 #include <linux/gpio.h> 23 #include <linux/kernel.h> 24 #include <linux/math.h> 25 #include <linux/minmax.h> 26 #include <linux/module.h> 27 #include <linux/platform_data/b53.h> 28 #include <linux/phy.h> 29 #include <linux/phylink.h> 30 #include <linux/etherdevice.h> 31 #include <linux/if_bridge.h> 32 #include <linux/if_vlan.h> 33 #include <net/dsa.h> 34 35 #include "b53_regs.h" 36 #include "b53_priv.h" 37 38 struct b53_mib_desc { 39 u8 size; 40 u8 offset; 41 const char *name; 42 }; 43 44 /* BCM5365 MIB counters */ 45 static const struct b53_mib_desc b53_mibs_65[] = { 46 { 8, 0x00, "TxOctets" }, 47 { 4, 0x08, "TxDropPkts" }, 48 { 4, 0x10, "TxBroadcastPkts" }, 49 { 4, 0x14, "TxMulticastPkts" }, 50 { 4, 0x18, "TxUnicastPkts" }, 51 { 4, 0x1c, "TxCollisions" }, 52 { 4, 0x20, "TxSingleCollision" }, 53 { 4, 0x24, "TxMultipleCollision" }, 54 { 4, 0x28, "TxDeferredTransmit" }, 55 { 4, 0x2c, "TxLateCollision" }, 56 { 4, 0x30, "TxExcessiveCollision" }, 57 { 4, 0x38, "TxPausePkts" }, 58 { 8, 0x44, "RxOctets" }, 59 { 4, 0x4c, "RxUndersizePkts" }, 60 { 4, 0x50, "RxPausePkts" }, 61 { 4, 0x54, "Pkts64Octets" }, 62 { 4, 0x58, "Pkts65to127Octets" }, 63 { 4, 0x5c, "Pkts128to255Octets" }, 64 { 4, 0x60, "Pkts256to511Octets" }, 65 { 4, 0x64, "Pkts512to1023Octets" }, 66 { 4, 0x68, "Pkts1024to1522Octets" }, 67 { 4, 0x6c, "RxOversizePkts" }, 68 { 4, 0x70, "RxJabbers" }, 69 { 4, 0x74, "RxAlignmentErrors" }, 70 { 4, 0x78, "RxFCSErrors" }, 71 { 8, 0x7c, "RxGoodOctets" }, 72 { 4, 0x84, "RxDropPkts" }, 73 { 4, 0x88, "RxUnicastPkts" }, 74 { 4, 0x8c, "RxMulticastPkts" }, 75 { 4, 0x90, "RxBroadcastPkts" }, 76 { 4, 0x94, "RxSAChanges" }, 77 { 4, 0x98, "RxFragments" }, 78 }; 79 80 #define B53_MIBS_65_SIZE ARRAY_SIZE(b53_mibs_65) 81 82 /* BCM63xx MIB counters */ 83 static const struct b53_mib_desc b53_mibs_63xx[] = { 84 { 8, 0x00, "TxOctets" }, 85 { 4, 0x08, "TxDropPkts" }, 86 { 4, 0x0c, "TxQoSPkts" }, 87 { 4, 0x10, "TxBroadcastPkts" }, 88 { 4, 0x14, "TxMulticastPkts" }, 89 { 4, 0x18, "TxUnicastPkts" }, 90 { 4, 0x1c, "TxCollisions" }, 91 { 4, 0x20, "TxSingleCollision" }, 92 { 4, 0x24, "TxMultipleCollision" }, 93 { 4, 0x28, "TxDeferredTransmit" }, 94 { 4, 0x2c, "TxLateCollision" }, 95 { 4, 0x30, "TxExcessiveCollision" }, 96 { 4, 0x38, "TxPausePkts" }, 97 { 8, 0x3c, "TxQoSOctets" }, 98 { 8, 0x44, "RxOctets" }, 99 { 4, 0x4c, "RxUndersizePkts" }, 100 { 4, 0x50, "RxPausePkts" }, 101 { 4, 0x54, "Pkts64Octets" }, 102 { 4, 0x58, "Pkts65to127Octets" }, 103 { 4, 0x5c, "Pkts128to255Octets" }, 104 { 4, 0x60, "Pkts256to511Octets" }, 105 { 4, 0x64, "Pkts512to1023Octets" }, 106 { 4, 0x68, "Pkts1024to1522Octets" }, 107 { 4, 0x6c, "RxOversizePkts" }, 108 { 4, 0x70, "RxJabbers" }, 109 { 4, 0x74, "RxAlignmentErrors" }, 110 { 4, 0x78, "RxFCSErrors" }, 111 { 8, 0x7c, "RxGoodOctets" }, 112 { 4, 0x84, "RxDropPkts" }, 113 { 4, 0x88, "RxUnicastPkts" }, 114 { 4, 0x8c, "RxMulticastPkts" }, 115 { 4, 0x90, "RxBroadcastPkts" }, 116 { 4, 0x94, "RxSAChanges" }, 117 { 4, 0x98, "RxFragments" }, 118 { 4, 0xa0, "RxSymbolErrors" }, 119 { 4, 0xa4, "RxQoSPkts" }, 120 { 8, 0xa8, "RxQoSOctets" }, 121 { 4, 0xb0, "Pkts1523to2047Octets" }, 122 { 4, 0xb4, "Pkts2048to4095Octets" }, 123 { 4, 0xb8, "Pkts4096to8191Octets" }, 124 { 4, 0xbc, "Pkts8192to9728Octets" }, 125 { 4, 0xc0, "RxDiscarded" }, 126 }; 127 128 #define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx) 129 130 /* MIB counters */ 131 static const struct b53_mib_desc b53_mibs[] = { 132 { 8, 0x00, "TxOctets" }, 133 { 4, 0x08, "TxDropPkts" }, 134 { 4, 0x10, "TxBroadcastPkts" }, 135 { 4, 0x14, "TxMulticastPkts" }, 136 { 4, 0x18, "TxUnicastPkts" }, 137 { 4, 0x1c, "TxCollisions" }, 138 { 4, 0x20, "TxSingleCollision" }, 139 { 4, 0x24, "TxMultipleCollision" }, 140 { 4, 0x28, "TxDeferredTransmit" }, 141 { 4, 0x2c, "TxLateCollision" }, 142 { 4, 0x30, "TxExcessiveCollision" }, 143 { 4, 0x38, "TxPausePkts" }, 144 { 8, 0x50, "RxOctets" }, 145 { 4, 0x58, "RxUndersizePkts" }, 146 { 4, 0x5c, "RxPausePkts" }, 147 { 4, 0x60, "Pkts64Octets" }, 148 { 4, 0x64, "Pkts65to127Octets" }, 149 { 4, 0x68, "Pkts128to255Octets" }, 150 { 4, 0x6c, "Pkts256to511Octets" }, 151 { 4, 0x70, "Pkts512to1023Octets" }, 152 { 4, 0x74, "Pkts1024to1522Octets" }, 153 { 4, 0x78, "RxOversizePkts" }, 154 { 4, 0x7c, "RxJabbers" }, 155 { 4, 0x80, "RxAlignmentErrors" }, 156 { 4, 0x84, "RxFCSErrors" }, 157 { 8, 0x88, "RxGoodOctets" }, 158 { 4, 0x90, "RxDropPkts" }, 159 { 4, 0x94, "RxUnicastPkts" }, 160 { 4, 0x98, "RxMulticastPkts" }, 161 { 4, 0x9c, "RxBroadcastPkts" }, 162 { 4, 0xa0, "RxSAChanges" }, 163 { 4, 0xa4, "RxFragments" }, 164 { 4, 0xa8, "RxJumboPkts" }, 165 { 4, 0xac, "RxSymbolErrors" }, 166 { 4, 0xc0, "RxDiscarded" }, 167 }; 168 169 #define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs) 170 171 static const struct b53_mib_desc b53_mibs_58xx[] = { 172 { 8, 0x00, "TxOctets" }, 173 { 4, 0x08, "TxDropPkts" }, 174 { 4, 0x0c, "TxQPKTQ0" }, 175 { 4, 0x10, "TxBroadcastPkts" }, 176 { 4, 0x14, "TxMulticastPkts" }, 177 { 4, 0x18, "TxUnicastPKts" }, 178 { 4, 0x1c, "TxCollisions" }, 179 { 4, 0x20, "TxSingleCollision" }, 180 { 4, 0x24, "TxMultipleCollision" }, 181 { 4, 0x28, "TxDeferredCollision" }, 182 { 4, 0x2c, "TxLateCollision" }, 183 { 4, 0x30, "TxExcessiveCollision" }, 184 { 4, 0x34, "TxFrameInDisc" }, 185 { 4, 0x38, "TxPausePkts" }, 186 { 4, 0x3c, "TxQPKTQ1" }, 187 { 4, 0x40, "TxQPKTQ2" }, 188 { 4, 0x44, "TxQPKTQ3" }, 189 { 4, 0x48, "TxQPKTQ4" }, 190 { 4, 0x4c, "TxQPKTQ5" }, 191 { 8, 0x50, "RxOctets" }, 192 { 4, 0x58, "RxUndersizePkts" }, 193 { 4, 0x5c, "RxPausePkts" }, 194 { 4, 0x60, "RxPkts64Octets" }, 195 { 4, 0x64, "RxPkts65to127Octets" }, 196 { 4, 0x68, "RxPkts128to255Octets" }, 197 { 4, 0x6c, "RxPkts256to511Octets" }, 198 { 4, 0x70, "RxPkts512to1023Octets" }, 199 { 4, 0x74, "RxPkts1024toMaxPktsOctets" }, 200 { 4, 0x78, "RxOversizePkts" }, 201 { 4, 0x7c, "RxJabbers" }, 202 { 4, 0x80, "RxAlignmentErrors" }, 203 { 4, 0x84, "RxFCSErrors" }, 204 { 8, 0x88, "RxGoodOctets" }, 205 { 4, 0x90, "RxDropPkts" }, 206 { 4, 0x94, "RxUnicastPkts" }, 207 { 4, 0x98, "RxMulticastPkts" }, 208 { 4, 0x9c, "RxBroadcastPkts" }, 209 { 4, 0xa0, "RxSAChanges" }, 210 { 4, 0xa4, "RxFragments" }, 211 { 4, 0xa8, "RxJumboPkt" }, 212 { 4, 0xac, "RxSymblErr" }, 213 { 4, 0xb0, "InRangeErrCount" }, 214 { 4, 0xb4, "OutRangeErrCount" }, 215 { 4, 0xb8, "EEELpiEvent" }, 216 { 4, 0xbc, "EEELpiDuration" }, 217 { 4, 0xc0, "RxDiscard" }, 218 { 4, 0xc8, "TxQPKTQ6" }, 219 { 4, 0xcc, "TxQPKTQ7" }, 220 { 4, 0xd0, "TxPkts64Octets" }, 221 { 4, 0xd4, "TxPkts65to127Octets" }, 222 { 4, 0xd8, "TxPkts128to255Octets" }, 223 { 4, 0xdc, "TxPkts256to511Ocets" }, 224 { 4, 0xe0, "TxPkts512to1023Ocets" }, 225 { 4, 0xe4, "TxPkts1024toMaxPktOcets" }, 226 }; 227 228 #define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx) 229 230 #define B53_MAX_MTU_25 (1536 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) 231 #define B53_MAX_MTU (9720 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) 232 233 static int b53_do_vlan_op(struct b53_device *dev, u8 op) 234 { 235 unsigned int i; 236 237 b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op); 238 239 for (i = 0; i < 10; i++) { 240 u8 vta; 241 242 b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta); 243 if (!(vta & VTA_START_CMD)) 244 return 0; 245 246 usleep_range(100, 200); 247 } 248 249 return -EIO; 250 } 251 252 static void b53_set_vlan_entry(struct b53_device *dev, u16 vid, 253 struct b53_vlan *vlan) 254 { 255 if (is5325(dev)) { 256 u32 entry = 0; 257 258 if (vlan->members) { 259 entry = ((vlan->untag & VA_UNTAG_MASK_25) << 260 VA_UNTAG_S_25) | vlan->members; 261 if (dev->core_rev >= 3) 262 entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S; 263 else 264 entry |= VA_VALID_25; 265 } 266 267 b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry); 268 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid | 269 VTA_RW_STATE_WR | VTA_RW_OP_EN); 270 } else if (is5365(dev)) { 271 u16 entry = 0; 272 273 if (vlan->members) 274 entry = ((vlan->untag & VA_UNTAG_MASK_65) << 275 VA_UNTAG_S_65) | vlan->members | VA_VALID_65; 276 277 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry); 278 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid | 279 VTA_RW_STATE_WR | VTA_RW_OP_EN); 280 } else { 281 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid); 282 b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], 283 (vlan->untag << VTE_UNTAG_S) | vlan->members); 284 285 b53_do_vlan_op(dev, VTA_CMD_WRITE); 286 } 287 288 dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n", 289 vid, vlan->members, vlan->untag); 290 } 291 292 static void b53_get_vlan_entry(struct b53_device *dev, u16 vid, 293 struct b53_vlan *vlan) 294 { 295 if (is5325(dev)) { 296 u32 entry = 0; 297 298 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid | 299 VTA_RW_STATE_RD | VTA_RW_OP_EN); 300 b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry); 301 302 if (dev->core_rev >= 3) 303 vlan->valid = !!(entry & VA_VALID_25_R4); 304 else 305 vlan->valid = !!(entry & VA_VALID_25); 306 vlan->members = entry & VA_MEMBER_MASK; 307 vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25; 308 309 } else if (is5365(dev)) { 310 u16 entry = 0; 311 312 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid | 313 VTA_RW_STATE_WR | VTA_RW_OP_EN); 314 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry); 315 316 vlan->valid = !!(entry & VA_VALID_65); 317 vlan->members = entry & VA_MEMBER_MASK; 318 vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65; 319 } else { 320 u32 entry = 0; 321 322 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid); 323 b53_do_vlan_op(dev, VTA_CMD_READ); 324 b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry); 325 vlan->members = entry & VTE_MEMBERS; 326 vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS; 327 vlan->valid = true; 328 } 329 } 330 331 static void b53_set_eap_mode(struct b53_device *dev, int port, int mode) 332 { 333 u64 eap_conf; 334 335 if (is5325(dev) || is5365(dev) || dev->chip_id == BCM5389_DEVICE_ID) 336 return; 337 338 b53_read64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), &eap_conf); 339 340 if (is63xx(dev)) { 341 eap_conf &= ~EAP_MODE_MASK_63XX; 342 eap_conf |= (u64)mode << EAP_MODE_SHIFT_63XX; 343 } else { 344 eap_conf &= ~EAP_MODE_MASK; 345 eap_conf |= (u64)mode << EAP_MODE_SHIFT; 346 } 347 348 b53_write64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), eap_conf); 349 } 350 351 static void b53_set_forwarding(struct b53_device *dev, int enable) 352 { 353 u8 mgmt; 354 355 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 356 357 if (enable) 358 mgmt |= SM_SW_FWD_EN; 359 else 360 mgmt &= ~SM_SW_FWD_EN; 361 362 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 363 364 if (!is5325(dev)) { 365 /* Include IMP port in dumb forwarding mode */ 366 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt); 367 mgmt |= B53_MII_DUMB_FWDG_EN; 368 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); 369 370 /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether 371 * frames should be flooded or not. 372 */ 373 b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); 374 mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IP_MC; 375 b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); 376 } else { 377 b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); 378 mgmt |= B53_IP_MC; 379 b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); 380 } 381 } 382 383 static void b53_enable_vlan(struct b53_device *dev, int port, bool enable, 384 bool enable_filtering) 385 { 386 u8 mgmt, vc0, vc1, vc4 = 0, vc5; 387 388 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 389 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0); 390 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1); 391 392 if (is5325(dev) || is5365(dev)) { 393 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4); 394 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5); 395 } else if (is63xx(dev)) { 396 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4); 397 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5); 398 } else { 399 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4); 400 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5); 401 } 402 403 vc1 &= ~VC1_RX_MCST_FWD_EN; 404 405 if (enable) { 406 vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; 407 vc1 |= VC1_RX_MCST_UNTAG_EN; 408 vc4 &= ~VC4_ING_VID_CHECK_MASK; 409 if (enable_filtering) { 410 vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; 411 vc5 |= VC5_DROP_VTABLE_MISS; 412 } else { 413 vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S; 414 vc5 &= ~VC5_DROP_VTABLE_MISS; 415 } 416 417 if (is5325(dev)) 418 vc0 &= ~VC0_RESERVED_1; 419 420 if (is5325(dev) || is5365(dev)) 421 vc1 |= VC1_RX_MCST_TAG_EN; 422 423 } else { 424 vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID); 425 vc1 &= ~VC1_RX_MCST_UNTAG_EN; 426 vc4 &= ~VC4_ING_VID_CHECK_MASK; 427 vc5 &= ~VC5_DROP_VTABLE_MISS; 428 429 if (is5325(dev) || is5365(dev)) 430 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; 431 else 432 vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S; 433 434 if (is5325(dev) || is5365(dev)) 435 vc1 &= ~VC1_RX_MCST_TAG_EN; 436 } 437 438 if (!is5325(dev) && !is5365(dev)) 439 vc5 &= ~VC5_VID_FFF_EN; 440 441 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0); 442 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1); 443 444 if (is5325(dev) || is5365(dev)) { 445 /* enable the high 8 bit vid check on 5325 */ 446 if (is5325(dev) && enable) 447 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 448 VC3_HIGH_8BIT_EN); 449 else 450 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0); 451 452 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4); 453 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5); 454 } else if (is63xx(dev)) { 455 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0); 456 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4); 457 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5); 458 } else { 459 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0); 460 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4); 461 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5); 462 } 463 464 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 465 466 dev->vlan_enabled = enable; 467 468 dev_dbg(dev->dev, "Port %d VLAN enabled: %d, filtering: %d\n", 469 port, enable, enable_filtering); 470 } 471 472 static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) 473 { 474 u32 port_mask = 0; 475 u16 max_size = JMS_MIN_SIZE; 476 477 if (is5325(dev) || is5365(dev)) 478 return -EINVAL; 479 480 if (enable) { 481 port_mask = dev->enabled_ports; 482 max_size = JMS_MAX_SIZE; 483 if (allow_10_100) 484 port_mask |= JPM_10_100_JUMBO_EN; 485 } 486 487 b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask); 488 return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size); 489 } 490 491 static int b53_flush_arl(struct b53_device *dev, u8 mask) 492 { 493 unsigned int i; 494 495 if (is5325(dev)) 496 return 0; 497 498 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, 499 FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask); 500 501 for (i = 0; i < 10; i++) { 502 u8 fast_age_ctrl; 503 504 b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, 505 &fast_age_ctrl); 506 507 if (!(fast_age_ctrl & FAST_AGE_DONE)) 508 goto out; 509 510 msleep(1); 511 } 512 513 return -ETIMEDOUT; 514 out: 515 /* Only age dynamic entries (default behavior) */ 516 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC); 517 return 0; 518 } 519 520 static int b53_fast_age_port(struct b53_device *dev, int port) 521 { 522 if (is5325(dev)) 523 return 0; 524 525 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port); 526 527 return b53_flush_arl(dev, FAST_AGE_PORT); 528 } 529 530 static int b53_fast_age_vlan(struct b53_device *dev, u16 vid) 531 { 532 if (is5325(dev)) 533 return 0; 534 535 b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid); 536 537 return b53_flush_arl(dev, FAST_AGE_VLAN); 538 } 539 540 void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) 541 { 542 struct b53_device *dev = ds->priv; 543 unsigned int i; 544 u16 pvlan; 545 546 /* BCM5325 CPU port is at 8 */ 547 if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25) 548 cpu_port = B53_CPU_PORT; 549 550 /* Enable the IMP port to be in the same VLAN as the other ports 551 * on a per-port basis such that we only have Port i and IMP in 552 * the same VLAN. 553 */ 554 b53_for_each_port(dev, i) { 555 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan); 556 pvlan |= BIT(cpu_port); 557 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan); 558 } 559 } 560 EXPORT_SYMBOL(b53_imp_vlan_setup); 561 562 static void b53_port_set_ucast_flood(struct b53_device *dev, int port, 563 bool unicast) 564 { 565 u16 uc; 566 567 if (is5325(dev)) { 568 if (port == B53_CPU_PORT_25) 569 port = B53_CPU_PORT; 570 571 b53_read16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, &uc); 572 if (unicast) 573 uc |= BIT(port) | B53_IEEE_UCAST_DROP_EN; 574 else 575 uc &= ~BIT(port); 576 b53_write16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, uc); 577 } else { 578 b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc); 579 if (unicast) 580 uc |= BIT(port); 581 else 582 uc &= ~BIT(port); 583 b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc); 584 } 585 } 586 587 static void b53_port_set_mcast_flood(struct b53_device *dev, int port, 588 bool multicast) 589 { 590 u16 mc; 591 592 if (is5325(dev)) { 593 if (port == B53_CPU_PORT_25) 594 port = B53_CPU_PORT; 595 596 b53_read16(dev, B53_IEEE_PAGE, B53_IEEE_MCAST_DLF, &mc); 597 if (multicast) 598 mc |= BIT(port) | B53_IEEE_MCAST_DROP_EN; 599 else 600 mc &= ~BIT(port); 601 b53_write16(dev, B53_IEEE_PAGE, B53_IEEE_MCAST_DLF, mc); 602 } else { 603 b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc); 604 if (multicast) 605 mc |= BIT(port); 606 else 607 mc &= ~BIT(port); 608 b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc); 609 610 b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc); 611 if (multicast) 612 mc |= BIT(port); 613 else 614 mc &= ~BIT(port); 615 b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc); 616 } 617 } 618 619 static void b53_port_set_learning(struct b53_device *dev, int port, 620 bool learning) 621 { 622 u16 reg; 623 624 if (is5325(dev)) 625 return; 626 627 b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, ®); 628 if (learning) 629 reg &= ~BIT(port); 630 else 631 reg |= BIT(port); 632 b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg); 633 } 634 635 static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable) 636 { 637 struct b53_device *dev = ds->priv; 638 u16 reg; 639 640 b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, ®); 641 if (enable) 642 reg |= BIT(port); 643 else 644 reg &= ~BIT(port); 645 b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg); 646 } 647 648 int b53_setup_port(struct dsa_switch *ds, int port) 649 { 650 struct b53_device *dev = ds->priv; 651 652 b53_port_set_ucast_flood(dev, port, true); 653 b53_port_set_mcast_flood(dev, port, true); 654 b53_port_set_learning(dev, port, false); 655 656 /* Force all traffic to go to the CPU port to prevent the ASIC from 657 * trying to forward to bridged ports on matching FDB entries, then 658 * dropping frames because it isn't allowed to forward there. 659 */ 660 if (dsa_is_user_port(ds, port)) 661 b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED); 662 663 if (is5325(dev) && 664 in_range(port, 1, 4)) { 665 u8 reg; 666 667 b53_read8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, ®); 668 reg &= ~PD_MODE_POWER_DOWN_PORT(0); 669 if (dsa_is_unused_port(ds, port)) 670 reg |= PD_MODE_POWER_DOWN_PORT(port); 671 else 672 reg &= ~PD_MODE_POWER_DOWN_PORT(port); 673 b53_write8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, reg); 674 } 675 676 return 0; 677 } 678 EXPORT_SYMBOL(b53_setup_port); 679 680 int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) 681 { 682 struct b53_device *dev = ds->priv; 683 unsigned int cpu_port; 684 int ret = 0; 685 u16 pvlan; 686 687 if (!dsa_is_user_port(ds, port)) 688 return 0; 689 690 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; 691 692 if (dev->ops->phy_enable) 693 dev->ops->phy_enable(dev, port); 694 695 if (dev->ops->irq_enable) 696 ret = dev->ops->irq_enable(dev, port); 697 if (ret) 698 return ret; 699 700 /* Clear the Rx and Tx disable bits and set to no spanning tree */ 701 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0); 702 703 /* Set this port, and only this one to be in the default VLAN, 704 * if member of a bridge, restore its membership prior to 705 * bringing down this port. 706 */ 707 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 708 pvlan &= ~0x1ff; 709 pvlan |= BIT(port); 710 pvlan |= dev->ports[port].vlan_ctl_mask; 711 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 712 713 b53_imp_vlan_setup(ds, cpu_port); 714 715 /* If EEE was enabled, restore it */ 716 if (dev->ports[port].eee.eee_enabled) 717 b53_eee_enable_set(ds, port, true); 718 719 return 0; 720 } 721 EXPORT_SYMBOL(b53_enable_port); 722 723 void b53_disable_port(struct dsa_switch *ds, int port) 724 { 725 struct b53_device *dev = ds->priv; 726 u8 reg; 727 728 /* Disable Tx/Rx for the port */ 729 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), ®); 730 reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE; 731 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); 732 733 if (dev->ops->phy_disable) 734 dev->ops->phy_disable(dev, port); 735 736 if (dev->ops->irq_disable) 737 dev->ops->irq_disable(dev, port); 738 } 739 EXPORT_SYMBOL(b53_disable_port); 740 741 void b53_brcm_hdr_setup(struct dsa_switch *ds, int port) 742 { 743 struct b53_device *dev = ds->priv; 744 bool tag_en = !(dev->tag_protocol == DSA_TAG_PROTO_NONE); 745 u8 hdr_ctl, val; 746 u16 reg; 747 748 /* Resolve which bit controls the Broadcom tag */ 749 switch (port) { 750 case 8: 751 val = BRCM_HDR_P8_EN; 752 break; 753 case 7: 754 val = BRCM_HDR_P7_EN; 755 break; 756 case 5: 757 val = BRCM_HDR_P5_EN; 758 break; 759 default: 760 val = 0; 761 break; 762 } 763 764 /* Enable management mode if tagging is requested */ 765 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &hdr_ctl); 766 if (tag_en) 767 hdr_ctl |= SM_SW_FWD_MODE; 768 else 769 hdr_ctl &= ~SM_SW_FWD_MODE; 770 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, hdr_ctl); 771 772 /* Configure the appropriate IMP port */ 773 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &hdr_ctl); 774 if (port == 8) 775 hdr_ctl |= GC_FRM_MGMT_PORT_MII; 776 else if (port == 5) 777 hdr_ctl |= GC_FRM_MGMT_PORT_M; 778 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl); 779 780 /* B53_BRCM_HDR not present on devices with legacy tags */ 781 if (dev->tag_protocol == DSA_TAG_PROTO_BRCM_LEGACY || 782 dev->tag_protocol == DSA_TAG_PROTO_BRCM_LEGACY_FCS) 783 return; 784 785 /* Enable Broadcom tags for IMP port */ 786 b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl); 787 if (tag_en) 788 hdr_ctl |= val; 789 else 790 hdr_ctl &= ~val; 791 b53_write8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, hdr_ctl); 792 793 /* Registers below are only accessible on newer devices */ 794 if (!is58xx(dev)) 795 return; 796 797 /* Enable reception Broadcom tag for CPU TX (switch RX) to 798 * allow us to tag outgoing frames 799 */ 800 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, ®); 801 if (tag_en) 802 reg &= ~BIT(port); 803 else 804 reg |= BIT(port); 805 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, reg); 806 807 /* Enable transmission of Broadcom tags from the switch (CPU RX) to 808 * allow delivering frames to the per-port net_devices 809 */ 810 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, ®); 811 if (tag_en) 812 reg &= ~BIT(port); 813 else 814 reg |= BIT(port); 815 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, reg); 816 } 817 EXPORT_SYMBOL(b53_brcm_hdr_setup); 818 819 static void b53_enable_cpu_port(struct b53_device *dev, int port) 820 { 821 u8 port_ctrl; 822 823 /* BCM5325 CPU port is at 8 */ 824 if ((is5325(dev) || is5365(dev)) && port == B53_CPU_PORT_25) 825 port = B53_CPU_PORT; 826 827 port_ctrl = PORT_CTRL_RX_BCST_EN | 828 PORT_CTRL_RX_MCST_EN | 829 PORT_CTRL_RX_UCST_EN; 830 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl); 831 832 b53_brcm_hdr_setup(dev->ds, port); 833 } 834 835 static void b53_enable_mib(struct b53_device *dev) 836 { 837 u8 gc; 838 839 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); 840 gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN); 841 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); 842 } 843 844 static void b53_enable_stp(struct b53_device *dev) 845 { 846 u8 gc; 847 848 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); 849 gc |= GC_RX_BPDU_EN; 850 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); 851 } 852 853 static u16 b53_default_pvid(struct b53_device *dev) 854 { 855 if (is5325(dev) || is5365(dev)) 856 return 1; 857 else 858 return 0; 859 } 860 861 static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port) 862 { 863 struct b53_device *dev = ds->priv; 864 865 return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port); 866 } 867 868 static bool b53_vlan_port_may_join_untagged(struct dsa_switch *ds, int port) 869 { 870 struct b53_device *dev = ds->priv; 871 struct dsa_port *dp; 872 873 if (!dev->vlan_filtering) 874 return true; 875 876 dp = dsa_to_port(ds, port); 877 878 if (dsa_port_is_cpu(dp)) 879 return true; 880 881 return dp->bridge == NULL; 882 } 883 884 int b53_configure_vlan(struct dsa_switch *ds) 885 { 886 struct b53_device *dev = ds->priv; 887 struct b53_vlan vl = { 0 }; 888 struct b53_vlan *v; 889 int i, def_vid; 890 u16 vid; 891 892 def_vid = b53_default_pvid(dev); 893 894 /* clear all vlan entries */ 895 if (is5325(dev) || is5365(dev)) { 896 for (i = def_vid; i < dev->num_vlans; i++) 897 b53_set_vlan_entry(dev, i, &vl); 898 } else { 899 b53_do_vlan_op(dev, VTA_CMD_CLEAR); 900 } 901 902 b53_enable_vlan(dev, -1, dev->vlan_enabled, dev->vlan_filtering); 903 904 /* Create an untagged VLAN entry for the default PVID in case 905 * CONFIG_VLAN_8021Q is disabled and there are no calls to 906 * dsa_user_vlan_rx_add_vid() to create the default VLAN 907 * entry. Do this only when the tagging protocol is not 908 * DSA_TAG_PROTO_NONE 909 */ 910 v = &dev->vlans[def_vid]; 911 b53_for_each_port(dev, i) { 912 if (!b53_vlan_port_may_join_untagged(ds, i)) 913 continue; 914 915 vl.members |= BIT(i); 916 if (!b53_vlan_port_needs_forced_tagged(ds, i)) 917 vl.untag = vl.members; 918 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(i), 919 def_vid); 920 } 921 b53_set_vlan_entry(dev, def_vid, &vl); 922 923 if (dev->vlan_filtering) { 924 /* Upon initial call we have not set-up any VLANs, but upon 925 * system resume, we need to restore all VLAN entries. 926 */ 927 for (vid = def_vid + 1; vid < dev->num_vlans; vid++) { 928 v = &dev->vlans[vid]; 929 930 if (!v->members) 931 continue; 932 933 b53_set_vlan_entry(dev, vid, v); 934 b53_fast_age_vlan(dev, vid); 935 } 936 937 b53_for_each_port(dev, i) { 938 if (!dsa_is_cpu_port(ds, i)) 939 b53_write16(dev, B53_VLAN_PAGE, 940 B53_VLAN_PORT_DEF_TAG(i), 941 dev->ports[i].pvid); 942 } 943 } 944 945 return 0; 946 } 947 EXPORT_SYMBOL(b53_configure_vlan); 948 949 static void b53_switch_reset_gpio(struct b53_device *dev) 950 { 951 int gpio = dev->reset_gpio; 952 953 if (gpio < 0) 954 return; 955 956 /* Reset sequence: RESET low(50ms)->high(20ms) 957 */ 958 gpio_set_value(gpio, 0); 959 mdelay(50); 960 961 gpio_set_value(gpio, 1); 962 mdelay(20); 963 964 dev->current_page = 0xff; 965 } 966 967 static int b53_switch_reset(struct b53_device *dev) 968 { 969 unsigned int timeout = 1000; 970 u8 mgmt, reg; 971 972 b53_switch_reset_gpio(dev); 973 974 if (is539x(dev)) { 975 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83); 976 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00); 977 } 978 979 /* This is specific to 58xx devices here, do not use is58xx() which 980 * covers the larger Starfigther 2 family, including 7445/7278 which 981 * still use this driver as a library and need to perform the reset 982 * earlier. 983 */ 984 if (dev->chip_id == BCM58XX_DEVICE_ID || 985 dev->chip_id == BCM583XX_DEVICE_ID) { 986 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, ®); 987 reg |= SW_RST | EN_SW_RST | EN_CH_RST; 988 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg); 989 990 do { 991 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, ®); 992 if (!(reg & SW_RST)) 993 break; 994 995 usleep_range(1000, 2000); 996 } while (timeout-- > 0); 997 998 if (timeout == 0) { 999 dev_err(dev->dev, 1000 "Timeout waiting for SW_RST to clear!\n"); 1001 return -ETIMEDOUT; 1002 } 1003 } 1004 1005 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 1006 1007 if (!(mgmt & SM_SW_FWD_EN)) { 1008 mgmt &= ~SM_SW_FWD_MODE; 1009 mgmt |= SM_SW_FWD_EN; 1010 1011 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); 1012 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); 1013 1014 if (!(mgmt & SM_SW_FWD_EN)) { 1015 dev_err(dev->dev, "Failed to enable switch!\n"); 1016 return -EINVAL; 1017 } 1018 } 1019 1020 b53_enable_mib(dev); 1021 b53_enable_stp(dev); 1022 1023 return b53_flush_arl(dev, FAST_AGE_STATIC); 1024 } 1025 1026 static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg) 1027 { 1028 struct b53_device *priv = ds->priv; 1029 u16 value = 0; 1030 int ret; 1031 1032 if (priv->ops->phy_read16) 1033 ret = priv->ops->phy_read16(priv, addr, reg, &value); 1034 else 1035 ret = b53_read16(priv, B53_PORT_MII_PAGE(addr), 1036 reg * 2, &value); 1037 1038 return ret ? ret : value; 1039 } 1040 1041 static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val) 1042 { 1043 struct b53_device *priv = ds->priv; 1044 1045 if (priv->ops->phy_write16) 1046 return priv->ops->phy_write16(priv, addr, reg, val); 1047 1048 return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val); 1049 } 1050 1051 static int b53_reset_switch(struct b53_device *priv) 1052 { 1053 /* reset vlans */ 1054 memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans); 1055 memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports); 1056 1057 priv->serdes_lane = B53_INVALID_LANE; 1058 1059 return b53_switch_reset(priv); 1060 } 1061 1062 static int b53_apply_config(struct b53_device *priv) 1063 { 1064 /* disable switching */ 1065 b53_set_forwarding(priv, 0); 1066 1067 b53_configure_vlan(priv->ds); 1068 1069 /* enable switching */ 1070 b53_set_forwarding(priv, 1); 1071 1072 return 0; 1073 } 1074 1075 static void b53_reset_mib(struct b53_device *priv) 1076 { 1077 u8 gc; 1078 1079 b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); 1080 1081 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB); 1082 msleep(1); 1083 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB); 1084 msleep(1); 1085 } 1086 1087 static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev) 1088 { 1089 if (is5365(dev)) 1090 return b53_mibs_65; 1091 else if (is63xx(dev)) 1092 return b53_mibs_63xx; 1093 else if (is58xx(dev)) 1094 return b53_mibs_58xx; 1095 else 1096 return b53_mibs; 1097 } 1098 1099 static unsigned int b53_get_mib_size(struct b53_device *dev) 1100 { 1101 if (is5365(dev)) 1102 return B53_MIBS_65_SIZE; 1103 else if (is63xx(dev)) 1104 return B53_MIBS_63XX_SIZE; 1105 else if (is58xx(dev)) 1106 return B53_MIBS_58XX_SIZE; 1107 else 1108 return B53_MIBS_SIZE; 1109 } 1110 1111 static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port) 1112 { 1113 /* These ports typically do not have built-in PHYs */ 1114 switch (port) { 1115 case B53_CPU_PORT_25: 1116 case 7: 1117 case B53_CPU_PORT: 1118 return NULL; 1119 } 1120 1121 return mdiobus_get_phy(ds->user_mii_bus, port); 1122 } 1123 1124 void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset, 1125 uint8_t *data) 1126 { 1127 struct b53_device *dev = ds->priv; 1128 const struct b53_mib_desc *mibs = b53_get_mib(dev); 1129 unsigned int mib_size = b53_get_mib_size(dev); 1130 struct phy_device *phydev; 1131 unsigned int i; 1132 1133 if (stringset == ETH_SS_STATS) { 1134 for (i = 0; i < mib_size; i++) 1135 ethtool_puts(&data, mibs[i].name); 1136 } else if (stringset == ETH_SS_PHY_STATS) { 1137 phydev = b53_get_phy_device(ds, port); 1138 if (!phydev) 1139 return; 1140 1141 phy_ethtool_get_strings(phydev, data); 1142 } 1143 } 1144 EXPORT_SYMBOL(b53_get_strings); 1145 1146 void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) 1147 { 1148 struct b53_device *dev = ds->priv; 1149 const struct b53_mib_desc *mibs = b53_get_mib(dev); 1150 unsigned int mib_size = b53_get_mib_size(dev); 1151 const struct b53_mib_desc *s; 1152 unsigned int i; 1153 u64 val = 0; 1154 1155 if (is5365(dev) && port == 5) 1156 port = 8; 1157 1158 mutex_lock(&dev->stats_mutex); 1159 1160 for (i = 0; i < mib_size; i++) { 1161 s = &mibs[i]; 1162 1163 if (s->size == 8) { 1164 b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val); 1165 } else { 1166 u32 val32; 1167 1168 b53_read32(dev, B53_MIB_PAGE(port), s->offset, 1169 &val32); 1170 val = val32; 1171 } 1172 data[i] = (u64)val; 1173 } 1174 1175 mutex_unlock(&dev->stats_mutex); 1176 } 1177 EXPORT_SYMBOL(b53_get_ethtool_stats); 1178 1179 void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data) 1180 { 1181 struct phy_device *phydev; 1182 1183 phydev = b53_get_phy_device(ds, port); 1184 if (!phydev) 1185 return; 1186 1187 phy_ethtool_get_stats(phydev, NULL, data); 1188 } 1189 EXPORT_SYMBOL(b53_get_ethtool_phy_stats); 1190 1191 int b53_get_sset_count(struct dsa_switch *ds, int port, int sset) 1192 { 1193 struct b53_device *dev = ds->priv; 1194 struct phy_device *phydev; 1195 1196 if (sset == ETH_SS_STATS) { 1197 return b53_get_mib_size(dev); 1198 } else if (sset == ETH_SS_PHY_STATS) { 1199 phydev = b53_get_phy_device(ds, port); 1200 if (!phydev) 1201 return 0; 1202 1203 return phy_ethtool_get_sset_count(phydev); 1204 } 1205 1206 return 0; 1207 } 1208 EXPORT_SYMBOL(b53_get_sset_count); 1209 1210 enum b53_devlink_resource_id { 1211 B53_DEVLINK_PARAM_ID_VLAN_TABLE, 1212 }; 1213 1214 static u64 b53_devlink_vlan_table_get(void *priv) 1215 { 1216 struct b53_device *dev = priv; 1217 struct b53_vlan *vl; 1218 unsigned int i; 1219 u64 count = 0; 1220 1221 for (i = 0; i < dev->num_vlans; i++) { 1222 vl = &dev->vlans[i]; 1223 if (vl->members) 1224 count++; 1225 } 1226 1227 return count; 1228 } 1229 1230 int b53_setup_devlink_resources(struct dsa_switch *ds) 1231 { 1232 struct devlink_resource_size_params size_params; 1233 struct b53_device *dev = ds->priv; 1234 int err; 1235 1236 devlink_resource_size_params_init(&size_params, dev->num_vlans, 1237 dev->num_vlans, 1238 1, DEVLINK_RESOURCE_UNIT_ENTRY); 1239 1240 err = dsa_devlink_resource_register(ds, "VLAN", dev->num_vlans, 1241 B53_DEVLINK_PARAM_ID_VLAN_TABLE, 1242 DEVLINK_RESOURCE_ID_PARENT_TOP, 1243 &size_params); 1244 if (err) 1245 goto out; 1246 1247 dsa_devlink_resource_occ_get_register(ds, 1248 B53_DEVLINK_PARAM_ID_VLAN_TABLE, 1249 b53_devlink_vlan_table_get, dev); 1250 1251 return 0; 1252 out: 1253 dsa_devlink_resources_unregister(ds); 1254 return err; 1255 } 1256 EXPORT_SYMBOL(b53_setup_devlink_resources); 1257 1258 static int b53_setup(struct dsa_switch *ds) 1259 { 1260 struct b53_device *dev = ds->priv; 1261 struct b53_vlan *vl; 1262 unsigned int port; 1263 u16 pvid; 1264 int ret; 1265 1266 /* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set 1267 * which forces the CPU port to be tagged in all VLANs. 1268 */ 1269 ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE; 1270 1271 /* The switch does not tell us the original VLAN for untagged 1272 * packets, so keep the CPU port always tagged. 1273 */ 1274 ds->untag_vlan_aware_bridge_pvid = true; 1275 1276 if (dev->chip_id == BCM53101_DEVICE_ID) { 1277 /* BCM53101 uses 0.5 second increments */ 1278 ds->ageing_time_min = 1 * 500; 1279 ds->ageing_time_max = AGE_TIME_MAX * 500; 1280 } else { 1281 /* Everything else uses 1 second increments */ 1282 ds->ageing_time_min = 1 * 1000; 1283 ds->ageing_time_max = AGE_TIME_MAX * 1000; 1284 } 1285 1286 ret = b53_reset_switch(dev); 1287 if (ret) { 1288 dev_err(ds->dev, "failed to reset switch\n"); 1289 return ret; 1290 } 1291 1292 /* setup default vlan for filtering mode */ 1293 pvid = b53_default_pvid(dev); 1294 vl = &dev->vlans[pvid]; 1295 b53_for_each_port(dev, port) { 1296 vl->members |= BIT(port); 1297 if (!b53_vlan_port_needs_forced_tagged(ds, port)) 1298 vl->untag |= BIT(port); 1299 } 1300 1301 b53_reset_mib(dev); 1302 1303 ret = b53_apply_config(dev); 1304 if (ret) { 1305 dev_err(ds->dev, "failed to apply configuration\n"); 1306 return ret; 1307 } 1308 1309 /* Configure IMP/CPU port, disable all other ports. Enabled 1310 * ports will be configured with .port_enable 1311 */ 1312 for (port = 0; port < dev->num_ports; port++) { 1313 if (dsa_is_cpu_port(ds, port)) 1314 b53_enable_cpu_port(dev, port); 1315 else 1316 b53_disable_port(ds, port); 1317 } 1318 1319 return b53_setup_devlink_resources(ds); 1320 } 1321 1322 static void b53_teardown(struct dsa_switch *ds) 1323 { 1324 dsa_devlink_resources_unregister(ds); 1325 } 1326 1327 static void b53_force_link(struct b53_device *dev, int port, int link) 1328 { 1329 u8 reg, val, off; 1330 1331 /* Override the port settings */ 1332 if (port == dev->imp_port) { 1333 off = B53_PORT_OVERRIDE_CTRL; 1334 val = PORT_OVERRIDE_EN; 1335 } else if (is5325(dev)) { 1336 return; 1337 } else { 1338 off = B53_GMII_PORT_OVERRIDE_CTRL(port); 1339 val = GMII_PO_EN; 1340 } 1341 1342 b53_read8(dev, B53_CTRL_PAGE, off, ®); 1343 reg |= val; 1344 if (link) 1345 reg |= PORT_OVERRIDE_LINK; 1346 else 1347 reg &= ~PORT_OVERRIDE_LINK; 1348 b53_write8(dev, B53_CTRL_PAGE, off, reg); 1349 } 1350 1351 static void b53_force_port_config(struct b53_device *dev, int port, 1352 int speed, int duplex, 1353 bool tx_pause, bool rx_pause) 1354 { 1355 u8 reg, val, off; 1356 1357 /* Override the port settings */ 1358 if (port == dev->imp_port) { 1359 off = B53_PORT_OVERRIDE_CTRL; 1360 val = PORT_OVERRIDE_EN; 1361 } else if (is5325(dev)) { 1362 return; 1363 } else { 1364 off = B53_GMII_PORT_OVERRIDE_CTRL(port); 1365 val = GMII_PO_EN; 1366 } 1367 1368 b53_read8(dev, B53_CTRL_PAGE, off, ®); 1369 reg |= val; 1370 if (duplex == DUPLEX_FULL) 1371 reg |= PORT_OVERRIDE_FULL_DUPLEX; 1372 else 1373 reg &= ~PORT_OVERRIDE_FULL_DUPLEX; 1374 1375 reg &= ~(0x3 << GMII_PO_SPEED_S); 1376 if (is5301x(dev) || is58xx(dev)) 1377 reg &= ~PORT_OVERRIDE_SPEED_2000M; 1378 1379 switch (speed) { 1380 case 2000: 1381 reg |= PORT_OVERRIDE_SPEED_2000M; 1382 fallthrough; 1383 case SPEED_1000: 1384 reg |= PORT_OVERRIDE_SPEED_1000M; 1385 break; 1386 case SPEED_100: 1387 reg |= PORT_OVERRIDE_SPEED_100M; 1388 break; 1389 case SPEED_10: 1390 reg |= PORT_OVERRIDE_SPEED_10M; 1391 break; 1392 default: 1393 dev_err(dev->dev, "unknown speed: %d\n", speed); 1394 return; 1395 } 1396 1397 if (is5325(dev)) 1398 reg &= ~PORT_OVERRIDE_LP_FLOW_25; 1399 else 1400 reg &= ~(PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW); 1401 1402 if (rx_pause) { 1403 if (is5325(dev)) 1404 reg |= PORT_OVERRIDE_LP_FLOW_25; 1405 else 1406 reg |= PORT_OVERRIDE_RX_FLOW; 1407 } 1408 1409 if (tx_pause) { 1410 if (is5325(dev)) 1411 reg |= PORT_OVERRIDE_LP_FLOW_25; 1412 else 1413 reg |= PORT_OVERRIDE_TX_FLOW; 1414 } 1415 1416 b53_write8(dev, B53_CTRL_PAGE, off, reg); 1417 } 1418 1419 static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port, 1420 phy_interface_t interface) 1421 { 1422 struct b53_device *dev = ds->priv; 1423 u8 rgmii_ctrl = 0; 1424 1425 b53_read8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), &rgmii_ctrl); 1426 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC); 1427 1428 if (is6318_268(dev)) 1429 rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE; 1430 1431 rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII; 1432 1433 b53_write8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), rgmii_ctrl); 1434 1435 dev_dbg(ds->dev, "Configured port %d for %s\n", port, 1436 phy_modes(interface)); 1437 } 1438 1439 static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port, 1440 phy_interface_t interface) 1441 { 1442 struct b53_device *dev = ds->priv; 1443 u8 rgmii_ctrl = 0, off; 1444 1445 if (port == dev->imp_port) 1446 off = B53_RGMII_CTRL_IMP; 1447 else 1448 off = B53_RGMII_CTRL_P(port); 1449 1450 /* Configure the port RGMII clock delay by DLL disabled and 1451 * tx_clk aligned timing (restoring to reset defaults) 1452 */ 1453 b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl); 1454 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC); 1455 1456 /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make 1457 * sure that we enable the port TX clock internal delay to 1458 * account for this internal delay that is inserted, otherwise 1459 * the switch won't be able to receive correctly. 1460 * 1461 * PHY_INTERFACE_MODE_RGMII means that we are not introducing 1462 * any delay neither on transmission nor reception, so the 1463 * BCM53125 must also be configured accordingly to account for 1464 * the lack of delay and introduce 1465 * 1466 * The BCM53125 switch has its RX clock and TX clock control 1467 * swapped, hence the reason why we modify the TX clock path in 1468 * the "RGMII" case 1469 */ 1470 if (interface == PHY_INTERFACE_MODE_RGMII_TXID) 1471 rgmii_ctrl |= RGMII_CTRL_DLL_TXC; 1472 if (interface == PHY_INTERFACE_MODE_RGMII) 1473 rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC; 1474 1475 if (dev->chip_id != BCM53115_DEVICE_ID) 1476 rgmii_ctrl |= RGMII_CTRL_TIMING_SEL; 1477 1478 b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl); 1479 1480 dev_info(ds->dev, "Configured port %d for %s\n", port, 1481 phy_modes(interface)); 1482 } 1483 1484 static void b53_adjust_5325_mii(struct dsa_switch *ds, int port) 1485 { 1486 struct b53_device *dev = ds->priv; 1487 u8 reg = 0; 1488 1489 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 1490 ®); 1491 1492 /* reverse mii needs to be enabled */ 1493 if (!(reg & PORT_OVERRIDE_RV_MII_25)) { 1494 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 1495 reg | PORT_OVERRIDE_RV_MII_25); 1496 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, 1497 ®); 1498 1499 if (!(reg & PORT_OVERRIDE_RV_MII_25)) { 1500 dev_err(ds->dev, 1501 "Failed to enable reverse MII mode\n"); 1502 return; 1503 } 1504 } 1505 } 1506 1507 void b53_port_event(struct dsa_switch *ds, int port) 1508 { 1509 struct b53_device *dev = ds->priv; 1510 bool link; 1511 u16 sts; 1512 1513 b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts); 1514 link = !!(sts & BIT(port)); 1515 dsa_port_phylink_mac_change(ds, port, link); 1516 } 1517 EXPORT_SYMBOL(b53_port_event); 1518 1519 static void b53_phylink_get_caps(struct dsa_switch *ds, int port, 1520 struct phylink_config *config) 1521 { 1522 struct b53_device *dev = ds->priv; 1523 1524 /* Internal ports need GMII for PHYLIB */ 1525 __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces); 1526 1527 /* These switches appear to support MII and RevMII too, but beyond 1528 * this, the code gives very few clues. FIXME: We probably need more 1529 * interface modes here. 1530 * 1531 * According to b53_srab_mux_init(), ports 3..5 can support: 1532 * SGMII, MII, GMII, RGMII or INTERNAL depending on the MUX setting. 1533 * However, the interface mode read from the MUX configuration is 1534 * not passed back to DSA, so phylink uses NA. 1535 * DT can specify RGMII for ports 0, 1. 1536 * For MDIO, port 8 can be RGMII_TXID. 1537 */ 1538 __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces); 1539 __set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces); 1540 1541 /* BCM63xx RGMII ports support RGMII */ 1542 if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4)) 1543 phy_interface_set_rgmii(config->supported_interfaces); 1544 1545 config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | 1546 MAC_10 | MAC_100; 1547 1548 /* 5325/5365 are not capable of gigabit speeds, everything else is. 1549 * Note: the original code also exclulded Gigagbit for MII, RevMII 1550 * and 802.3z modes. MII and RevMII are not able to work above 100M, 1551 * so will be excluded by the generic validator implementation. 1552 * However, the exclusion of Gigabit for 802.3z just seems wrong. 1553 */ 1554 if (!(is5325(dev) || is5365(dev))) 1555 config->mac_capabilities |= MAC_1000; 1556 1557 /* Get the implementation specific capabilities */ 1558 if (dev->ops->phylink_get_caps) 1559 dev->ops->phylink_get_caps(dev, port, config); 1560 } 1561 1562 static struct phylink_pcs *b53_phylink_mac_select_pcs(struct phylink_config *config, 1563 phy_interface_t interface) 1564 { 1565 struct dsa_port *dp = dsa_phylink_to_port(config); 1566 struct b53_device *dev = dp->ds->priv; 1567 1568 if (!dev->ops->phylink_mac_select_pcs) 1569 return NULL; 1570 1571 return dev->ops->phylink_mac_select_pcs(dev, dp->index, interface); 1572 } 1573 1574 static void b53_phylink_mac_config(struct phylink_config *config, 1575 unsigned int mode, 1576 const struct phylink_link_state *state) 1577 { 1578 struct dsa_port *dp = dsa_phylink_to_port(config); 1579 phy_interface_t interface = state->interface; 1580 struct dsa_switch *ds = dp->ds; 1581 struct b53_device *dev = ds->priv; 1582 int port = dp->index; 1583 1584 if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4)) 1585 b53_adjust_63xx_rgmii(ds, port, interface); 1586 1587 if (mode == MLO_AN_FIXED) { 1588 if (is531x5(dev) && phy_interface_mode_is_rgmii(interface)) 1589 b53_adjust_531x5_rgmii(ds, port, interface); 1590 1591 /* configure MII port if necessary */ 1592 if (is5325(dev)) 1593 b53_adjust_5325_mii(ds, port); 1594 } 1595 } 1596 1597 static void b53_phylink_mac_link_down(struct phylink_config *config, 1598 unsigned int mode, 1599 phy_interface_t interface) 1600 { 1601 struct dsa_port *dp = dsa_phylink_to_port(config); 1602 struct b53_device *dev = dp->ds->priv; 1603 int port = dp->index; 1604 1605 if (mode == MLO_AN_PHY) { 1606 if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4)) 1607 b53_force_link(dev, port, false); 1608 return; 1609 } 1610 1611 if (mode == MLO_AN_FIXED) { 1612 b53_force_link(dev, port, false); 1613 return; 1614 } 1615 1616 if (phy_interface_mode_is_8023z(interface) && 1617 dev->ops->serdes_link_set) 1618 dev->ops->serdes_link_set(dev, port, mode, interface, false); 1619 } 1620 1621 static void b53_phylink_mac_link_up(struct phylink_config *config, 1622 struct phy_device *phydev, 1623 unsigned int mode, 1624 phy_interface_t interface, 1625 int speed, int duplex, 1626 bool tx_pause, bool rx_pause) 1627 { 1628 struct dsa_port *dp = dsa_phylink_to_port(config); 1629 struct dsa_switch *ds = dp->ds; 1630 struct b53_device *dev = ds->priv; 1631 struct ethtool_keee *p = &dev->ports[dp->index].eee; 1632 int port = dp->index; 1633 1634 if (mode == MLO_AN_PHY) { 1635 /* Re-negotiate EEE if it was enabled already */ 1636 p->eee_enabled = b53_eee_init(ds, port, phydev); 1637 1638 if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4)) { 1639 b53_force_port_config(dev, port, speed, duplex, 1640 tx_pause, rx_pause); 1641 b53_force_link(dev, port, true); 1642 } 1643 1644 return; 1645 } 1646 1647 if (mode == MLO_AN_FIXED) { 1648 /* Force flow control on BCM5301x's CPU port */ 1649 if (is5301x(dev) && dsa_is_cpu_port(ds, port)) 1650 tx_pause = rx_pause = true; 1651 1652 b53_force_port_config(dev, port, speed, duplex, 1653 tx_pause, rx_pause); 1654 b53_force_link(dev, port, true); 1655 return; 1656 } 1657 1658 if (phy_interface_mode_is_8023z(interface) && 1659 dev->ops->serdes_link_set) 1660 dev->ops->serdes_link_set(dev, port, mode, interface, true); 1661 } 1662 1663 int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, 1664 struct netlink_ext_ack *extack) 1665 { 1666 struct b53_device *dev = ds->priv; 1667 1668 if (dev->vlan_filtering != vlan_filtering) { 1669 dev->vlan_filtering = vlan_filtering; 1670 b53_apply_config(dev); 1671 } 1672 1673 return 0; 1674 } 1675 EXPORT_SYMBOL(b53_vlan_filtering); 1676 1677 static int b53_vlan_prepare(struct dsa_switch *ds, int port, 1678 const struct switchdev_obj_port_vlan *vlan) 1679 { 1680 struct b53_device *dev = ds->priv; 1681 1682 if ((is5325(dev) || is5365(dev)) && vlan->vid == 0) 1683 return -EOPNOTSUPP; 1684 1685 /* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of 1686 * receiving VLAN tagged frames at all, we can still allow the port to 1687 * be configured for egress untagged. 1688 */ 1689 if (dev->chip_id == BCM7278_DEVICE_ID && port == 7 && 1690 !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) 1691 return -EINVAL; 1692 1693 if (vlan->vid >= dev->num_vlans) 1694 return -ERANGE; 1695 1696 b53_enable_vlan(dev, port, true, dev->vlan_filtering); 1697 1698 return 0; 1699 } 1700 1701 int b53_vlan_add(struct dsa_switch *ds, int port, 1702 const struct switchdev_obj_port_vlan *vlan, 1703 struct netlink_ext_ack *extack) 1704 { 1705 struct b53_device *dev = ds->priv; 1706 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 1707 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; 1708 struct b53_vlan *vl; 1709 u16 old_pvid, new_pvid; 1710 int err; 1711 1712 err = b53_vlan_prepare(ds, port, vlan); 1713 if (err) 1714 return err; 1715 1716 if (vlan->vid == 0) 1717 return 0; 1718 1719 old_pvid = dev->ports[port].pvid; 1720 if (pvid) 1721 new_pvid = vlan->vid; 1722 else if (!pvid && vlan->vid == old_pvid) 1723 new_pvid = b53_default_pvid(dev); 1724 else 1725 new_pvid = old_pvid; 1726 dev->ports[port].pvid = new_pvid; 1727 1728 vl = &dev->vlans[vlan->vid]; 1729 1730 if (dsa_is_cpu_port(ds, port)) 1731 untagged = false; 1732 1733 vl->members |= BIT(port); 1734 if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port)) 1735 vl->untag |= BIT(port); 1736 else 1737 vl->untag &= ~BIT(port); 1738 1739 if (!dev->vlan_filtering) 1740 return 0; 1741 1742 b53_set_vlan_entry(dev, vlan->vid, vl); 1743 b53_fast_age_vlan(dev, vlan->vid); 1744 1745 if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) { 1746 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), 1747 new_pvid); 1748 b53_fast_age_vlan(dev, old_pvid); 1749 } 1750 1751 return 0; 1752 } 1753 EXPORT_SYMBOL(b53_vlan_add); 1754 1755 int b53_vlan_del(struct dsa_switch *ds, int port, 1756 const struct switchdev_obj_port_vlan *vlan) 1757 { 1758 struct b53_device *dev = ds->priv; 1759 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 1760 struct b53_vlan *vl; 1761 u16 pvid; 1762 1763 if (vlan->vid == 0) 1764 return 0; 1765 1766 pvid = dev->ports[port].pvid; 1767 1768 vl = &dev->vlans[vlan->vid]; 1769 1770 vl->members &= ~BIT(port); 1771 1772 if (pvid == vlan->vid) 1773 pvid = b53_default_pvid(dev); 1774 dev->ports[port].pvid = pvid; 1775 1776 if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port)) 1777 vl->untag &= ~(BIT(port)); 1778 1779 if (!dev->vlan_filtering) 1780 return 0; 1781 1782 b53_set_vlan_entry(dev, vlan->vid, vl); 1783 b53_fast_age_vlan(dev, vlan->vid); 1784 1785 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid); 1786 b53_fast_age_vlan(dev, pvid); 1787 1788 return 0; 1789 } 1790 EXPORT_SYMBOL(b53_vlan_del); 1791 1792 /* Address Resolution Logic routines. Caller must hold &dev->arl_mutex. */ 1793 static int b53_arl_op_wait(struct b53_device *dev) 1794 { 1795 unsigned int timeout = 10; 1796 u8 reg; 1797 1798 do { 1799 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); 1800 if (!(reg & ARLTBL_START_DONE)) 1801 return 0; 1802 1803 usleep_range(1000, 2000); 1804 } while (timeout--); 1805 1806 dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg); 1807 1808 return -ETIMEDOUT; 1809 } 1810 1811 static int b53_arl_rw_op(struct b53_device *dev, unsigned int op) 1812 { 1813 u8 reg; 1814 1815 if (op > ARLTBL_RW) 1816 return -EINVAL; 1817 1818 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); 1819 reg |= ARLTBL_START_DONE; 1820 if (op) 1821 reg |= ARLTBL_RW; 1822 else 1823 reg &= ~ARLTBL_RW; 1824 if (dev->vlan_enabled) 1825 reg &= ~ARLTBL_IVL_SVL_SELECT; 1826 else 1827 reg |= ARLTBL_IVL_SVL_SELECT; 1828 b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg); 1829 1830 return b53_arl_op_wait(dev); 1831 } 1832 1833 static int b53_arl_read(struct b53_device *dev, u64 mac, 1834 u16 vid, struct b53_arl_entry *ent, u8 *idx) 1835 { 1836 DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES); 1837 unsigned int i; 1838 int ret; 1839 1840 ret = b53_arl_op_wait(dev); 1841 if (ret) 1842 return ret; 1843 1844 bitmap_zero(free_bins, dev->num_arl_bins); 1845 1846 /* Read the bins */ 1847 for (i = 0; i < dev->num_arl_bins; i++) { 1848 u64 mac_vid; 1849 u32 fwd_entry; 1850 1851 b53_read64(dev, B53_ARLIO_PAGE, 1852 B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid); 1853 b53_read32(dev, B53_ARLIO_PAGE, 1854 B53_ARLTBL_DATA_ENTRY(i), &fwd_entry); 1855 b53_arl_to_entry(ent, mac_vid, fwd_entry); 1856 1857 if (!(fwd_entry & ARLTBL_VALID)) { 1858 set_bit(i, free_bins); 1859 continue; 1860 } 1861 if ((mac_vid & ARLTBL_MAC_MASK) != mac) 1862 continue; 1863 if (dev->vlan_enabled && 1864 ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid) 1865 continue; 1866 *idx = i; 1867 return 0; 1868 } 1869 1870 *idx = find_first_bit(free_bins, dev->num_arl_bins); 1871 return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT; 1872 } 1873 1874 static int b53_arl_read_25(struct b53_device *dev, u64 mac, 1875 u16 vid, struct b53_arl_entry *ent, u8 *idx) 1876 { 1877 DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES); 1878 unsigned int i; 1879 int ret; 1880 1881 ret = b53_arl_op_wait(dev); 1882 if (ret) 1883 return ret; 1884 1885 bitmap_zero(free_bins, dev->num_arl_bins); 1886 1887 /* Read the bins */ 1888 for (i = 0; i < dev->num_arl_bins; i++) { 1889 u64 mac_vid; 1890 1891 b53_read64(dev, B53_ARLIO_PAGE, 1892 B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid); 1893 1894 b53_arl_to_entry_25(ent, mac_vid); 1895 1896 if (!(mac_vid & ARLTBL_VALID_25)) { 1897 set_bit(i, free_bins); 1898 continue; 1899 } 1900 if ((mac_vid & ARLTBL_MAC_MASK) != mac) 1901 continue; 1902 if (dev->vlan_enabled && 1903 ((mac_vid >> ARLTBL_VID_S_65) & ARLTBL_VID_MASK_25) != vid) 1904 continue; 1905 *idx = i; 1906 return 0; 1907 } 1908 1909 *idx = find_first_bit(free_bins, dev->num_arl_bins); 1910 return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT; 1911 } 1912 1913 static int b53_arl_op(struct b53_device *dev, int op, int port, 1914 const unsigned char *addr, u16 vid, bool is_valid) 1915 { 1916 struct b53_arl_entry ent; 1917 u32 fwd_entry; 1918 u64 mac, mac_vid = 0; 1919 u8 idx = 0; 1920 int ret; 1921 1922 /* Convert the array into a 64-bit MAC */ 1923 mac = ether_addr_to_u64(addr); 1924 1925 /* Perform a read for the given MAC and VID */ 1926 b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac); 1927 if (!is5325m(dev)) 1928 b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid); 1929 1930 /* Issue a read operation for this MAC */ 1931 ret = b53_arl_rw_op(dev, 1); 1932 if (ret) 1933 return ret; 1934 1935 if (is5325(dev) || is5365(dev)) 1936 ret = b53_arl_read_25(dev, mac, vid, &ent, &idx); 1937 else 1938 ret = b53_arl_read(dev, mac, vid, &ent, &idx); 1939 1940 /* If this is a read, just finish now */ 1941 if (op) 1942 return ret; 1943 1944 switch (ret) { 1945 case -ETIMEDOUT: 1946 return ret; 1947 case -ENOSPC: 1948 dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n", 1949 addr, vid); 1950 return is_valid ? ret : 0; 1951 case -ENOENT: 1952 /* We could not find a matching MAC, so reset to a new entry */ 1953 dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n", 1954 addr, vid, idx); 1955 fwd_entry = 0; 1956 break; 1957 default: 1958 dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n", 1959 addr, vid, idx); 1960 break; 1961 } 1962 1963 /* For multicast address, the port is a bitmask and the validity 1964 * is determined by having at least one port being still active 1965 */ 1966 if (!is_multicast_ether_addr(addr)) { 1967 ent.port = port; 1968 ent.is_valid = is_valid; 1969 } else { 1970 if (is_valid) 1971 ent.port |= BIT(port); 1972 else 1973 ent.port &= ~BIT(port); 1974 1975 ent.is_valid = !!(ent.port); 1976 } 1977 1978 ent.vid = vid; 1979 ent.is_static = true; 1980 ent.is_age = false; 1981 memcpy(ent.mac, addr, ETH_ALEN); 1982 if (is5325(dev) || is5365(dev)) 1983 b53_arl_from_entry_25(&mac_vid, &ent); 1984 else 1985 b53_arl_from_entry(&mac_vid, &fwd_entry, &ent); 1986 1987 b53_write64(dev, B53_ARLIO_PAGE, 1988 B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid); 1989 1990 if (!is5325(dev) && !is5365(dev)) 1991 b53_write32(dev, B53_ARLIO_PAGE, 1992 B53_ARLTBL_DATA_ENTRY(idx), fwd_entry); 1993 1994 return b53_arl_rw_op(dev, 0); 1995 } 1996 1997 int b53_fdb_add(struct dsa_switch *ds, int port, 1998 const unsigned char *addr, u16 vid, 1999 struct dsa_db db) 2000 { 2001 struct b53_device *priv = ds->priv; 2002 int ret; 2003 2004 mutex_lock(&priv->arl_mutex); 2005 ret = b53_arl_op(priv, 0, port, addr, vid, true); 2006 mutex_unlock(&priv->arl_mutex); 2007 2008 return ret; 2009 } 2010 EXPORT_SYMBOL(b53_fdb_add); 2011 2012 int b53_fdb_del(struct dsa_switch *ds, int port, 2013 const unsigned char *addr, u16 vid, 2014 struct dsa_db db) 2015 { 2016 struct b53_device *priv = ds->priv; 2017 int ret; 2018 2019 mutex_lock(&priv->arl_mutex); 2020 ret = b53_arl_op(priv, 0, port, addr, vid, false); 2021 mutex_unlock(&priv->arl_mutex); 2022 2023 return ret; 2024 } 2025 EXPORT_SYMBOL(b53_fdb_del); 2026 2027 static int b53_arl_search_wait(struct b53_device *dev) 2028 { 2029 unsigned int timeout = 1000; 2030 u8 reg, offset; 2031 2032 if (is5325(dev) || is5365(dev)) 2033 offset = B53_ARL_SRCH_CTL_25; 2034 else 2035 offset = B53_ARL_SRCH_CTL; 2036 2037 do { 2038 b53_read8(dev, B53_ARLIO_PAGE, offset, ®); 2039 if (!(reg & ARL_SRCH_STDN)) 2040 return -ENOENT; 2041 2042 if (reg & ARL_SRCH_VLID) 2043 return 0; 2044 2045 usleep_range(1000, 2000); 2046 } while (timeout--); 2047 2048 return -ETIMEDOUT; 2049 } 2050 2051 static void b53_arl_search_rd(struct b53_device *dev, u8 idx, 2052 struct b53_arl_entry *ent) 2053 { 2054 u64 mac_vid; 2055 2056 if (is5325(dev)) { 2057 b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_25, 2058 &mac_vid); 2059 b53_arl_to_entry_25(ent, mac_vid); 2060 } else if (is5365(dev)) { 2061 b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_65, 2062 &mac_vid); 2063 b53_arl_to_entry_25(ent, mac_vid); 2064 } else { 2065 u32 fwd_entry; 2066 2067 b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_MACVID(idx), 2068 &mac_vid); 2069 b53_read32(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL(idx), 2070 &fwd_entry); 2071 b53_arl_to_entry(ent, mac_vid, fwd_entry); 2072 } 2073 } 2074 2075 static int b53_fdb_copy(int port, const struct b53_arl_entry *ent, 2076 dsa_fdb_dump_cb_t *cb, void *data) 2077 { 2078 if (!ent->is_valid) 2079 return 0; 2080 2081 if (port != ent->port) 2082 return 0; 2083 2084 return cb(ent->mac, ent->vid, ent->is_static, data); 2085 } 2086 2087 int b53_fdb_dump(struct dsa_switch *ds, int port, 2088 dsa_fdb_dump_cb_t *cb, void *data) 2089 { 2090 unsigned int count = 0, results_per_hit = 1; 2091 struct b53_device *priv = ds->priv; 2092 struct b53_arl_entry results[2]; 2093 u8 offset; 2094 int ret; 2095 u8 reg; 2096 2097 if (priv->num_arl_bins > 2) 2098 results_per_hit = 2; 2099 2100 mutex_lock(&priv->arl_mutex); 2101 2102 if (is5325(priv) || is5365(priv)) 2103 offset = B53_ARL_SRCH_CTL_25; 2104 else 2105 offset = B53_ARL_SRCH_CTL; 2106 2107 /* Start search operation */ 2108 reg = ARL_SRCH_STDN; 2109 b53_write8(priv, B53_ARLIO_PAGE, offset, reg); 2110 2111 do { 2112 ret = b53_arl_search_wait(priv); 2113 if (ret) 2114 break; 2115 2116 b53_arl_search_rd(priv, 0, &results[0]); 2117 ret = b53_fdb_copy(port, &results[0], cb, data); 2118 if (ret) 2119 break; 2120 2121 if (results_per_hit == 2) { 2122 b53_arl_search_rd(priv, 1, &results[1]); 2123 ret = b53_fdb_copy(port, &results[1], cb, data); 2124 if (ret) 2125 break; 2126 2127 if (!results[0].is_valid && !results[1].is_valid) 2128 break; 2129 } 2130 2131 } while (count++ < b53_max_arl_entries(priv) / results_per_hit); 2132 2133 mutex_unlock(&priv->arl_mutex); 2134 2135 return 0; 2136 } 2137 EXPORT_SYMBOL(b53_fdb_dump); 2138 2139 int b53_mdb_add(struct dsa_switch *ds, int port, 2140 const struct switchdev_obj_port_mdb *mdb, 2141 struct dsa_db db) 2142 { 2143 struct b53_device *priv = ds->priv; 2144 int ret; 2145 2146 /* 5325 and 5365 require some more massaging, but could 2147 * be supported eventually 2148 */ 2149 if (is5325(priv) || is5365(priv)) 2150 return -EOPNOTSUPP; 2151 2152 mutex_lock(&priv->arl_mutex); 2153 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true); 2154 mutex_unlock(&priv->arl_mutex); 2155 2156 return ret; 2157 } 2158 EXPORT_SYMBOL(b53_mdb_add); 2159 2160 int b53_mdb_del(struct dsa_switch *ds, int port, 2161 const struct switchdev_obj_port_mdb *mdb, 2162 struct dsa_db db) 2163 { 2164 struct b53_device *priv = ds->priv; 2165 int ret; 2166 2167 mutex_lock(&priv->arl_mutex); 2168 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false); 2169 mutex_unlock(&priv->arl_mutex); 2170 if (ret) 2171 dev_err(ds->dev, "failed to delete MDB entry\n"); 2172 2173 return ret; 2174 } 2175 EXPORT_SYMBOL(b53_mdb_del); 2176 2177 int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, 2178 bool *tx_fwd_offload, struct netlink_ext_ack *extack) 2179 { 2180 struct b53_device *dev = ds->priv; 2181 struct b53_vlan *vl; 2182 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; 2183 u16 pvlan, reg, pvid; 2184 unsigned int i; 2185 2186 /* On 7278, port 7 which connects to the ASP should only receive 2187 * traffic from matching CFP rules. 2188 */ 2189 if (dev->chip_id == BCM7278_DEVICE_ID && port == 7) 2190 return -EINVAL; 2191 2192 pvid = b53_default_pvid(dev); 2193 vl = &dev->vlans[pvid]; 2194 2195 if (dev->vlan_filtering) { 2196 /* Make this port leave the all VLANs join since we will have 2197 * proper VLAN entries from now on 2198 */ 2199 if (is58xx(dev)) { 2200 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, 2201 ®); 2202 reg &= ~BIT(port); 2203 if ((reg & BIT(cpu_port)) == BIT(cpu_port)) 2204 reg &= ~BIT(cpu_port); 2205 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, 2206 reg); 2207 } 2208 2209 b53_get_vlan_entry(dev, pvid, vl); 2210 vl->members &= ~BIT(port); 2211 b53_set_vlan_entry(dev, pvid, vl); 2212 } 2213 2214 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 2215 2216 b53_for_each_port(dev, i) { 2217 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) 2218 continue; 2219 2220 /* Add this local port to the remote port VLAN control 2221 * membership and update the remote port bitmask 2222 */ 2223 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), ®); 2224 reg |= BIT(port); 2225 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg); 2226 dev->ports[i].vlan_ctl_mask = reg; 2227 2228 pvlan |= BIT(i); 2229 } 2230 2231 /* Disable redirection of unknown SA to the CPU port */ 2232 b53_set_eap_mode(dev, port, EAP_MODE_BASIC); 2233 2234 /* Configure the local port VLAN control membership to include 2235 * remote ports and update the local port bitmask 2236 */ 2237 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 2238 dev->ports[port].vlan_ctl_mask = pvlan; 2239 2240 return 0; 2241 } 2242 EXPORT_SYMBOL(b53_br_join); 2243 2244 void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) 2245 { 2246 struct b53_device *dev = ds->priv; 2247 struct b53_vlan *vl; 2248 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; 2249 unsigned int i; 2250 u16 pvlan, reg, pvid; 2251 2252 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); 2253 2254 b53_for_each_port(dev, i) { 2255 /* Don't touch the remaining ports */ 2256 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) 2257 continue; 2258 2259 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), ®); 2260 reg &= ~BIT(port); 2261 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg); 2262 dev->ports[port].vlan_ctl_mask = reg; 2263 2264 /* Prevent self removal to preserve isolation */ 2265 if (port != i) 2266 pvlan &= ~BIT(i); 2267 } 2268 2269 /* Enable redirection of unknown SA to the CPU port */ 2270 b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED); 2271 2272 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); 2273 dev->ports[port].vlan_ctl_mask = pvlan; 2274 2275 pvid = b53_default_pvid(dev); 2276 vl = &dev->vlans[pvid]; 2277 2278 if (dev->vlan_filtering) { 2279 /* Make this port join all VLANs without VLAN entries */ 2280 if (is58xx(dev)) { 2281 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, ®); 2282 reg |= BIT(port); 2283 if (!(reg & BIT(cpu_port))) 2284 reg |= BIT(cpu_port); 2285 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); 2286 } 2287 2288 b53_get_vlan_entry(dev, pvid, vl); 2289 vl->members |= BIT(port); 2290 b53_set_vlan_entry(dev, pvid, vl); 2291 } 2292 } 2293 EXPORT_SYMBOL(b53_br_leave); 2294 2295 void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state) 2296 { 2297 struct b53_device *dev = ds->priv; 2298 u8 hw_state; 2299 u8 reg; 2300 2301 switch (state) { 2302 case BR_STATE_DISABLED: 2303 hw_state = PORT_CTRL_DIS_STATE; 2304 break; 2305 case BR_STATE_LISTENING: 2306 hw_state = PORT_CTRL_LISTEN_STATE; 2307 break; 2308 case BR_STATE_LEARNING: 2309 hw_state = PORT_CTRL_LEARN_STATE; 2310 break; 2311 case BR_STATE_FORWARDING: 2312 hw_state = PORT_CTRL_FWD_STATE; 2313 break; 2314 case BR_STATE_BLOCKING: 2315 hw_state = PORT_CTRL_BLOCK_STATE; 2316 break; 2317 default: 2318 dev_err(ds->dev, "invalid STP state: %d\n", state); 2319 return; 2320 } 2321 2322 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), ®); 2323 reg &= ~PORT_CTRL_STP_STATE_MASK; 2324 reg |= hw_state; 2325 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); 2326 } 2327 EXPORT_SYMBOL(b53_br_set_stp_state); 2328 2329 void b53_br_fast_age(struct dsa_switch *ds, int port) 2330 { 2331 struct b53_device *dev = ds->priv; 2332 2333 if (b53_fast_age_port(dev, port)) 2334 dev_err(ds->dev, "fast ageing failed\n"); 2335 } 2336 EXPORT_SYMBOL(b53_br_fast_age); 2337 2338 int b53_br_flags_pre(struct dsa_switch *ds, int port, 2339 struct switchdev_brport_flags flags, 2340 struct netlink_ext_ack *extack) 2341 { 2342 struct b53_device *dev = ds->priv; 2343 unsigned long mask = (BR_FLOOD | BR_MCAST_FLOOD); 2344 2345 if (!is5325(dev)) 2346 mask |= BR_LEARNING; 2347 2348 if (flags.mask & ~mask) 2349 return -EINVAL; 2350 2351 return 0; 2352 } 2353 EXPORT_SYMBOL(b53_br_flags_pre); 2354 2355 int b53_br_flags(struct dsa_switch *ds, int port, 2356 struct switchdev_brport_flags flags, 2357 struct netlink_ext_ack *extack) 2358 { 2359 if (flags.mask & BR_FLOOD) 2360 b53_port_set_ucast_flood(ds->priv, port, 2361 !!(flags.val & BR_FLOOD)); 2362 if (flags.mask & BR_MCAST_FLOOD) 2363 b53_port_set_mcast_flood(ds->priv, port, 2364 !!(flags.val & BR_MCAST_FLOOD)); 2365 if (flags.mask & BR_LEARNING) 2366 b53_port_set_learning(ds->priv, port, 2367 !!(flags.val & BR_LEARNING)); 2368 2369 return 0; 2370 } 2371 EXPORT_SYMBOL(b53_br_flags); 2372 2373 static bool b53_possible_cpu_port(struct dsa_switch *ds, int port) 2374 { 2375 /* Broadcom switches will accept enabling Broadcom tags on the 2376 * following ports: 5, 7 and 8, any other port is not supported 2377 */ 2378 switch (port) { 2379 case B53_CPU_PORT_25: 2380 case 7: 2381 case B53_CPU_PORT: 2382 return true; 2383 } 2384 2385 return false; 2386 } 2387 2388 static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port, 2389 enum dsa_tag_protocol tag_protocol) 2390 { 2391 bool ret = b53_possible_cpu_port(ds, port); 2392 2393 if (!ret) { 2394 dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n", 2395 port); 2396 return ret; 2397 } 2398 2399 switch (tag_protocol) { 2400 case DSA_TAG_PROTO_BRCM: 2401 case DSA_TAG_PROTO_BRCM_PREPEND: 2402 dev_warn(ds->dev, 2403 "Port %d is stacked to Broadcom tag switch\n", port); 2404 ret = false; 2405 break; 2406 default: 2407 ret = true; 2408 break; 2409 } 2410 2411 return ret; 2412 } 2413 2414 enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port, 2415 enum dsa_tag_protocol mprot) 2416 { 2417 struct b53_device *dev = ds->priv; 2418 2419 if (!b53_can_enable_brcm_tags(ds, port, mprot)) { 2420 dev->tag_protocol = DSA_TAG_PROTO_NONE; 2421 goto out; 2422 } 2423 2424 /* Older models require different 6 byte tags */ 2425 if (is5325(dev) || is5365(dev)) { 2426 dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY_FCS; 2427 goto out; 2428 } else if (is63xx(dev)) { 2429 dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY; 2430 goto out; 2431 } 2432 2433 /* Broadcom BCM58xx chips have a flow accelerator on Port 8 2434 * which requires us to use the prepended Broadcom tag type 2435 */ 2436 if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT) { 2437 dev->tag_protocol = DSA_TAG_PROTO_BRCM_PREPEND; 2438 goto out; 2439 } 2440 2441 dev->tag_protocol = DSA_TAG_PROTO_BRCM; 2442 out: 2443 return dev->tag_protocol; 2444 } 2445 EXPORT_SYMBOL(b53_get_tag_protocol); 2446 2447 int b53_mirror_add(struct dsa_switch *ds, int port, 2448 struct dsa_mall_mirror_tc_entry *mirror, bool ingress, 2449 struct netlink_ext_ack *extack) 2450 { 2451 struct b53_device *dev = ds->priv; 2452 u16 reg, loc; 2453 2454 if (ingress) 2455 loc = B53_IG_MIR_CTL; 2456 else 2457 loc = B53_EG_MIR_CTL; 2458 2459 b53_read16(dev, B53_MGMT_PAGE, loc, ®); 2460 reg |= BIT(port); 2461 b53_write16(dev, B53_MGMT_PAGE, loc, reg); 2462 2463 b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, ®); 2464 reg &= ~CAP_PORT_MASK; 2465 reg |= mirror->to_local_port; 2466 reg |= MIRROR_EN; 2467 b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg); 2468 2469 return 0; 2470 } 2471 EXPORT_SYMBOL(b53_mirror_add); 2472 2473 void b53_mirror_del(struct dsa_switch *ds, int port, 2474 struct dsa_mall_mirror_tc_entry *mirror) 2475 { 2476 struct b53_device *dev = ds->priv; 2477 bool loc_disable = false, other_loc_disable = false; 2478 u16 reg, loc; 2479 2480 if (mirror->ingress) 2481 loc = B53_IG_MIR_CTL; 2482 else 2483 loc = B53_EG_MIR_CTL; 2484 2485 /* Update the desired ingress/egress register */ 2486 b53_read16(dev, B53_MGMT_PAGE, loc, ®); 2487 reg &= ~BIT(port); 2488 if (!(reg & MIRROR_MASK)) 2489 loc_disable = true; 2490 b53_write16(dev, B53_MGMT_PAGE, loc, reg); 2491 2492 /* Now look at the other one to know if we can disable mirroring 2493 * entirely 2494 */ 2495 if (mirror->ingress) 2496 b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, ®); 2497 else 2498 b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, ®); 2499 if (!(reg & MIRROR_MASK)) 2500 other_loc_disable = true; 2501 2502 b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, ®); 2503 /* Both no longer have ports, let's disable mirroring */ 2504 if (loc_disable && other_loc_disable) { 2505 reg &= ~MIRROR_EN; 2506 reg &= ~mirror->to_local_port; 2507 } 2508 b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg); 2509 } 2510 EXPORT_SYMBOL(b53_mirror_del); 2511 2512 /* Returns 0 if EEE was not enabled, or 1 otherwise 2513 */ 2514 int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy) 2515 { 2516 int ret; 2517 2518 if (!b53_support_eee(ds, port)) 2519 return 0; 2520 2521 ret = phy_init_eee(phy, false); 2522 if (ret) 2523 return 0; 2524 2525 b53_eee_enable_set(ds, port, true); 2526 2527 return 1; 2528 } 2529 EXPORT_SYMBOL(b53_eee_init); 2530 2531 bool b53_support_eee(struct dsa_switch *ds, int port) 2532 { 2533 struct b53_device *dev = ds->priv; 2534 2535 return !is5325(dev) && !is5365(dev) && !is63xx(dev); 2536 } 2537 EXPORT_SYMBOL(b53_support_eee); 2538 2539 int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e) 2540 { 2541 struct b53_device *dev = ds->priv; 2542 struct ethtool_keee *p = &dev->ports[port].eee; 2543 2544 p->eee_enabled = e->eee_enabled; 2545 b53_eee_enable_set(ds, port, e->eee_enabled); 2546 2547 return 0; 2548 } 2549 EXPORT_SYMBOL(b53_set_mac_eee); 2550 2551 static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu) 2552 { 2553 struct b53_device *dev = ds->priv; 2554 bool enable_jumbo; 2555 bool allow_10_100; 2556 2557 if (is5325(dev) || is5365(dev)) 2558 return 0; 2559 2560 if (!dsa_is_cpu_port(ds, port)) 2561 return 0; 2562 2563 enable_jumbo = (mtu > ETH_DATA_LEN); 2564 allow_10_100 = !is63xx(dev); 2565 2566 return b53_set_jumbo(dev, enable_jumbo, allow_10_100); 2567 } 2568 2569 static int b53_get_max_mtu(struct dsa_switch *ds, int port) 2570 { 2571 struct b53_device *dev = ds->priv; 2572 2573 if (is5325(dev) || is5365(dev)) 2574 return B53_MAX_MTU_25; 2575 2576 return B53_MAX_MTU; 2577 } 2578 2579 int b53_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) 2580 { 2581 struct b53_device *dev = ds->priv; 2582 u32 atc; 2583 int reg; 2584 2585 if (is63xx(dev)) 2586 reg = B53_AGING_TIME_CONTROL_63XX; 2587 else 2588 reg = B53_AGING_TIME_CONTROL; 2589 2590 if (dev->chip_id == BCM53101_DEVICE_ID) 2591 atc = DIV_ROUND_CLOSEST(msecs, 500); 2592 else 2593 atc = DIV_ROUND_CLOSEST(msecs, 1000); 2594 2595 if (!is5325(dev) && !is5365(dev)) 2596 atc |= AGE_CHANGE; 2597 2598 b53_write32(dev, B53_MGMT_PAGE, reg, atc); 2599 2600 return 0; 2601 } 2602 EXPORT_SYMBOL_GPL(b53_set_ageing_time); 2603 2604 static const struct phylink_mac_ops b53_phylink_mac_ops = { 2605 .mac_select_pcs = b53_phylink_mac_select_pcs, 2606 .mac_config = b53_phylink_mac_config, 2607 .mac_link_down = b53_phylink_mac_link_down, 2608 .mac_link_up = b53_phylink_mac_link_up, 2609 }; 2610 2611 static const struct dsa_switch_ops b53_switch_ops = { 2612 .get_tag_protocol = b53_get_tag_protocol, 2613 .setup = b53_setup, 2614 .teardown = b53_teardown, 2615 .get_strings = b53_get_strings, 2616 .get_ethtool_stats = b53_get_ethtool_stats, 2617 .get_sset_count = b53_get_sset_count, 2618 .get_ethtool_phy_stats = b53_get_ethtool_phy_stats, 2619 .phy_read = b53_phy_read16, 2620 .phy_write = b53_phy_write16, 2621 .phylink_get_caps = b53_phylink_get_caps, 2622 .port_setup = b53_setup_port, 2623 .port_enable = b53_enable_port, 2624 .port_disable = b53_disable_port, 2625 .support_eee = b53_support_eee, 2626 .set_mac_eee = b53_set_mac_eee, 2627 .set_ageing_time = b53_set_ageing_time, 2628 .port_bridge_join = b53_br_join, 2629 .port_bridge_leave = b53_br_leave, 2630 .port_pre_bridge_flags = b53_br_flags_pre, 2631 .port_bridge_flags = b53_br_flags, 2632 .port_stp_state_set = b53_br_set_stp_state, 2633 .port_fast_age = b53_br_fast_age, 2634 .port_vlan_filtering = b53_vlan_filtering, 2635 .port_vlan_add = b53_vlan_add, 2636 .port_vlan_del = b53_vlan_del, 2637 .port_fdb_dump = b53_fdb_dump, 2638 .port_fdb_add = b53_fdb_add, 2639 .port_fdb_del = b53_fdb_del, 2640 .port_mirror_add = b53_mirror_add, 2641 .port_mirror_del = b53_mirror_del, 2642 .port_mdb_add = b53_mdb_add, 2643 .port_mdb_del = b53_mdb_del, 2644 .port_max_mtu = b53_get_max_mtu, 2645 .port_change_mtu = b53_change_mtu, 2646 }; 2647 2648 struct b53_chip_data { 2649 u32 chip_id; 2650 const char *dev_name; 2651 u16 vlans; 2652 u16 enabled_ports; 2653 u8 imp_port; 2654 u8 cpu_port; 2655 u8 vta_regs[3]; 2656 u8 arl_bins; 2657 u16 arl_buckets; 2658 u8 duplex_reg; 2659 u8 jumbo_pm_reg; 2660 u8 jumbo_size_reg; 2661 }; 2662 2663 #define B53_VTA_REGS \ 2664 { B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY } 2665 #define B53_VTA_REGS_9798 \ 2666 { B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 } 2667 #define B53_VTA_REGS_63XX \ 2668 { B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX } 2669 2670 static const struct b53_chip_data b53_switch_chips[] = { 2671 { 2672 .chip_id = BCM5325_DEVICE_ID, 2673 .dev_name = "BCM5325", 2674 .vlans = 16, 2675 .enabled_ports = 0x3f, 2676 .arl_bins = 2, 2677 .arl_buckets = 1024, 2678 .imp_port = 5, 2679 .duplex_reg = B53_DUPLEX_STAT_FE, 2680 }, 2681 { 2682 .chip_id = BCM5365_DEVICE_ID, 2683 .dev_name = "BCM5365", 2684 .vlans = 256, 2685 .enabled_ports = 0x3f, 2686 .arl_bins = 2, 2687 .arl_buckets = 1024, 2688 .imp_port = 5, 2689 .duplex_reg = B53_DUPLEX_STAT_FE, 2690 }, 2691 { 2692 .chip_id = BCM5389_DEVICE_ID, 2693 .dev_name = "BCM5389", 2694 .vlans = 4096, 2695 .enabled_ports = 0x11f, 2696 .arl_bins = 4, 2697 .arl_buckets = 1024, 2698 .imp_port = 8, 2699 .vta_regs = B53_VTA_REGS, 2700 .duplex_reg = B53_DUPLEX_STAT_GE, 2701 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2702 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2703 }, 2704 { 2705 .chip_id = BCM5395_DEVICE_ID, 2706 .dev_name = "BCM5395", 2707 .vlans = 4096, 2708 .enabled_ports = 0x11f, 2709 .arl_bins = 4, 2710 .arl_buckets = 1024, 2711 .imp_port = 8, 2712 .vta_regs = B53_VTA_REGS, 2713 .duplex_reg = B53_DUPLEX_STAT_GE, 2714 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2715 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2716 }, 2717 { 2718 .chip_id = BCM5397_DEVICE_ID, 2719 .dev_name = "BCM5397", 2720 .vlans = 4096, 2721 .enabled_ports = 0x11f, 2722 .arl_bins = 4, 2723 .arl_buckets = 1024, 2724 .imp_port = 8, 2725 .vta_regs = B53_VTA_REGS_9798, 2726 .duplex_reg = B53_DUPLEX_STAT_GE, 2727 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2728 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2729 }, 2730 { 2731 .chip_id = BCM5398_DEVICE_ID, 2732 .dev_name = "BCM5398", 2733 .vlans = 4096, 2734 .enabled_ports = 0x17f, 2735 .arl_bins = 4, 2736 .arl_buckets = 1024, 2737 .imp_port = 8, 2738 .vta_regs = B53_VTA_REGS_9798, 2739 .duplex_reg = B53_DUPLEX_STAT_GE, 2740 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2741 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2742 }, 2743 { 2744 .chip_id = BCM53101_DEVICE_ID, 2745 .dev_name = "BCM53101", 2746 .vlans = 4096, 2747 .enabled_ports = 0x11f, 2748 .arl_bins = 4, 2749 .arl_buckets = 512, 2750 .vta_regs = B53_VTA_REGS, 2751 .imp_port = 8, 2752 .duplex_reg = B53_DUPLEX_STAT_GE, 2753 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2754 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2755 }, 2756 { 2757 .chip_id = BCM53115_DEVICE_ID, 2758 .dev_name = "BCM53115", 2759 .vlans = 4096, 2760 .enabled_ports = 0x11f, 2761 .arl_bins = 4, 2762 .arl_buckets = 1024, 2763 .vta_regs = B53_VTA_REGS, 2764 .imp_port = 8, 2765 .duplex_reg = B53_DUPLEX_STAT_GE, 2766 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2767 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2768 }, 2769 { 2770 .chip_id = BCM53125_DEVICE_ID, 2771 .dev_name = "BCM53125", 2772 .vlans = 4096, 2773 .enabled_ports = 0x1ff, 2774 .arl_bins = 4, 2775 .arl_buckets = 1024, 2776 .imp_port = 8, 2777 .vta_regs = B53_VTA_REGS, 2778 .duplex_reg = B53_DUPLEX_STAT_GE, 2779 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2780 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2781 }, 2782 { 2783 .chip_id = BCM53128_DEVICE_ID, 2784 .dev_name = "BCM53128", 2785 .vlans = 4096, 2786 .enabled_ports = 0x1ff, 2787 .arl_bins = 4, 2788 .arl_buckets = 1024, 2789 .imp_port = 8, 2790 .vta_regs = B53_VTA_REGS, 2791 .duplex_reg = B53_DUPLEX_STAT_GE, 2792 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2793 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2794 }, 2795 { 2796 .chip_id = BCM63XX_DEVICE_ID, 2797 .dev_name = "BCM63xx", 2798 .vlans = 4096, 2799 .enabled_ports = 0, /* pdata must provide them */ 2800 .arl_bins = 4, 2801 .arl_buckets = 1024, 2802 .imp_port = 8, 2803 .vta_regs = B53_VTA_REGS_63XX, 2804 .duplex_reg = B53_DUPLEX_STAT_63XX, 2805 .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX, 2806 .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX, 2807 }, 2808 { 2809 .chip_id = BCM53010_DEVICE_ID, 2810 .dev_name = "BCM53010", 2811 .vlans = 4096, 2812 .enabled_ports = 0x1bf, 2813 .arl_bins = 4, 2814 .arl_buckets = 1024, 2815 .imp_port = 8, 2816 .vta_regs = B53_VTA_REGS, 2817 .duplex_reg = B53_DUPLEX_STAT_GE, 2818 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2819 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2820 }, 2821 { 2822 .chip_id = BCM53011_DEVICE_ID, 2823 .dev_name = "BCM53011", 2824 .vlans = 4096, 2825 .enabled_ports = 0x1bf, 2826 .arl_bins = 4, 2827 .arl_buckets = 1024, 2828 .imp_port = 8, 2829 .vta_regs = B53_VTA_REGS, 2830 .duplex_reg = B53_DUPLEX_STAT_GE, 2831 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2832 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2833 }, 2834 { 2835 .chip_id = BCM53012_DEVICE_ID, 2836 .dev_name = "BCM53012", 2837 .vlans = 4096, 2838 .enabled_ports = 0x1bf, 2839 .arl_bins = 4, 2840 .arl_buckets = 1024, 2841 .imp_port = 8, 2842 .vta_regs = B53_VTA_REGS, 2843 .duplex_reg = B53_DUPLEX_STAT_GE, 2844 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2845 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2846 }, 2847 { 2848 .chip_id = BCM53018_DEVICE_ID, 2849 .dev_name = "BCM53018", 2850 .vlans = 4096, 2851 .enabled_ports = 0x1bf, 2852 .arl_bins = 4, 2853 .arl_buckets = 1024, 2854 .imp_port = 8, 2855 .vta_regs = B53_VTA_REGS, 2856 .duplex_reg = B53_DUPLEX_STAT_GE, 2857 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2858 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2859 }, 2860 { 2861 .chip_id = BCM53019_DEVICE_ID, 2862 .dev_name = "BCM53019", 2863 .vlans = 4096, 2864 .enabled_ports = 0x1bf, 2865 .arl_bins = 4, 2866 .arl_buckets = 1024, 2867 .imp_port = 8, 2868 .vta_regs = B53_VTA_REGS, 2869 .duplex_reg = B53_DUPLEX_STAT_GE, 2870 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2871 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2872 }, 2873 { 2874 .chip_id = BCM58XX_DEVICE_ID, 2875 .dev_name = "BCM585xx/586xx/88312", 2876 .vlans = 4096, 2877 .enabled_ports = 0x1ff, 2878 .arl_bins = 4, 2879 .arl_buckets = 1024, 2880 .imp_port = 8, 2881 .vta_regs = B53_VTA_REGS, 2882 .duplex_reg = B53_DUPLEX_STAT_GE, 2883 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2884 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2885 }, 2886 { 2887 .chip_id = BCM583XX_DEVICE_ID, 2888 .dev_name = "BCM583xx/11360", 2889 .vlans = 4096, 2890 .enabled_ports = 0x103, 2891 .arl_bins = 4, 2892 .arl_buckets = 1024, 2893 .imp_port = 8, 2894 .vta_regs = B53_VTA_REGS, 2895 .duplex_reg = B53_DUPLEX_STAT_GE, 2896 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2897 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2898 }, 2899 /* Starfighter 2 */ 2900 { 2901 .chip_id = BCM4908_DEVICE_ID, 2902 .dev_name = "BCM4908", 2903 .vlans = 4096, 2904 .enabled_ports = 0x1bf, 2905 .arl_bins = 4, 2906 .arl_buckets = 256, 2907 .imp_port = 8, 2908 .vta_regs = B53_VTA_REGS, 2909 .duplex_reg = B53_DUPLEX_STAT_GE, 2910 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2911 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2912 }, 2913 { 2914 .chip_id = BCM7445_DEVICE_ID, 2915 .dev_name = "BCM7445", 2916 .vlans = 4096, 2917 .enabled_ports = 0x1ff, 2918 .arl_bins = 4, 2919 .arl_buckets = 1024, 2920 .imp_port = 8, 2921 .vta_regs = B53_VTA_REGS, 2922 .duplex_reg = B53_DUPLEX_STAT_GE, 2923 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2924 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2925 }, 2926 { 2927 .chip_id = BCM7278_DEVICE_ID, 2928 .dev_name = "BCM7278", 2929 .vlans = 4096, 2930 .enabled_ports = 0x1ff, 2931 .arl_bins = 4, 2932 .arl_buckets = 256, 2933 .imp_port = 8, 2934 .vta_regs = B53_VTA_REGS, 2935 .duplex_reg = B53_DUPLEX_STAT_GE, 2936 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2937 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2938 }, 2939 { 2940 .chip_id = BCM53134_DEVICE_ID, 2941 .dev_name = "BCM53134", 2942 .vlans = 4096, 2943 .enabled_ports = 0x12f, 2944 .imp_port = 8, 2945 .cpu_port = B53_CPU_PORT, 2946 .vta_regs = B53_VTA_REGS, 2947 .arl_bins = 4, 2948 .arl_buckets = 1024, 2949 .duplex_reg = B53_DUPLEX_STAT_GE, 2950 .jumbo_pm_reg = B53_JUMBO_PORT_MASK, 2951 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2952 }, 2953 }; 2954 2955 static int b53_switch_init(struct b53_device *dev) 2956 { 2957 u32 chip_id = dev->chip_id; 2958 unsigned int i; 2959 int ret; 2960 2961 if (is63xx(dev)) 2962 chip_id = BCM63XX_DEVICE_ID; 2963 2964 for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) { 2965 const struct b53_chip_data *chip = &b53_switch_chips[i]; 2966 2967 if (chip->chip_id == chip_id) { 2968 if (!dev->enabled_ports) 2969 dev->enabled_ports = chip->enabled_ports; 2970 dev->name = chip->dev_name; 2971 dev->duplex_reg = chip->duplex_reg; 2972 dev->vta_regs[0] = chip->vta_regs[0]; 2973 dev->vta_regs[1] = chip->vta_regs[1]; 2974 dev->vta_regs[2] = chip->vta_regs[2]; 2975 dev->jumbo_pm_reg = chip->jumbo_pm_reg; 2976 dev->imp_port = chip->imp_port; 2977 dev->num_vlans = chip->vlans; 2978 dev->num_arl_bins = chip->arl_bins; 2979 dev->num_arl_buckets = chip->arl_buckets; 2980 break; 2981 } 2982 } 2983 2984 /* check which BCM5325x version we have */ 2985 if (is5325(dev)) { 2986 u8 vc4; 2987 2988 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4); 2989 2990 /* check reserved bits */ 2991 switch (vc4 & 3) { 2992 case 1: 2993 /* BCM5325E */ 2994 break; 2995 case 3: 2996 /* BCM5325F - do not use port 4 */ 2997 dev->enabled_ports &= ~BIT(4); 2998 break; 2999 default: 3000 /* On the BCM47XX SoCs this is the supported internal switch.*/ 3001 #ifndef CONFIG_BCM47XX 3002 /* BCM5325M */ 3003 return -EINVAL; 3004 #else 3005 break; 3006 #endif 3007 } 3008 } 3009 3010 if (is5325e(dev)) 3011 dev->num_arl_buckets = 512; 3012 3013 dev->num_ports = fls(dev->enabled_ports); 3014 3015 dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS); 3016 3017 /* Include non standard CPU port built-in PHYs to be probed */ 3018 if (is539x(dev) || is531x5(dev)) { 3019 for (i = 0; i < dev->num_ports; i++) { 3020 if (!(dev->ds->phys_mii_mask & BIT(i)) && 3021 !b53_possible_cpu_port(dev->ds, i)) 3022 dev->ds->phys_mii_mask |= BIT(i); 3023 } 3024 } 3025 3026 dev->ports = devm_kcalloc(dev->dev, 3027 dev->num_ports, sizeof(struct b53_port), 3028 GFP_KERNEL); 3029 if (!dev->ports) 3030 return -ENOMEM; 3031 3032 dev->vlans = devm_kcalloc(dev->dev, 3033 dev->num_vlans, sizeof(struct b53_vlan), 3034 GFP_KERNEL); 3035 if (!dev->vlans) 3036 return -ENOMEM; 3037 3038 dev->reset_gpio = b53_switch_get_reset_gpio(dev); 3039 if (dev->reset_gpio >= 0) { 3040 ret = devm_gpio_request_one(dev->dev, dev->reset_gpio, 3041 GPIOF_OUT_INIT_HIGH, "robo_reset"); 3042 if (ret) 3043 return ret; 3044 } 3045 3046 return 0; 3047 } 3048 3049 struct b53_device *b53_switch_alloc(struct device *base, 3050 const struct b53_io_ops *ops, 3051 void *priv) 3052 { 3053 struct dsa_switch *ds; 3054 struct b53_device *dev; 3055 3056 ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL); 3057 if (!ds) 3058 return NULL; 3059 3060 ds->dev = base; 3061 3062 dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL); 3063 if (!dev) 3064 return NULL; 3065 3066 ds->priv = dev; 3067 dev->dev = base; 3068 3069 dev->ds = ds; 3070 dev->priv = priv; 3071 dev->ops = ops; 3072 ds->ops = &b53_switch_ops; 3073 ds->phylink_mac_ops = &b53_phylink_mac_ops; 3074 dev->vlan_enabled = true; 3075 dev->vlan_filtering = false; 3076 /* Let DSA handle the case were multiple bridges span the same switch 3077 * device and different VLAN awareness settings are requested, which 3078 * would be breaking filtering semantics for any of the other bridge 3079 * devices. (not hardware supported) 3080 */ 3081 ds->vlan_filtering_is_global = true; 3082 3083 mutex_init(&dev->reg_mutex); 3084 mutex_init(&dev->stats_mutex); 3085 mutex_init(&dev->arl_mutex); 3086 3087 return dev; 3088 } 3089 EXPORT_SYMBOL(b53_switch_alloc); 3090 3091 int b53_switch_detect(struct b53_device *dev) 3092 { 3093 u32 id32; 3094 u16 tmp; 3095 u8 id8; 3096 int ret; 3097 3098 ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8); 3099 if (ret) 3100 return ret; 3101 3102 switch (id8) { 3103 case 0: 3104 /* BCM5325 and BCM5365 do not have this register so reads 3105 * return 0. But the read operation did succeed, so assume this 3106 * is one of them. 3107 * 3108 * Next check if we can write to the 5325's VTA register; for 3109 * 5365 it is read only. 3110 */ 3111 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf); 3112 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp); 3113 3114 if (tmp == 0xf) { 3115 u32 phy_id; 3116 int val; 3117 3118 dev->chip_id = BCM5325_DEVICE_ID; 3119 3120 val = b53_phy_read16(dev->ds, 0, MII_PHYSID1); 3121 phy_id = (val & 0xffff) << 16; 3122 val = b53_phy_read16(dev->ds, 0, MII_PHYSID2); 3123 phy_id |= (val & 0xfff0); 3124 3125 if (phy_id == 0x00406330) 3126 dev->variant_id = B53_VARIANT_5325M; 3127 else if (phy_id == 0x0143bc30) 3128 dev->variant_id = B53_VARIANT_5325E; 3129 } else { 3130 dev->chip_id = BCM5365_DEVICE_ID; 3131 } 3132 break; 3133 case BCM5389_DEVICE_ID: 3134 case BCM5395_DEVICE_ID: 3135 case BCM5397_DEVICE_ID: 3136 case BCM5398_DEVICE_ID: 3137 dev->chip_id = id8; 3138 break; 3139 default: 3140 ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32); 3141 if (ret) 3142 return ret; 3143 3144 switch (id32) { 3145 case BCM53101_DEVICE_ID: 3146 case BCM53115_DEVICE_ID: 3147 case BCM53125_DEVICE_ID: 3148 case BCM53128_DEVICE_ID: 3149 case BCM53010_DEVICE_ID: 3150 case BCM53011_DEVICE_ID: 3151 case BCM53012_DEVICE_ID: 3152 case BCM53018_DEVICE_ID: 3153 case BCM53019_DEVICE_ID: 3154 case BCM53134_DEVICE_ID: 3155 dev->chip_id = id32; 3156 break; 3157 default: 3158 dev_err(dev->dev, 3159 "unsupported switch detected (BCM53%02x/BCM%x)\n", 3160 id8, id32); 3161 return -ENODEV; 3162 } 3163 } 3164 3165 if (dev->chip_id == BCM5325_DEVICE_ID) 3166 return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25, 3167 &dev->core_rev); 3168 else 3169 return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID, 3170 &dev->core_rev); 3171 } 3172 EXPORT_SYMBOL(b53_switch_detect); 3173 3174 int b53_switch_register(struct b53_device *dev) 3175 { 3176 int ret; 3177 3178 if (dev->pdata) { 3179 dev->chip_id = dev->pdata->chip_id; 3180 dev->enabled_ports = dev->pdata->enabled_ports; 3181 } 3182 3183 if (!dev->chip_id && b53_switch_detect(dev)) 3184 return -EINVAL; 3185 3186 ret = b53_switch_init(dev); 3187 if (ret) 3188 return ret; 3189 3190 dev_info(dev->dev, "found switch: %s, rev %i\n", 3191 dev->name, dev->core_rev); 3192 3193 return dsa_register_switch(dev->ds); 3194 } 3195 EXPORT_SYMBOL(b53_switch_register); 3196 3197 MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>"); 3198 MODULE_DESCRIPTION("B53 switch library"); 3199 MODULE_LICENSE("Dual BSD/GPL"); 3200