// SPDX-License-Identifier: GPL-2.0
/*
 * Lantiq / Intel GSWIP switch driver for VRX200, xRX300 and xRX330 SoCs
 *
 * Copyright (C) 2010 Lantiq Deutschland
 * Copyright (C) 2012 John Crispin <john@phrozen.org>
 * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
 *
 * The VLAN and bridge model the GSWIP hardware uses does not directly
 * match the model DSA uses.
 *
 * The hardware has 64 possible table entries for bridges with one VLAN
 * ID, one flow id and a list of ports for each bridge. All entries which
 * match the same flow ID are combined in the mac learning table, they
 * act as one global bridge.
 * The hardware does not support a VLAN filter on the port, only on the
 * bridge; this driver converts the DSA model to the hardware model.
 *
 * The CPU gets all the exception frames which do not match any forwarding
 * rule and the CPU port is also added to all bridges. This makes it possible
 * to handle all the special cases easily in software.
 * At initialization the driver allocates one bridge table entry for
 * each switch port which is used when the port is used without an
 * explicit bridge. This prevents the frames from being forwarded
 * between all LAN ports by default.
 */

#include "lantiq_gswip.h"
#include "lantiq_pce.h"

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <dt-bindings/mips/lantiq_rcu_gphy.h>

/* Firmware file names for the Fast Ethernet and Gigabit Ethernet
 * variants of the internal GPHYs.
 */
struct xway_gphy_match_data {
	char *fe_firmware_name;
	char *ge_firmware_name;
};

/* In-memory image of one PCE (Packet Classification Engine) table entry.
 * It is transferred to/from the hardware through the PCE_TBL_* register
 * window by gswip_pce_table_entry_read()/gswip_pce_table_entry_write().
 */
struct gswip_pce_table_entry {
	u16 index;	/* PCE_TBL_ADDR.ADDR = pData->table_index */
	u16 table;	/* PCE_TBL_CTRL.ADDR = pData->table */
	u16 key[8];	/* key words, one per PCE_TBL_KEY register */
	u16 val[5];	/* value words, one per PCE_TBL_VAL register */
	u16 mask;	/* key mask (PCE_TBL_MASK register) */
	u8 gmap;	/* group map field of PCE_TBL_CTRL */
	bool type;	/* PCE_TBL_CTRL.TYPE flag */
	bool valid;	/* entry is valid (PCE_TBL_CTRL.VLD flag) */
	bool key_mode;	/* true: access entry by key, false: by address */
};

/* Describes one RMON counter: its width in 32-bit words, its offset in
 * the hardware counter table and the name reported to ethtool.
 */
struct gswip_rmon_cnt_desc {
	unsigned int size;
	unsigned int offset;
	const char *name;
};

#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}

static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
	/** Receive Packet Count (only packets that are accepted and not discarded). */
	MIB_DESC(1, 0x1F, "RxGoodPkts"),
	MIB_DESC(1, 0x23, "RxUnicastPkts"),
	MIB_DESC(1, 0x22, "RxMulticastPkts"),
	MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
	MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
	MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
	MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
	MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
	MIB_DESC(1, 0x20, "RxGoodPausePkts"),
	MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
	MIB_DESC(1, 0x12, "Rx64BytePkts"),
	MIB_DESC(1, 0x13, "Rx127BytePkts"),
	MIB_DESC(1, 0x14, "Rx255BytePkts"),
	MIB_DESC(1, 0x15, "Rx511BytePkts"),
	MIB_DESC(1, 0x16, "Rx1023BytePkts"),
	/** Receive Size 1024-1522 (or more, if configured) Packet Count. */
	MIB_DESC(1, 0x17, "RxMaxBytePkts"),
	MIB_DESC(1, 0x18, "RxDroppedPkts"),
	MIB_DESC(1, 0x19, "RxFilteredPkts"),
	MIB_DESC(2, 0x24, "RxGoodBytes"),
	MIB_DESC(2, 0x26, "RxBadBytes"),
	MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
	MIB_DESC(1, 0x0C, "TxGoodPkts"),
	MIB_DESC(1, 0x06, "TxUnicastPkts"),
	MIB_DESC(1, 0x07, "TxMulticastPkts"),
	MIB_DESC(1, 0x00, "Tx64BytePkts"),
	MIB_DESC(1, 0x01, "Tx127BytePkts"),
	MIB_DESC(1, 0x02, "Tx255BytePkts"),
	MIB_DESC(1, 0x03, "Tx511BytePkts"),
	MIB_DESC(1, 0x04, "Tx1023BytePkts"),
	/** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
	MIB_DESC(1, 0x05, "TxMaxBytePkts"),
	MIB_DESC(1, 0x08, "TxSingleCollCount"),
	MIB_DESC(1, 0x09, "TxMultCollCount"),
	MIB_DESC(1, 0x0A, "TxLateCollCount"),
	MIB_DESC(1, 0x0B, "TxExcessCollCount"),
	MIB_DESC(1, 0x0D, "TxPauseCount"),
	MIB_DESC(1, 0x10, "TxDroppedPkts"),
	MIB_DESC(2, 0x0E, "TxGoodBytes"),
};

/* Read a switch-core register; @offset counts 32-bit registers. */
static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset)
{
	return __raw_readl(priv->gswip + (offset * 4));
}

/* Write a switch-core register; @offset counts 32-bit registers. */
static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->gswip + (offset * 4));
}

/* Read-modify-write a switch-core register: clear the bits in @clear,
 * then set the bits in @set.
 */
static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set,
			      u32 offset)
{
	u32 val = gswip_switch_r(priv, offset);

	val &= ~(clear);
	val |= set;
	gswip_switch_w(priv, val, offset);
}

/* Poll a switch-core register until all bits in @cleared read back as
 * zero. Polls every 20us and gives up after 50ms.
 * NOTE(review): the negative errno from readx_poll_timeout() travels
 * through a u32 return type; callers store it in an int, so the value
 * survives the round-trip — confirm this is intentional.
 */
static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
				  u32 cleared)
{
	u32 val;

	return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val,
				  (val & cleared) == 0, 20, 50000);
}

/* Read a register of the MDIO block; @offset counts 32-bit registers. */
static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset)
{
	return __raw_readl(priv->mdio + (offset * 4));
}

/* Write a register of the MDIO block; @offset counts 32-bit registers. */
static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->mdio + (offset * 4));
}

/* Read-modify-write a register of the MDIO block. */
static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set,
			    u32 offset)
{
	u32 val = gswip_mdio_r(priv, offset);

	val &= ~(clear);
	val |= set;
	gswip_mdio_w(priv, val, offset);
}

/* Read a register of the xMII block; @offset counts 32-bit registers. */
static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset)
{
	return __raw_readl(priv->mii + (offset * 4));
}

/* Write a register of the xMII block; @offset counts 32-bit registers. */
static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->mii + (offset * 4));
}

/* Read-modify-write a register of the xMII block. */
static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
			   u32 offset)
{
	u32 val = gswip_mii_r(priv, offset);

	val &= ~(clear);
	val |= set;
	gswip_mii_w(priv, val, offset);
}

/* Read-modify-write the MII_CFG register of @port. Ports without an MII
 * interface (not in hw_info->mii_ports) are silently skipped; the port
 * number is translated to a register index via mii_port_reg_offset.
 */
static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
			       int port)
{
	int reg_port;

	/* MII_CFG register only exists for MII ports */
	if (!(priv->hw_info->mii_ports & BIT(port)))
		return;

	reg_port = port + priv->hw_info->mii_port_reg_offset;

	gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(reg_port));
}

/* Read-modify-write the MII_PCDU (clock delay) register of @port. Only
 * register ports 0, 1 and 5 have a PCDU register; others are ignored.
 */
static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
				int port)
{
	int reg_port;

	/* MII_PCDU register only exists for MII ports */
	if (!(priv->hw_info->mii_ports & BIT(port)))
		return;

	reg_port = port + priv->hw_info->mii_port_reg_offset;

	switch (reg_port) {
	case 0:
		gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0);
		break;
	case 1:
		gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1);
		break;
	case 5:
		gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5);
		break;
	}
}

/* Wait for the MDIO controller to become idle. Polls the BUSY flag up
 * to 100 times with 20-40us sleeps; returns 0 when idle or -ETIMEDOUT.
 */
static int gswip_mdio_poll(struct gswip_priv *priv)
{
	int cnt = 100;

	while (likely(cnt--)) {
		u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL);

		if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0)
			return 0;
		usleep_range(20, 40);
	}

	return -ETIMEDOUT;
}

/* mii_bus .write callback: wait until idle, latch @val into the write
 * data register, then kick off the transaction via GSWIP_MDIO_CTRL.
 * Does not wait for completion; the next access polls for idle first.
 */
static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct gswip_priv *priv = bus->priv;
	int err;

	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE);
	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
		GSWIP_MDIO_CTRL);

	return 0;
}

/* mii_bus .read callback: wait until idle, start the read transaction,
 * wait for it to finish, then return the data register contents.
 */
static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
{
	struct gswip_priv *priv = bus->priv;
	int err;

	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
		GSWIP_MDIO_CTRL);

	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	return gswip_mdio_r(priv, GSWIP_MDIO_READ);
}

/* Register the built-in MDIO bus described by the "lantiq,xrx200-mdio"
 * compatible child node (or a child named "mdio") of the switch node.
 * Returns 0 when no usable node exists; the bus is devm-managed.
 * Note: of_node_put(NULL) and of_device_is_available(NULL) are safe.
 */
static int gswip_mdio(struct gswip_priv *priv)
{
	struct device_node *mdio_np, *switch_np = priv->dev->of_node;
	struct device *dev = priv->dev;
	struct mii_bus *bus;
	int err = 0;

	mdio_np = of_get_compatible_child(switch_np, "lantiq,xrx200-mdio");
	if (!mdio_np)
		mdio_np = of_get_child_by_name(switch_np, "mdio");

	if (!of_device_is_available(mdio_np))
		goto out_put_node;

	bus = devm_mdiobus_alloc(dev);
	if (!bus) {
		err = -ENOMEM;
		goto out_put_node;
	}

	bus->priv = priv;
	bus->read = gswip_mdio_rd;
	bus->write = gswip_mdio_wr;
	bus->name = "lantiq,xrx200-mdio";
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
	bus->parent = priv->dev;

	err = devm_of_mdiobus_register(dev, bus, mdio_np);

out_put_node:
	of_node_put(mdio_np);

	return err;
}

/* Read one PCE table entry into @tbl. The hardware sequence is strictly
 * ordered: wait for BAS (busy) to clear, program the entry address,
 * select table and read opmode while setting BAS to start the access,
 * wait for completion, then collect key/value/mask/control words.
 * Serialized against concurrent table accesses by pce_table_lock.
 */
static int gswip_pce_table_entry_read(struct gswip_priv *priv,
				      struct gswip_pce_table_entry *tbl)
{
	int i;
	int err;
	u16 crtl;
	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
					GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;

	mutex_lock(&priv->pce_table_lock);

	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				     GSWIP_PCE_TBL_CTRL_BAS);
	if (err) {
		mutex_unlock(&priv->pce_table_lock);
		return err;
	}

	gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS,
			  GSWIP_PCE_TBL_CTRL);

	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				     GSWIP_PCE_TBL_CTRL_BAS);
	if (err) {
		mutex_unlock(&priv->pce_table_lock);
		return err;
	}

	for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
		tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));

	for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
		tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i));

	tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK);

	crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);

	tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
	tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
	tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;

	mutex_unlock(&priv->pce_table_lock);

	return 0;
}

/* Write @tbl into the hardware PCE table. Mirrors the read sequence:
 * wait for BAS to clear, program the address and opmode, load the
 * key/value/mask registers, then assemble the control word (type,
 * valid, group map) and set BAS last to trigger the actual write.
 * Serialized against concurrent table accesses by pce_table_lock.
 */
static int gswip_pce_table_entry_write(struct gswip_priv *priv,
				       struct gswip_pce_table_entry *tbl)
{
	int i;
	int err;
	u16 crtl;
	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
					GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;

	mutex_lock(&priv->pce_table_lock);

	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				     GSWIP_PCE_TBL_CTRL_BAS);
	if (err) {
		mutex_unlock(&priv->pce_table_lock);
		return err;
	}

	gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  tbl->table | addr_mode,
			  GSWIP_PCE_TBL_CTRL);

	for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
		gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i));

	for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
		gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i));

	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  tbl->table | addr_mode,
			  GSWIP_PCE_TBL_CTRL);

	gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK);

	crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
	crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
		  GSWIP_PCE_TBL_CTRL_GMAP_MASK);
	if (tbl->type)
		crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
	if (tbl->valid)
		crtl |= GSWIP_PCE_TBL_CTRL_VLD;
	crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
	crtl |= GSWIP_PCE_TBL_CTRL_BAS;
	gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);

	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				     GSWIP_PCE_TBL_CTRL_BAS);

	mutex_unlock(&priv->pce_table_lock);

	return err;
}

/* Add the LAN port into a bridge with the CPU port by
 * default. This prevents automatic forwarding of
 * packets between the LAN ports when no explicit
 * bridge is configured.
 */
static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
{
	struct gswip_pce_table_entry vlan_active = {0,};
	struct gswip_pce_table_entry vlan_mapping = {0,};
	int err;

	/* Active VLAN slots 1..max_ports are reserved as the per-port
	 * single-port bridges; each uses its own flow id (fid).
	 */
	vlan_active.index = port + 1;
	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
	vlan_active.key[0] = 0; /* vid */
	vlan_active.val[0] = port + 1 /* fid */;
	vlan_active.valid = add;
	err = gswip_pce_table_entry_write(priv, &vlan_active);
	if (err) {
		dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
		return err;
	}

	/* On removal, invalidating the active VLAN entry is enough */
	if (!add)
		return 0;

	/* Forward only between this port and the CPU port(s) */
	vlan_mapping.index = port + 1;
	vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
	vlan_mapping.val[0] = 0 /* vid */;
	vlan_mapping.val[1] = BIT(port) | dsa_cpu_ports(priv->ds);
	vlan_mapping.val[2] = 0;
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		return err;
	}

	return 0;
}

/* DSA .port_enable: put a user port into its single-port bridge, latch
 * the attached PHY address for the port, enable RMON counters and the
 * fetch/store DMA paths.
 */
static int gswip_port_enable(struct dsa_switch *ds, int port,
			     struct phy_device *phydev)
{
	struct gswip_priv *priv = ds->priv;
	int err;

	if (!dsa_is_cpu_port(ds, port)) {
		u32 mdio_phy = 0;

		err = gswip_add_single_port_br(priv, port, true);
		if (err)
			return err;

		if (phydev)
			mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;

		gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
				GSWIP_MDIO_PHYp(port));
	}

	/* RMON Counter Enable for port */
	gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));

	/* enable port fetch/store dma & VLAN Modification */
	gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN |
				   GSWIP_FDMA_PCTRL_VLANMOD_BOTH,
			  GSWIP_FDMA_PCTRLp(port));
	gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
			  GSWIP_SDMA_PCTRLp(port));

	return 0;
}

/* DSA .port_disable: stop the fetch (TX) and store (RX) DMA engines of
 * the port.
 */
static void gswip_port_disable(struct dsa_switch *ds, int port)
{
	struct gswip_priv *priv = ds->priv;

	gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
			  GSWIP_FDMA_PCTRLp(port));
	gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
			  GSWIP_SDMA_PCTRLp(port));
}

/* Load the PCE microcode rows (from hw_info) into the parser table one
 * row at a time, triggering each write with the BAS bit, then mark the
 * microcode as valid so the classifier starts using it.
 */
static int gswip_pce_load_microcode(struct gswip_priv *priv)
{
	int i;
	int err;

	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL);
	gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK);

	for (i = 0; i < priv->hw_info->pce_microcode_size; i++) {
		gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR);
		gswip_switch_w(priv, (*priv->hw_info->pce_microcode)[i].val_0,
			       GSWIP_PCE_TBL_VAL(0));
		gswip_switch_w(priv, (*priv->hw_info->pce_microcode)[i].val_1,
			       GSWIP_PCE_TBL_VAL(1));
		gswip_switch_w(priv, (*priv->hw_info->pce_microcode)[i].val_2,
			       GSWIP_PCE_TBL_VAL(2));
		gswip_switch_w(priv, (*priv->hw_info->pce_microcode)[i].val_3,
			       GSWIP_PCE_TBL_VAL(3));

		/* start the table access: */
		gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS,
				  GSWIP_PCE_TBL_CTRL);
		err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
					     GSWIP_PCE_TBL_CTRL_BAS);
		if (err)
			return err;
	}

	/* tell the switch that the microcode is loaded */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID,
			  GSWIP_PCE_GCTRL_0);

	return 0;
}

/* DSA .port_vlan_filtering: switch the port between tag-based VLAN
 * (filtering on) and port-based VLAN (filtering off). Toggling while
 * the port is in a bridge is rejected since the hardware bridge setup
 * would no longer match.
 */
static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
				     bool vlan_filtering,
				     struct netlink_ext_ack *extack)
{
	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
	struct gswip_priv *priv = ds->priv;

	/* Do not allow changing the VLAN filtering options while in bridge */
	if (bridge && !!(priv->port_vlan_filter & BIT(port)) != vlan_filtering) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Dynamic toggling of vlan_filtering not supported");
		return -EIO;
	}

	if (vlan_filtering) {
		/* Use tag based VLAN */
		gswip_switch_mask(priv,
				  GSWIP_PCE_VCTRL_VSR,
				  GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
				  GSWIP_PCE_VCTRL_VEMR,
				  GSWIP_PCE_VCTRL(port));
		gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0,
				  GSWIP_PCE_PCTRL_0p(port));
	} else {
		/* Use port based VLAN */
		gswip_switch_mask(priv,
				  GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
				  GSWIP_PCE_VCTRL_VEMR,
				  GSWIP_PCE_VCTRL_VSR,
				  GSWIP_PCE_VCTRL(port));
		gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM,
				  GSWIP_PCE_PCTRL_0p(port));
	}

	return 0;
}

/* DSA .setup: reset the switch core, load the PCE microcode, bring up
 * the internal MDIO bus, configure the CPU port(s) for special tags and
 * flush the MAC table. Runs once at switch registration.
 */
static int gswip_setup(struct dsa_switch *ds)
{
	unsigned int cpu_ports = dsa_cpu_ports(ds);
	struct gswip_priv *priv = ds->priv;
	struct dsa_port *cpu_dp;
	int err, i;

	/* Pulse the global software reset */
	gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES);
	usleep_range(5000, 10000);
	gswip_switch_w(priv, 0, GSWIP_SWRES);

	/* disable port fetch/store dma on all ports */
	for (i = 0; i < priv->hw_info->max_ports; i++) {
		gswip_port_disable(ds, i);
		gswip_port_vlan_filtering(ds, i, false, NULL);
	}

	/* enable Switch */
	gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);

	err = gswip_pce_load_microcode(priv);
	if (err) {
		dev_err(priv->dev, "writing PCE microcode failed, %i\n", err);
		return err;
	}

	/* Default unknown Broadcast/Multicast/Unicast port maps */
	gswip_switch_w(priv, cpu_ports, GSWIP_PCE_PMAP1);
	gswip_switch_w(priv, cpu_ports, GSWIP_PCE_PMAP2);
	gswip_switch_w(priv, cpu_ports, GSWIP_PCE_PMAP3);

	/* Deactivate MDIO PHY auto polling. Some PHYs as the AR8030 have an
	 * interoperability problem with this auto polling mechanism because
	 * their status registers think that the link is in a different state
	 * than it actually is. For the AR8030 it has the BMSR_ESTATEN bit set
	 * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the
	 * auto polling state machine consider the link being negotiated with
	 * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads
	 * to the switch port being completely dead (RX and TX are both not
	 * working).
	 * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F
	 * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes
	 * it would work fine for a few minutes to hours and then stop, on
	 * other devices no traffic could be sent or received at all.
	 * Testing shows that when PHY auto polling is disabled these problems
	 * go away.
	 */
	gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);

	/* Configure the MDIO Clock 2.5 MHz */
	gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);

	/* bring up the mdio bus */
	err = gswip_mdio(priv);
	if (err) {
		dev_err(priv->dev, "mdio bus setup failed\n");
		return err;
	}

	/* Disable the xMII interface and clear its isolation bit */
	for (i = 0; i < priv->hw_info->max_ports; i++)
		gswip_mii_mask_cfg(priv,
				   GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
				   0, i);

	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
		/* enable special tag insertion on cpu port */
		gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
				  GSWIP_FDMA_PCTRLp(cpu_dp->index));

		/* accept special tag in ingress direction */
		gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
				  GSWIP_PCE_PCTRL_0p(cpu_dp->index));
	}

	gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
			  GSWIP_BM_QUEUE_GCTRL);

	/* VLAN aware Switching */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);

	/* Flush MAC Table */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0);

	err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
				     GSWIP_PCE_GCTRL_0_MTFL);
	if (err) {
		dev_err(priv->dev, "MAC flushing didn't finish\n");
		return err;
	}
	ds->mtu_enforcement_ingress = true;

	ds->configure_vlan_while_not_filtering = false;

	return 0;
}

/* DSA .get_tag_protocol: the tagger is fixed per hardware variant. */
static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
						    int port,
						    enum dsa_tag_protocol mp)
{
	struct gswip_priv *priv = ds->priv;

	return priv->hw_info->tag_protocol;
}

/* Allocate a free Active VLAN table slot (slots below max_ports are
 * reserved for the single-port bridges) and program it with @vid and
 * @fid. Pass fid == -1 to let the slot index double as the flow id.
 * Returns the slot index on success or a negative error.
 */
static int gswip_vlan_active_create(struct gswip_priv *priv,
				    struct net_device *bridge,
				    int fid, u16 vid)
{
	struct gswip_pce_table_entry vlan_active = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	int idx = -1;
	int err;
	int i;

	/* Look for a free slot */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (!priv->vlans[i].bridge) {
			idx = i;
			break;
		}
	}

	if (idx == -1)
		return -ENOSPC;

	if (fid == -1)
		fid = idx;

	vlan_active.index = idx;
	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
	vlan_active.key[0] = vid;
	vlan_active.val[0] = fid;
	vlan_active.valid = true;

	err = gswip_pce_table_entry_write(priv, &vlan_active);
	if (err) {
		dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
		return err;
	}

	/* Cache the slot assignment for later lookups */
	priv->vlans[idx].bridge = bridge;
	priv->vlans[idx].vid = vid;
	priv->vlans[idx].fid = fid;

	return idx;
}

/* Invalidate Active VLAN slot @idx in hardware and release the cached
 * slot assignment.
 */
static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
{
	struct gswip_pce_table_entry vlan_active = {0,};
	int err;

	vlan_active.index = idx;
	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
	vlan_active.valid = false;
	err = gswip_pce_table_entry_write(priv, &vlan_active);
	if (err)
		dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
	priv->vlans[idx].bridge = NULL;

	return err;
}

/* Add @port to the VLAN-unaware bridge @bridge: all member ports share
 * one Active VLAN slot with vid 0, and the port is OR-ed into the
 * slot's port map together with the CPU port(s).
 */
static int gswip_vlan_add_unaware(struct gswip_priv *priv,
				  struct net_device *bridge, int port)
{
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	bool active_vlan_created = false;
	int idx = -1;
	int i;
	int err;

	/* Check if there is already a page for this bridge */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge) {
			idx = i;
			break;
		}
	}

	/* If this bridge is not programmed yet, add an Active VLAN table
	 * entry in a free slot and prepare the VLAN mapping table entry.
	 */
	if (idx == -1) {
		idx = gswip_vlan_active_create(priv, bridge, -1, 0);
		if (idx < 0)
			return idx;
		active_vlan_created = true;

		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		/* VLAN ID byte, maps to the VLAN ID of vlan active table */
		vlan_mapping.val[0] = 0;
	} else {
		/* Read the existing VLAN mapping entry from the switch */
		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		err = gswip_pce_table_entry_read(priv, &vlan_mapping);
		if (err) {
			dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
				err);
			return err;
		}
	}

	/* Update the VLAN mapping entry and write it to the switch */
	vlan_mapping.val[1] |= dsa_cpu_ports(priv->ds);
	vlan_mapping.val[1] |= BIT(port);
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		/* In case an Active VLAN was created delete it again */
		if (active_vlan_created)
			gswip_vlan_active_remove(priv, idx);
		return err;
	}

	gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
	return 0;
}

/* Add @port to VLAN @vid of the VLAN-aware bridge @bridge. All VLANs of
 * one bridge share the same flow id (fid) so they form one learning
 * domain. @untagged controls egress tagging via the val[2] port map;
 * @pvid programs the slot index as the port's default VLAN.
 */
static int gswip_vlan_add_aware(struct gswip_priv *priv,
				struct net_device *bridge, int port,
				u16 vid, bool untagged,
				bool pvid)
{
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	unsigned int cpu_ports = dsa_cpu_ports(priv->ds);
	bool active_vlan_created = false;
	int idx = -1;
	int fid = -1;
	int i;
	int err;

	/* Check if there is already a page for this bridge */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge) {
			if (fid != -1 && fid != priv->vlans[i].fid)
				dev_err(priv->dev, "one bridge with multiple flow ids\n");
			fid = priv->vlans[i].fid;
			if (priv->vlans[i].vid == vid) {
				idx = i;
				break;
			}
		}
	}

	/* If this bridge is not programmed yet, add an Active VLAN table
	 * entry in a free slot and prepare the VLAN mapping table entry.
	 */
	if (idx == -1) {
		idx = gswip_vlan_active_create(priv, bridge, fid, vid);
		if (idx < 0)
			return idx;
		active_vlan_created = true;

		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		/* VLAN ID byte, maps to the VLAN ID of vlan active table */
		vlan_mapping.val[0] = vid;
	} else {
		/* Read the existing VLAN mapping entry from the switch */
		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		err = gswip_pce_table_entry_read(priv, &vlan_mapping);
		if (err) {
			dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
				err);
			return err;
		}
	}

	vlan_mapping.val[0] = vid;
	/* Update the VLAN mapping entry and write it to the switch */
	vlan_mapping.val[1] |= cpu_ports;
	vlan_mapping.val[2] |= cpu_ports;
	vlan_mapping.val[1] |= BIT(port);
	if (untagged)
		vlan_mapping.val[2] &= ~BIT(port);
	else
		vlan_mapping.val[2] |= BIT(port);
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		/* In case an Active VLAN was created delete it again */
		if (active_vlan_created)
			gswip_vlan_active_remove(priv, idx);
		return err;
	}

	if (pvid)
		gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port));

	return 0;
}

/* Remove @port from a bridge VLAN; in VLAN-unaware mode @vid is ignored
 * (any slot of @bridge matches). Frees the Active VLAN slot once no
 * non-CPU port references it, and clears the default PVID when @pvid.
 */
static int
gswip_vlan_remove(struct gswip_priv *priv,
		  struct net_device *bridge, int port,
		  u16 vid, bool pvid, bool vlan_aware)
{
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	int idx = -1;
	int i;
	int err;

	/* Check if there is already a page for this bridge */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge &&
		    (!vlan_aware || priv->vlans[i].vid == vid)) {
			idx = i;
			break;
		}
	}

	if (idx == -1) {
		dev_err(priv->dev, "bridge to leave does not exists\n");
		return -ENOENT;
	}

	vlan_mapping.index = idx;
	vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
	err = gswip_pce_table_entry_read(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err);
		return err;
	}

	/* Drop the port from both the member and the untagged-egress map */
	vlan_mapping.val[1] &= ~BIT(port);
	vlan_mapping.val[2] &= ~BIT(port);
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		return err;
	}

	/* In case all ports are removed from the bridge, remove the VLAN */
	if (!(vlan_mapping.val[1] & ~dsa_cpu_ports(priv->ds))) {
		err = gswip_vlan_active_remove(priv, idx);
		if (err) {
			dev_err(priv->dev, "failed to write active VLAN: %d\n",
				err);
			return err;
		}
	}

	/* GSWIP 2.2 (GRX300) and later program here the VID directly. */
	if (pvid)
		gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));

	return 0;
}

/* DSA .port_bridge_join: for VLAN-unaware bridges program the shared
 * hardware bridge entry; for VLAN-aware bridges only record the
 * filtering state (the per-VID entries come via .port_vlan_add). In
 * both cases the port leaves its single-port bridge.
 */
static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
				  struct dsa_bridge bridge,
				  bool *tx_fwd_offload,
				  struct netlink_ext_ack *extack)
{
	struct net_device *br = bridge.dev;
	struct gswip_priv *priv = ds->priv;
	int err;

	/* When the bridge uses VLAN filtering we have to configure VLAN
	 * specific bridges. No bridge is configured here.
	 */
	if (!br_vlan_enabled(br)) {
		err = gswip_vlan_add_unaware(priv, br, port);
		if (err)
			return err;
		priv->port_vlan_filter &= ~BIT(port);
	} else {
		priv->port_vlan_filter |= BIT(port);
	}
	return gswip_add_single_port_br(priv, port, false);
}

/* DSA .port_bridge_leave: restore the single-port bridge and, for
 * VLAN-unaware bridges, remove the port from the shared bridge entry.
 */
static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
				    struct dsa_bridge bridge)
{
	struct net_device *br = bridge.dev;
	struct gswip_priv *priv = ds->priv;

	gswip_add_single_port_br(priv, port, true);

	/* When the bridge uses VLAN filtering we have to configure VLAN
	 * specific bridges. No bridge is configured here.
	 */
	if (!br_vlan_enabled(br))
		gswip_vlan_remove(priv, br, port, 0, true, false);
}

/* Validate that a VLAN can be programmed: the port must be bridged (or
 * be the CPU port) and either an existing slot for (bridge, vid) or a
 * free Active VLAN slot must be available.
 */
static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct netlink_ext_ack *extack)
{
	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
	struct gswip_priv *priv = ds->priv;
	unsigned int max_ports = priv->hw_info->max_ports;
	int pos = max_ports;
	int i, idx = -1;

	/* We only support VLAN filtering on bridges */
	if (!dsa_is_cpu_port(ds, port) && !bridge)
		return -EOPNOTSUPP;

	/* Check if there is already a page for this VLAN */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge &&
		    priv->vlans[i].vid == vlan->vid) {
			idx = i;
			break;
		}
	}

	/* If this VLAN is not programmed yet, we have to reserve
	 * one entry in the VLAN table. Make sure we start at the
	 * next position round.
	 */
	if (idx == -1) {
		/* Look for a free slot */
		for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
			if (!priv->vlans[pos].bridge) {
				idx = pos;
				pos++;
				break;
			}
		}

		if (idx == -1) {
			NL_SET_ERR_MSG_MOD(extack, "No slot in VLAN table");
			return -ENOSPC;
		}
	}

	return 0;
}

/* DSA .port_vlan_add: program (bridge, vid) membership for the port.
 * The CPU port is left untouched — it must receive all packets.
 */
static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_vlan *vlan,
			       struct netlink_ext_ack *extack)
{
	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
	struct gswip_priv *priv = ds->priv;
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	int err;

	err = gswip_port_vlan_prepare(ds, port, vlan, extack);
	if (err)
		return err;

	/* We have to receive all packets on the CPU port and should not
	 * do any VLAN filtering here. This is also called with bridge
	 * NULL and then we do not know for which bridge to configure
	 * this.
	 */
	if (dsa_is_cpu_port(ds, port))
		return 0;

	return gswip_vlan_add_aware(priv, bridge, port, vlan->vid,
				    untagged, pvid);
}

/* DSA .port_vlan_del: remove (bridge, vid) membership from the port.
 * The CPU port is left untouched for the same reason as in add.
 */
static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_vlan *vlan)
{
	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
	struct gswip_priv *priv = ds->priv;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	/* We have to receive all packets on the CPU port and should not
	 * do any VLAN filtering here. This is also called with bridge
	 * NULL and then we do not know for which bridge to configure
	 * this.
	 */
	if (dsa_is_cpu_port(ds, port))
		return 0;

	return gswip_vlan_remove(priv, bridge, port, vlan->vid, pvid, true);
}

/* DSA .port_fast_age: walk all 2048 MAC bridge table entries and
 * invalidate every valid, dynamically-learned entry belonging to
 * @port. Static entries are kept.
 */
static void gswip_port_fast_age(struct dsa_switch *ds, int port)
{
	struct gswip_priv *priv = ds->priv;
	struct gswip_pce_table_entry mac_bridge = {0,};
	int i;
	int err;

	for (i = 0; i < 2048; i++) {
		mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
		mac_bridge.index = i;

		err = gswip_pce_table_entry_read(priv, &mac_bridge);
		if (err) {
			dev_err(priv->dev, "failed to read mac bridge: %d\n",
				err);
			return;
		}

		if (!mac_bridge.valid)
			continue;

		if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC)
			continue;

		if (port != FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT,
				      mac_bridge.val[0]))
			continue;

		mac_bridge.valid = false;
		err = gswip_pce_table_entry_write(priv, &mac_bridge);
		if (err) {
			dev_err(priv->dev, "failed to write mac bridge: %d\n",
				err);
			return;
		}
	}
}

/* DSA .port_stp_state_set: map bridge STP states onto the hardware
 * port state field. BR_STATE_DISABLED stops the store DMA entirely;
 * the other states re-enable it and program the PSTATE bits.
 */
static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
	struct gswip_priv *priv = ds->priv;
	u32 stp_state;

	switch (state) {
	case BR_STATE_DISABLED:
		gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
				  GSWIP_SDMA_PCTRLp(port));
		return;
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
		break;
	case BR_STATE_LEARNING:
		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
		break;
	case BR_STATE_FORWARDING:
		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
		break;
	default:
		dev_err(priv->dev, "invalid STP state: %d\n", state);
		return;
	}

	gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
			  GSWIP_SDMA_PCTRLp(port));
	gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state,
			  GSWIP_PCE_PCTRL_0p(port));
}

/* Add or remove a static FDB entry for @port.
 *
 * Looks up the FID assigned to the bridge the port is a member of
 * (scanning the per-bridge vlans[] slots above the per-port ones),
 * then writes a static MAC bridging table entry keyed on MAC+FID.
 * For @add the port map value is BIT(port); for removal the entry is
 * written with valid == false.
 *
 * NOTE(review): @vid is not used as part of the key here - the entry
 * is keyed on the bridge FID instead; confirm this matches the
 * hardware's VLAN/bridge model described at the top of the file.
 *
 * Returns 0 on success or a negative error code (-EINVAL if the port
 * is not bridged or no FID is found, or the table-write error).
 */
static int gswip_port_fdb(struct dsa_switch *ds, int port,
			  const unsigned char *addr, u16 vid, bool add)
{
	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
	struct gswip_priv *priv = ds->priv;
	struct gswip_pce_table_entry mac_bridge = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	int fid = -1;
	int i;
	int err;

	if (!bridge)
		return -EINVAL;

	/* Bridge entries live above the per-port standalone entries */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge) {
			fid = priv->vlans[i].fid;
			break;
		}
	}

	if (fid == -1) {
		dev_err(priv->dev, "no FID found for bridge %s\n",
			bridge->name);
		return -EINVAL;
	}

	/* Key: MAC address packed into three 16-bit words, plus the FID */
	mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
	mac_bridge.key_mode = true;
	mac_bridge.key[0] = addr[5] | (addr[4] << 8);
	mac_bridge.key[1] = addr[3] | (addr[2] << 8);
	mac_bridge.key[2] = addr[1] | (addr[0] << 8);
	mac_bridge.key[3] = FIELD_PREP(GSWIP_TABLE_MAC_BRIDGE_KEY3_FID, fid);
	mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
	mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC;
	mac_bridge.valid = add;

	err = gswip_pce_table_entry_write(priv, &mac_bridge);
	if (err)
		dev_err(priv->dev, "failed to write mac bridge: %d\n", err);

	return err;
}

/* DSA .port_fdb_add - install a static FDB entry. */
static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
			      const unsigned char *addr, u16 vid,
			      struct dsa_db db)
{
	return gswip_port_fdb(ds, port, addr, vid, true);
}

/* DSA .port_fdb_del - remove a static FDB entry. */
static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
			      const unsigned char *addr, u16 vid,
			      struct dsa_db db)
{
	return gswip_port_fdb(ds, port, addr, vid, false);
}

/* DSA .port_fdb_dump - report FDB entries belonging to @port.
 *
 * Iterates the whole MAC bridging table. Static entries carry a port
 * bitmap in val[0]; learned entries carry a single port number in the
 * VAL0 port field. The callback is invoked with vid 0 in both cases.
 */
static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
			       dsa_fdb_dump_cb_t *cb, void *data)
{
	struct gswip_priv *priv = ds->priv;
	struct gswip_pce_table_entry mac_bridge = {0,};
	unsigned char addr[ETH_ALEN];
	int i;
	int err;

	for (i = 0; i < 2048; i++) {
		mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
		mac_bridge.index = i;

		err = gswip_pce_table_entry_read(priv, &mac_bridge);
		if (err) {
			dev_err(priv->dev,
				"failed to read mac bridge entry %d: %d\n",
				i, err);
			return err;
		}

		if (!mac_bridge.valid)
			continue;

		/* Unpack the MAC address from the three 16-bit key words */
		addr[5] = mac_bridge.key[0] & 0xff;
		addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
		addr[3] = mac_bridge.key[1] & 0xff;
		addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
		addr[1] = mac_bridge.key[2] & 0xff;
		addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
		if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC) {
			if (mac_bridge.val[0] & BIT(port)) {
				err = cb(addr, 0, true, data);
				if (err)
					return err;
			}
		} else {
			if (port == FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT,
					      mac_bridge.val[0])) {
				err = cb(addr, 0, false, data);
				if (err)
					return err;
			}
		}
	}
	return 0;
}

/* DSA .port_max_mtu - largest MTU the hardware frame length allows. */
static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
{
	/* Includes 8 bytes for special header. */
	return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
}

/* DSA .port_change_mtu - apply a new MTU to @port.
 *
 * The global frame length register is only written for the CPU port
 * (which always carries the maximum of all user-port MTUs); the
 * per-port MLEN bit is toggled so non-standard frame sizes are
 * accepted on the port itself.
 */
static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct gswip_priv *priv = ds->priv;

	/* CPU port always has maximum mtu of user ports, so use it to set
	 * switch frame size, including 8 byte special header.
	 */
	if (dsa_is_cpu_port(ds, port)) {
		new_mtu += 8;
		gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN,
			       GSWIP_MAC_FLEN);
	}

	/* Enable MLEN for ports with non-standard MTUs, including the special
	 * header on the CPU port added above.
	 */
	if (new_mtu != ETH_DATA_LEN)
		gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
				  GSWIP_MAC_CTRL_2p(port));
	else
		gswip_switch_mask(priv, GSWIP_MAC_CTRL_2_MLEN, 0,
				  GSWIP_MAC_CTRL_2p(port));

	return 0;
}

/* phylink get_caps for xRX200: ports 0/1 are external (R)(G)MII,
 * ports 2/3/4/6 are internal only, port 5 is RGMII or internal.
 */
static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
					  struct phylink_config *config)
{
	switch (port) {
	case 0:
	case 1:
		phy_interface_set_rgmii(config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_MII,
			  config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_REVMII,
			  config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_RMII,
			  config->supported_interfaces);
		break;

	case 2:
	case 3:
	case 4:
	case 6:
		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
			  config->supported_interfaces);
		break;

	case 5:
		phy_interface_set_rgmii(config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
			  config->supported_interfaces);
		break;
	}

	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000;
}

static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds,
					  int port,
					  struct phylink_config *config)
{
	/* xRX300 port map: port 0 is external (RGMII/GMII/RMII),
	 * ports 1-4 and 6 are internal, port 5 is RGMII/internal/RMII.
	 */
	switch (port) {
	case 0:
		phy_interface_set_rgmii(config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_GMII,
			  config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_RMII,
			  config->supported_interfaces);
		break;

	case 1:
	case 2:
	case 3:
	case 4:
	case 6:
		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
			  config->supported_interfaces);
		break;

	case 5:
		phy_interface_set_rgmii(config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
			  config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_RMII,
			  config->supported_interfaces);
		break;
	}

	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000;
}

/* DSA .phylink_get_caps - dispatch to the SoC-specific variant. */
static void gswip_phylink_get_caps(struct dsa_switch *ds, int port,
				   struct phylink_config *config)
{
	struct gswip_priv *priv = ds->priv;

	priv->hw_info->phylink_get_caps(ds, port, config);
}

/* Force the MDIO PHY link state for @port up or down. */
static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
{
	u32 mdio_phy;

	if (link)
		mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
	else
		mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;

	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
			GSWIP_MDIO_PHYp(port));
}

/* Program speed-dependent settings for @port: the MDIO PHY speed
 * field, the MII clock rate (RMII always needs the 50 MHz reference
 * clock), and the MAC's (G)MII mode selection.
 */
static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
				 phy_interface_t interface)
{
	u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;

	switch (speed) {
	case SPEED_10:
		mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;

		if (interface == PHY_INTERFACE_MODE_RMII)
			mii_cfg = GSWIP_MII_CFG_RATE_M50;
		else
			mii_cfg = GSWIP_MII_CFG_RATE_M2P5;

		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
		break;

	case SPEED_100:
		mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;

		if (interface == PHY_INTERFACE_MODE_RMII)
			mii_cfg = GSWIP_MII_CFG_RATE_M50;
		else
			mii_cfg = GSWIP_MII_CFG_RATE_M25;

		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
		break;

	case SPEED_1000:
		mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;

		mii_cfg = GSWIP_MII_CFG_RATE_M125;

		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
		break;
	}

	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
			GSWIP_MDIO_PHYp(port));
	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
			  GSWIP_MAC_CTRL_0p(port));
}

/* Program full/half duplex into both the MAC and the MDIO PHY bits. */
static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
{
	u32 mac_ctrl_0, mdio_phy;

	if (duplex == DUPLEX_FULL) {
		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
		mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
	} else {
		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
		mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
	}

	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
			  GSWIP_MAC_CTRL_0p(port));
	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
			GSWIP_MDIO_PHYp(port));
}

/* Program the pause (flow control) configuration for @port into both
 * the MAC flow-control field and the MDIO PHY TX/RX pause bits.
 */
static void gswip_port_set_pause(struct gswip_priv *priv, int port,
				 bool tx_pause, bool rx_pause)
{
	u32 mac_ctrl_0, mdio_phy;

	if (tx_pause && rx_pause) {
		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
		mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
			   GSWIP_MDIO_PHY_FCONRX_EN;
	} else if (tx_pause) {
		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
		mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
			   GSWIP_MDIO_PHY_FCONRX_DIS;
	} else if (rx_pause) {
		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
		mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
			   GSWIP_MDIO_PHY_FCONRX_EN;
	} else {
		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
		mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
			   GSWIP_MDIO_PHY_FCONRX_DIS;
	}

	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
			  mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
	gswip_mdio_mask(priv,
			GSWIP_MDIO_PHY_FCONTX_MASK |
			GSWIP_MDIO_PHY_FCONRX_MASK,
			mdio_phy, GSWIP_MDIO_PHYp(port));
}

/* phylink .mac_config - select the MII mode for the port and clear the
 * RGMII clock delays for the _ID variants (delays are then expected to
 * be added by the PHY or board).
 *
 * SGMII/1000BASE-X/2500BASE-X return early without touching the MII
 * configuration; presumably those are handled entirely by a PCS -
 * see gswip_phylink_mac_select_pcs().
 */
static void gswip_phylink_mac_config(struct phylink_config *config,
				     unsigned int mode,
				     const struct phylink_link_state *state)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct gswip_priv *priv = dp->ds->priv;
	int port = dp->index;
	u32 miicfg = 0;

	miicfg |= GSWIP_MII_CFG_LDCLKDIS;

	switch (state->interface) {
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		return;
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_INTERNAL:
		miicfg |= GSWIP_MII_CFG_MODE_MIIM;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		miicfg |= GSWIP_MII_CFG_MODE_MIIP;
		break;
	case PHY_INTERFACE_MODE_RMII:
		miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		miicfg |= GSWIP_MII_CFG_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_GMII:
		miicfg |= GSWIP_MII_CFG_MODE_GMII;
		break;
	default:
		dev_err(dp->ds->dev,
			"Unsupported interface: %d\n", state->interface);
		return;
	}

	gswip_mii_mask_cfg(priv,
			   GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
			   GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
			   miicfg, port);

	/* Clear the switch-side TX/RX delays for the RGMII delay modes */
	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII_ID:
		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK |
				    GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
		break;
	case PHY_INTERFACE_MODE_RGMII_RXID:
		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port);
		break;
	default:
		break;
	}
}

/* phylink .mac_link_down - disable the MII interface; for non-CPU
 * ports also force the MDIO PHY link state down.
 */
static void gswip_phylink_mac_link_down(struct phylink_config *config,
					unsigned int mode,
					phy_interface_t interface)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct gswip_priv *priv = dp->ds->priv;
	int port = dp->index;

	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);

	if (!dsa_port_is_cpu(dp))
		gswip_port_set_link(priv, port, false);
}

/* phylink .mac_link_up - for non-CPU ports program link, speed, duplex
 * and pause, then enable the MII interface.
 */
static void gswip_phylink_mac_link_up(struct phylink_config *config,
				      struct phy_device *phydev,
				      unsigned int mode,
				      phy_interface_t interface,
				      int speed, int duplex,
				      bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct gswip_priv *priv = dp->ds->priv;
	int port = dp->index;

	if (!dsa_port_is_cpu(dp)) {
		gswip_port_set_link(priv, port, true);
		gswip_port_set_speed(priv, port, speed, interface);
		gswip_port_set_duplex(priv, port, duplex);
		gswip_port_set_pause(priv, port, tx_pause, rx_pause);
	}

	gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
}

/* ethtool .get_strings - emit the RMON counter names. */
static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
			      uint8_t *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
		ethtool_puts(&data, gswip_rmon_cnt[i].name);
}

/* Read one 32-bit value from a BM RAM table, busy-waiting for the
 * hardware to complete the access; returns 0 on timeout.
 */
static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
				    u32 index)
{
	u32 result;
	int err;

	gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR);
	gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK |
			  GSWIP_BM_RAM_CTRL_OPMOD,
			  table | GSWIP_BM_RAM_CTRL_BAS,
			  GSWIP_BM_RAM_CTRL);

	err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
				     GSWIP_BM_RAM_CTRL_BAS);
	if (err) {
		dev_err(priv->dev, "timeout while reading table: %u, index: %u\n",
			table, index);
		return
0;
	}

	/* Value is split across two 16-bit BM RAM data registers */
	result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0));
	result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16;

	return result;
}

/* ethtool .get_ethtool_stats - read all RMON counters for @port.
 * Two-word counters combine a second RAM entry as the high 32 bits.
 */
static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
				    uint64_t *data)
{
	struct gswip_priv *priv = ds->priv;
	const struct gswip_rmon_cnt_desc *rmon_cnt;
	int i;
	u64 high;

	for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
		rmon_cnt = &gswip_rmon_cnt[i];

		data[i] = gswip_bcm_ram_entry_read(priv, port,
						   rmon_cnt->offset);
		if (rmon_cnt->size == 2) {
			high = gswip_bcm_ram_entry_read(priv, port,
							rmon_cnt->offset + 1);
			data[i] |= high << 32;
		}
	}
}

/* ethtool .get_sset_count - number of RMON counters exposed. */
static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	if (sset != ETH_SS_STATS)
		return 0;

	return ARRAY_SIZE(gswip_rmon_cnt);
}

/* phylink .mac_select_pcs - delegate to the SoC-specific hook, if any. */
static struct phylink_pcs *gswip_phylink_mac_select_pcs(struct phylink_config *config,
							phy_interface_t interface)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct gswip_priv *priv = dp->ds->priv;

	if (priv->hw_info->mac_select_pcs)
		return priv->hw_info->mac_select_pcs(config, interface);

	return NULL;
}

static const struct phylink_mac_ops gswip_phylink_mac_ops = {
	.mac_config = gswip_phylink_mac_config,
	.mac_link_down = gswip_phylink_mac_link_down,
	.mac_link_up = gswip_phylink_mac_link_up,
	.mac_select_pcs = gswip_phylink_mac_select_pcs,
};

static const struct dsa_switch_ops gswip_switch_ops = {
	.get_tag_protocol = gswip_get_tag_protocol,
	.setup = gswip_setup,
	.port_enable = gswip_port_enable,
	.port_disable = gswip_port_disable,
	.port_bridge_join = gswip_port_bridge_join,
	.port_bridge_leave = gswip_port_bridge_leave,
	.port_fast_age = gswip_port_fast_age,
	.port_vlan_filtering = gswip_port_vlan_filtering,
	.port_vlan_add = gswip_port_vlan_add,
	.port_vlan_del = gswip_port_vlan_del,
	.port_stp_state_set = gswip_port_stp_state_set,
	.port_fdb_add = gswip_port_fdb_add,
	.port_fdb_del = gswip_port_fdb_del,
	.port_fdb_dump = gswip_port_fdb_dump,
	.port_change_mtu = gswip_port_change_mtu,
	.port_max_mtu = gswip_port_max_mtu,
	.phylink_get_caps = gswip_phylink_get_caps,
	.get_strings = gswip_get_strings,
	.get_ethtool_stats = gswip_get_ethtool_stats,
	.get_sset_count = gswip_get_sset_count,
};

/* GPHY firmware file names per SoC/stepping (FE = Fast Ethernet
 * PHY22F, GE = Gigabit Ethernet PHY11G).
 */
static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
	.fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
	.ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
};

static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
	.fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
	.ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
};

static const struct xway_gphy_match_data xrx300_gphy_data = {
	.fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
	.ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
};

/* "lantiq,xrx200-gphy-fw" has NULL data: the firmware set depends on
 * the GSWIP revision and is resolved at runtime in gswip_gphy_fw_list().
 */
static const struct of_device_id xway_gphy_match[] __maybe_unused = {
	{ .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
	{ .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
	{ .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
	{ .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
	{ .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
	{},
};

/* Load the GPHY firmware image into DMA-able memory and point the RCU
 * at it, keeping the GPHY in reset while the image is installed.
 */
static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw)
{
	struct device *dev = priv->dev;
	const struct firmware *fw;
	void *fw_addr;
	dma_addr_t dma_addr;
	dma_addr_t dev_addr;
	size_t size;
	int ret;

	ret = clk_prepare_enable(gphy_fw->clk_gate);
	if (ret)
		return ret;

	reset_control_assert(gphy_fw->reset);

	/* The vendor BSP uses a 200ms delay after asserting the reset line.
	 * Without this some users are observing that the PHY is not coming up
	 * on the MDIO bus.
	 */
	msleep(200);

	ret = request_firmware(&fw, gphy_fw->fw_name, dev);
	if (ret)
		return dev_err_probe(dev, ret, "failed to load firmware: %s\n",
				     gphy_fw->fw_name);

	/* GPHY cores need the firmware code in a persistent and contiguous
	 * memory area with a 16 kB boundary aligned start address.
	 */
	size = fw->size + XRX200_GPHY_FW_ALIGN;

	/* Over-allocate so the image can be placed at an aligned offset;
	 * the device-visible address is aligned the same way.
	 */
	fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
	if (fw_addr) {
		fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
		dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
		memcpy(fw_addr, fw->data, fw->size);
	} else {
		release_firmware(fw);
		return -ENOMEM;
	}

	release_firmware(fw);

	/* Tell the RCU where the GPHY can fetch its firmware from */
	ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
	if (ret)
		return ret;

	reset_control_deassert(gphy_fw->reset);

	return ret;
}

/* Parse one GPHY firmware child node (clock gate, RCU address offset,
 * FE/GE mode, reset line) and load the matching firmware image.
 */
static int gswip_gphy_fw_probe(struct gswip_priv *priv,
			       struct gswip_gphy_fw *gphy_fw,
			       struct device_node *gphy_fw_np, int i)
{
	struct device *dev = priv->dev;
	u32 gphy_mode;
	int ret;
	char gphyname[10];

	snprintf(gphyname, sizeof(gphyname), "gphy%d", i);

	gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
	if (IS_ERR(gphy_fw->clk_gate)) {
		return dev_err_probe(dev, PTR_ERR(gphy_fw->clk_gate),
				     "Failed to lookup gate clock\n");
	}

	ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
	if (ret)
		return ret;

	ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
	/* Default to GE mode */
	if (ret)
		gphy_mode = GPHY_MODE_GE;

	switch (gphy_mode) {
	case GPHY_MODE_FE:
		gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
		break;
	case GPHY_MODE_GE:
		gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
		break;
	default:
		return dev_err_probe(dev, -EINVAL, "Unknown GPHY mode %d\n",
				     gphy_mode);
	}

	gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
	if (IS_ERR(gphy_fw->reset))
		return dev_err_probe(dev, PTR_ERR(gphy_fw->reset),
				     "Failed to lookup gphy reset\n");

	return gswip_gphy_fw_load(priv, gphy_fw);
}

/* Undo gswip_gphy_fw_probe(): clear the RCU firmware pointer, gate the
 * clock and release the reset line.
 */
static void gswip_gphy_fw_remove(struct gswip_priv *priv,
				 struct gswip_gphy_fw *gphy_fw)
{
	int ret;

	/* check if the device was fully probed */
	if (!gphy_fw->fw_name)
		return;

	ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
	if (ret)
		dev_err(priv->dev, "can not reset GPHY FW pointer\n");

	clk_disable_unprepare(gphy_fw->clk_gate);

	reset_control_put(gphy_fw->reset);
}

/* Resolve the firmware set for this SoC/GSWIP revision and probe every
 * available GPHY firmware child node. On failure all (partially)
 * probed GPHYs are torn down again.
 *
 * Returns 0 on success or a negative error code.
 */
static int gswip_gphy_fw_list(struct gswip_priv *priv,
			      struct device_node *gphy_fw_list_np, u32 version)
{
	struct device *dev = priv->dev;
	struct device_node *gphy_fw_np;
	const struct of_device_id *match;
	int err;
	int i = 0;

	/* The VRX200 rev 1.1 uses the GSWIP 2.0 and needs the older
	 * GPHY firmware. The VRX200 rev 1.2 uses the GSWIP 2.1 and also
	 * needs a different GPHY firmware.
	 */
	if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
		switch (version) {
		case GSWIP_VERSION_2_0:
			priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
			break;
		case GSWIP_VERSION_2_1:
			priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
			break;
		default:
			return dev_err_probe(dev, -ENOENT,
					     "unknown GSWIP version: 0x%x\n",
					     version);
		}
	}

	match = of_match_node(xway_gphy_match, gphy_fw_list_np);
	if (match && match->data)
		priv->gphy_fw_name_cfg = match->data;

	if (!priv->gphy_fw_name_cfg)
		return dev_err_probe(dev, -ENOENT,
				     "GPHY compatible type not supported\n");

	priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
	if (!priv->num_gphy_fw)
		return -ENOENT;

	priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
							   "lantiq,rcu");
	if (IS_ERR(priv->rcu_regmap))
		return PTR_ERR(priv->rcu_regmap);

	priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
					   sizeof(*priv->gphy_fw),
					   GFP_KERNEL | __GFP_ZERO);
	if (!priv->gphy_fw)
		return -ENOMEM;

	for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
		err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
					  gphy_fw_np, i);
		if (err) {
			of_node_put(gphy_fw_np);
			goto remove_gphy;
		}
		i++;
	}

	/* The standalone PHY11G requires 300ms to be fully
	 * initialized and ready for any MDIO communication after being
	 * taken out of reset. For the SoC-internal GPHY variant there
	 * is no (known) documentation for the minimum time after a
	 * reset. Use the same value as for the standalone variant as
	 * some users have reported internal PHYs not being detected
	 * without any delay.
	 */
	msleep(300);

	return 0;

remove_gphy:
	/* gswip_gphy_fw_remove() skips entries that never finished probing */
	for (i = 0; i < priv->num_gphy_fw; i++)
		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
	return err;
}

/* Check the devicetree CPU port setup: exactly one CPU port, and it
 * must be one the hardware variant allows.
 */
static int gswip_validate_cpu_port(struct dsa_switch *ds)
{
	struct gswip_priv *priv = ds->priv;
	struct dsa_port *cpu_dp;
	int cpu_port = -1;

	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
		if (cpu_port != -1)
			return dev_err_probe(ds->dev, -EINVAL,
					     "only a single CPU port is supported\n");

		cpu_port = cpu_dp->index;
	}

	if (cpu_port == -1)
		return dev_err_probe(ds->dev, -EINVAL, "no CPU port defined\n");

	if (BIT(cpu_port) & ~priv->hw_info->allowed_cpu_ports)
		return dev_err_probe(ds->dev, -EINVAL,
				     "unsupported CPU port defined\n");

	return 0;
}

/* Platform probe: map the register ranges, match the hardware variant,
 * load GPHY firmware (if described in DT) and register the DSA switch.
 */
static int gswip_probe(struct platform_device *pdev)
{
	struct device_node *np, *gphy_fw_np;
	struct device *dev = &pdev->dev;
	struct gswip_priv *priv;
	int err;
	int i;
	u32 version;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->gswip = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->gswip))
		return PTR_ERR(priv->gswip);

	priv->mdio = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(priv->mdio))
		return PTR_ERR(priv->mdio);

	priv->mii = devm_platform_ioremap_resource(pdev, 2);
	if (IS_ERR(priv->mii))
		return PTR_ERR(priv->mii);

	priv->hw_info = of_device_get_match_data(dev);
	if (!priv->hw_info)
		return -EINVAL;

	priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
	if (!priv->ds)
		return -ENOMEM;

	priv->ds->dev = dev;
	priv->ds->num_ports = priv->hw_info->max_ports;
	priv->ds->priv = priv;
	priv->ds->ops = &gswip_switch_ops;
	priv->ds->phylink_mac_ops = &gswip_phylink_mac_ops;
	priv->dev = dev;
	mutex_init(&priv->pce_table_lock);
	version = gswip_switch_r(priv, GSWIP_VERSION);

	/* The hardware has the 'major/minor' version bytes in the wrong order
	 * preventing numerical comparisons. Construct a 16-bit unsigned integer
	 * having the REV field as most significant byte and the MOD field as
	 * least significant byte. This is effectively swapping the two bytes of
	 * the version variable, but other than using swab16 it doesn't affect
	 * the source variable.
	 */
	priv->version = GSWIP_VERSION_REV(version) << 8 |
			GSWIP_VERSION_MOD(version);

	/* Cross-check the detected GSWIP revision against the DT compatible */
	np = dev->of_node;
	switch (version) {
	case GSWIP_VERSION_2_0:
	case GSWIP_VERSION_2_1:
		if (!of_device_is_compatible(np, "lantiq,xrx200-gswip"))
			return -EINVAL;
		break;
	case GSWIP_VERSION_2_2:
	case GSWIP_VERSION_2_2_ETC:
		if (!of_device_is_compatible(np, "lantiq,xrx300-gswip") &&
		    !of_device_is_compatible(np, "lantiq,xrx330-gswip"))
			return -EINVAL;
		break;
	default:
		return dev_err_probe(dev, -ENOENT,
				     "unknown GSWIP version: 0x%x\n", version);
	}

	/* bring up the mdio bus */
	gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
	if (gphy_fw_np) {
		err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
		of_node_put(gphy_fw_np);
		if (err)
			return dev_err_probe(dev, err,
					     "gphy fw probe failed\n");
	}

	err = dsa_register_switch(priv->ds);
	if (err) {
		dev_err_probe(dev, err, "dsa switch registration failed\n");
		goto gphy_fw_remove;
	}

	/* The CPU port layout is only known after dsa_register_switch() */
	err = gswip_validate_cpu_port(priv->ds);
	if (err)
		goto disable_switch;

	platform_set_drvdata(pdev, priv);

	dev_info(dev, "probed GSWIP version %lx mod %lx\n",
		 GSWIP_VERSION_REV(version), GSWIP_VERSION_MOD(version));
	return 0;

disable_switch:
	gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
	dsa_unregister_switch(priv->ds);

gphy_fw_remove:
	for (i = 0; i < priv->num_gphy_fw; i++)
		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
	return err;
}

/* Platform remove: disable the switch, unregister from DSA and tear
 * down the GPHY firmware instances.
 */
static void gswip_remove(struct platform_device *pdev)
{
	struct gswip_priv *priv = platform_get_drvdata(pdev);
	int i;

	if (!priv)
		return;

	/* disable the switch */
	gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);

	dsa_unregister_switch(priv->ds);

	for (i = 0; i < priv->num_gphy_fw; i++)
		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
}

/* Platform shutdown: quiesce the DSA switch without a full remove. */
static void gswip_shutdown(struct platform_device *pdev)
{
	struct gswip_priv *priv = platform_get_drvdata(pdev);

	if (!priv)
		return;

	dsa_switch_shutdown(priv->ds);

	platform_set_drvdata(pdev, NULL);
}

/* Per-SoC configuration: port counts, which ports may be the CPU port,
 * which ports have xMII configuration registers, and variant hooks.
 */
static const struct gswip_hw_info gswip_xrx200 = {
	.max_ports = 7,
	.allowed_cpu_ports = BIT(6),
	.mii_ports = BIT(0) | BIT(1) | BIT(5),
	.mii_port_reg_offset = 0,
	.phylink_get_caps = gswip_xrx200_phylink_get_caps,
	.pce_microcode = &gswip_pce_microcode,
	.pce_microcode_size = ARRAY_SIZE(gswip_pce_microcode),
	.tag_protocol = DSA_TAG_PROTO_GSWIP,
};

static const struct gswip_hw_info gswip_xrx300 = {
	.max_ports = 7,
	.allowed_cpu_ports = BIT(6),
	.mii_ports = BIT(0) | BIT(5),
	.mii_port_reg_offset = 0,
	.phylink_get_caps = gswip_xrx300_phylink_get_caps,
	.pce_microcode = &gswip_pce_microcode,
	.pce_microcode_size = ARRAY_SIZE(gswip_pce_microcode),
	.tag_protocol = DSA_TAG_PROTO_GSWIP,
};

static const struct of_device_id gswip_of_match[] = {
	{ .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
	{ .compatible = "lantiq,xrx300-gswip", .data = &gswip_xrx300 },
	{ .compatible = "lantiq,xrx330-gswip", .data = &gswip_xrx300 },
	{},
};
MODULE_DEVICE_TABLE(of, gswip_of_match);

static struct platform_driver gswip_driver = {
	.probe = gswip_probe,
	.remove = gswip_remove,
	.shutdown = gswip_shutdown,
	.driver = {
		.name = "gswip",
		.of_match_table = gswip_of_match,
	},
};

module_platform_driver(gswip_driver);

/* GPHY firmware images this driver may request at runtime */
MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
MODULE_LICENSE("GPL v2");