// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Motorcomm YT921x Switch
 *
 * Should work on YT9213/YT9214/YT9215/YT9218, but only tested on YT9215+SGMII;
 * be sure to do your own checks before porting to another chip.
 *
 * Copyright (c) 2025 David Yang
 */

#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/if_hsr.h>
#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include <net/dsa.h>

#include "yt921x.h"

struct yt921x_mib_desc {
	unsigned int size;
	unsigned int offset;
	const char *name;
};

#define MIB_DESC(_size, _offset, _name) \
	{_size, _offset, _name}

/* Must agree with yt921x_mib
 *
 * Unstructured fields (name != NULL) appear in get_ethtool_stats(); structured
 * ones are reported via their *_stats() methods instead, but we still need
 * their sizes and offsets to perform 32-bit MIB overflow wraparound.
 */
static const struct yt921x_mib_desc yt921x_mib_descs[] = {
	MIB_DESC(1, 0x00, NULL),	/* RxBroadcast */
	MIB_DESC(1, 0x04, NULL),	/* RxPause */
	MIB_DESC(1, 0x08, NULL),	/* RxMulticast */
	MIB_DESC(1, 0x0c, NULL),	/* RxCrcErr */

	MIB_DESC(1, 0x10, NULL),	/* RxAlignErr */
	MIB_DESC(1, 0x14, NULL),	/* RxUnderSizeErr */
	MIB_DESC(1, 0x18, NULL),	/* RxFragErr */
	MIB_DESC(1, 0x1c, NULL),	/* RxPktSz64 */

	MIB_DESC(1, 0x20, NULL),	/* RxPktSz65To127 */
	MIB_DESC(1, 0x24, NULL),	/* RxPktSz128To255 */
	MIB_DESC(1, 0x28, NULL),	/* RxPktSz256To511 */
	MIB_DESC(1, 0x2c, NULL),	/* RxPktSz512To1023 */

	MIB_DESC(1, 0x30, NULL),	/* RxPktSz1024To1518 */
	MIB_DESC(1, 0x34, NULL),	/* RxPktSz1519ToMax */
	MIB_DESC(2, 0x38, NULL),	/* RxGoodBytes */
	/* 0x3c */

	MIB_DESC(2, 0x40, "RxBadBytes"),
	/* 0x44 */
	MIB_DESC(2, 0x48, NULL),	/* RxOverSzErr */
	/* 0x4c */

	MIB_DESC(1, 0x50, NULL),	/* RxDropped */
	MIB_DESC(1, 0x54, NULL),	/* TxBroadcast */
	MIB_DESC(1, 0x58, NULL),	/* TxPause */
	MIB_DESC(1, 0x5c, NULL),	/* TxMulticast */

	MIB_DESC(1, 0x60, NULL),	/* TxUnderSizeErr */
	MIB_DESC(1, 0x64, NULL),	/* TxPktSz64 */
	MIB_DESC(1, 0x68, NULL),	/* TxPktSz65To127 */
	MIB_DESC(1, 0x6c, NULL),	/* TxPktSz128To255 */

	MIB_DESC(1, 0x70, NULL),	/* TxPktSz256To511 */
	MIB_DESC(1, 0x74, NULL),	/* TxPktSz512To1023 */
	MIB_DESC(1, 0x78, NULL),	/* TxPktSz1024To1518 */
	MIB_DESC(1, 0x7c, NULL),	/* TxPktSz1519ToMax */

	MIB_DESC(2, 0x80, NULL),	/* TxGoodBytes */
	/* 0x84 */
	MIB_DESC(2, 0x88, NULL),	/* TxCollision */
	/* 0x8c */

	MIB_DESC(1, 0x90, NULL),	/* TxExcessiveCollision */
	MIB_DESC(1, 0x94, NULL),	/* TxMultipleCollision */
	MIB_DESC(1, 0x98, NULL),	/* TxSingleCollision */
	MIB_DESC(1, 0x9c, NULL),	/* TxPkt */

	MIB_DESC(1, 0xa0, NULL),	/* TxDeferred */
	MIB_DESC(1, 0xa4, NULL),	/* TxLateCollision */
	MIB_DESC(1, 0xa8, "RxOAM"),
	MIB_DESC(1, 0xac, "TxOAM"),
};

struct yt921x_info {
	const char *name;
	u16 major;
	/* Unknown, seems to be plain enumeration */
	u8 mode;
	u8 extmode;
	/* Ports with integral GbE PHYs, not including MCU Port 10 */
	u16 internal_mask;
	/* TODO: see comments in yt921x_dsa_phylink_get_caps() */
	u16 external_mask;
};

#define YT921X_PORT_MASK_INTn(port)	BIT(port)
#define YT921X_PORT_MASK_INT0_n(n)	GENMASK((n) - 1, 0)
#define YT921X_PORT_MASK_EXT0		BIT(8)
#define YT921X_PORT_MASK_EXT1		BIT(9)

static const struct yt921x_info yt921x_infos[] = {
	{
		"YT9215SC", YT9215_MAJOR, 1, 0,
		YT921X_PORT_MASK_INT0_n(5),
		YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
	},
	{
		"YT9215S", YT9215_MAJOR, 2, 0,
		YT921X_PORT_MASK_INT0_n(5),
		YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
	},
	{
		"YT9215RB", YT9215_MAJOR, 3, 0,
		YT921X_PORT_MASK_INT0_n(5),
		YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
	},
	{
		"YT9214NB", YT9215_MAJOR, 3, 2,
		YT921X_PORT_MASK_INTn(1) | YT921X_PORT_MASK_INTn(3),
		YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
	},
	{
		"YT9213NB", YT9215_MAJOR, 3, 3,
		YT921X_PORT_MASK_INTn(1) | YT921X_PORT_MASK_INTn(3),
		YT921X_PORT_MASK_EXT1,
	},
	{
		"YT9218N", YT9218_MAJOR, 0, 0,
		YT921X_PORT_MASK_INT0_n(8),
		0,
	},
	{
		"YT9218MB", YT9218_MAJOR, 1, 0,
		YT921X_PORT_MASK_INT0_n(8),
		YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
	},
	{}
};

#define YT921X_NAME			"yt921x"

#define YT921X_VID_UNWARE		4095

#define YT921X_POLL_SLEEP_US		10000
#define YT921X_POLL_TIMEOUT_US		100000

/* The interval should be small enough to avoid overflow of 32bit MIBs.
 *
 * Until we can read MIBs from the stats64 call directly (i.e. sleep
 * there), we have to poll stats more frequently than is actually needed.
 * For overflow protection alone, a 100 sec interval would normally be enough.
 */
#define YT921X_STATS_INTERVAL_JIFFIES	(3 * HZ)

struct yt921x_reg_mdio {
	struct mii_bus *bus;
	int addr;
	/* SWITCH_ID_1 / SWITCH_ID_0 of the device
	 *
	 * This is a way to multiplex multiple devices on the same MII phyaddr
	 * and should be configurable in DT. However, MDIO core simply doesn't
	 * allow multiple devices over one reg addr, so this is a fixed value
	 * for now until a solution is found.
	 *
	 * Keep this because we need switchid to form MII regaddrs anyway.
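	 *
	 * (The switchid is or-ed into every SMI command word; see
	 * yt921x_reg_mdio_read() and yt921x_reg_mdio_write() below.)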
	 */
	unsigned char switchid;
};

/* TODO: SPI/I2C */

#define to_yt921x_priv(_ds) container_of_const(_ds, struct yt921x_priv, ds)
#define to_device(priv) ((priv)->ds.dev)

static int yt921x_reg_read(struct yt921x_priv *priv, u32 reg, u32 *valp)
{
	WARN_ON(!mutex_is_locked(&priv->reg_lock));

	return priv->reg_ops->read(priv->reg_ctx, reg, valp);
}

static int yt921x_reg_write(struct yt921x_priv *priv, u32 reg, u32 val)
{
	WARN_ON(!mutex_is_locked(&priv->reg_lock));

	return priv->reg_ops->write(priv->reg_ctx, reg, val);
}

static int
yt921x_reg_wait(struct yt921x_priv *priv, u32 reg, u32 mask, u32 *valp)
{
	u32 val;
	int res;
	int ret;

	ret = read_poll_timeout(yt921x_reg_read, res,
				res || (val & mask) == *valp,
				YT921X_POLL_SLEEP_US, YT921X_POLL_TIMEOUT_US,
				false, priv, reg, &val);
	if (ret)
		return ret;
	if (res)
		return res;

	*valp = val;
	return 0;
}

static int
yt921x_reg_update_bits(struct yt921x_priv *priv, u32 reg, u32 mask, u32 val)
{
	int res;
	u32 v;
	u32 u;

	res = yt921x_reg_read(priv, reg, &v);
	if (res)
		return res;

	u = v;
	u &= ~mask;
	u |= val;
	if (u == v)
		return 0;

	return yt921x_reg_write(priv, reg, u);
}

static int yt921x_reg_set_bits(struct yt921x_priv *priv, u32 reg, u32 mask)
{
	return yt921x_reg_update_bits(priv, reg, 0, mask);
}

static int yt921x_reg_clear_bits(struct yt921x_priv *priv, u32 reg, u32 mask)
{
	return yt921x_reg_update_bits(priv, reg, mask, 0);
}

static int
yt921x_reg_toggle_bits(struct yt921x_priv *priv, u32 reg, u32 mask, bool set)
{
	return yt921x_reg_update_bits(priv, reg, mask, !set ? 0 : mask);
}

/* Some registers, like VLANn_CTRL, should always be written in 64-bit, even if
 * you are to write only the lower / upper 32 bits.
 *
 * There is no such restriction for reading, but we still provide 64-bit read
 * wrappers so that we always handle u64 values.
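 *
 * yt921x_vlan_add() / yt921x_vlan_del() rely on this: VLANn_CTRL is only ever
 * touched through the 64-bit helpers below, so both words are written even
 * when only a single port bit changes.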
 */

static int yt921x_reg64_read(struct yt921x_priv *priv, u32 reg, u64 *valp)
{
	u32 lo;
	u32 hi;
	int res;

	res = yt921x_reg_read(priv, reg, &lo);
	if (res)
		return res;
	res = yt921x_reg_read(priv, reg + 4, &hi);
	if (res)
		return res;

	*valp = ((u64)hi << 32) | lo;
	return 0;
}

static int yt921x_reg64_write(struct yt921x_priv *priv, u32 reg, u64 val)
{
	int res;

	res = yt921x_reg_write(priv, reg, (u32)val);
	if (res)
		return res;
	return yt921x_reg_write(priv, reg + 4, (u32)(val >> 32));
}

static int
yt921x_reg64_update_bits(struct yt921x_priv *priv, u32 reg, u64 mask, u64 val)
{
	int res;
	u64 v;
	u64 u;

	res = yt921x_reg64_read(priv, reg, &v);
	if (res)
		return res;

	u = v;
	u &= ~mask;
	u |= val;
	if (u == v)
		return 0;

	return yt921x_reg64_write(priv, reg, u);
}

static int yt921x_reg64_clear_bits(struct yt921x_priv *priv, u32 reg, u64 mask)
{
	return yt921x_reg64_update_bits(priv, reg, mask, 0);
}

static int yt921x_reg_mdio_read(void *context, u32 reg, u32 *valp)
{
	struct yt921x_reg_mdio *mdio = context;
	struct mii_bus *bus = mdio->bus;
	int addr = mdio->addr;
	u32 reg_addr;
	u32 reg_data;
	u32 val;
	int res;

	/* Hold the mdio bus lock to avoid (un)locking for 4 times */
	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	reg_addr = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_ADDR |
		   YT921X_SMI_READ;
	res = __mdiobus_write(bus, addr, reg_addr, (u16)(reg >> 16));
	if (res)
		goto end;
	res = __mdiobus_write(bus, addr, reg_addr, (u16)reg);
	if (res)
		goto end;

	reg_data = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_DATA |
		   YT921X_SMI_READ;
	res = __mdiobus_read(bus, addr, reg_data);
	if (res < 0)
		goto end;
	val = (u16)res;
	res = __mdiobus_read(bus, addr, reg_data);
	if (res < 0)
		goto end;
	val = (val << 16) | (u16)res;

	*valp = val;
	res = 0;

end:
	mutex_unlock(&bus->mdio_lock);
	return res;
}

static int yt921x_reg_mdio_write(void *context, u32 reg, u32 val)
{
	struct yt921x_reg_mdio *mdio = context;
	struct mii_bus *bus = mdio->bus;
	int addr = mdio->addr;
	u32 reg_addr;
	u32 reg_data;
	int res;

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	reg_addr = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_ADDR |
		   YT921X_SMI_WRITE;
	res = __mdiobus_write(bus, addr, reg_addr, (u16)(reg >> 16));
	if (res)
		goto end;
	res = __mdiobus_write(bus, addr, reg_addr, (u16)reg);
	if (res)
		goto end;

	reg_data = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_DATA |
		   YT921X_SMI_WRITE;
	res = __mdiobus_write(bus, addr, reg_data, (u16)(val >> 16));
	if (res)
		goto end;
	res = __mdiobus_write(bus, addr, reg_data, (u16)val);
	if (res)
		goto end;

	res = 0;

end:
	mutex_unlock(&bus->mdio_lock);
	return res;
}

static const struct yt921x_reg_ops yt921x_reg_ops_mdio = {
	.read = yt921x_reg_mdio_read,
	.write = yt921x_reg_mdio_write,
};

/* TODO: SPI/I2C */

static int yt921x_intif_wait(struct yt921x_priv *priv)
{
	u32 val = 0;

	return yt921x_reg_wait(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START,
			       &val);
}

static int
yt921x_intif_read(struct yt921x_priv *priv, int port, int reg, u16 *valp)
{
	struct device *dev = to_device(priv);
	u32 mask;
	u32 ctrl;
	u32 val;
	int res;

	res = yt921x_intif_wait(priv);
	if (res)
		return res;

	mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
	       YT921X_MBUS_CTRL_OP_M;
	ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
	       YT921X_MBUS_CTRL_READ;
	res = yt921x_reg_update_bits(priv, YT921X_INT_MBUS_CTRL, mask, ctrl);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START);
	if (res)
		return res;

	res = yt921x_intif_wait(priv);
	if (res)
		return res;
	res = yt921x_reg_read(priv, YT921X_INT_MBUS_DIN, &val);
	if (res)
		return res;

	if ((u16)val != val)
		dev_info(dev,
			 "%s: port %d, reg 0x%x: Expected u16, got 0x%08x\n",
			 __func__, port, reg, val);
	*valp = (u16)val;
	return 0;
}

static int
yt921x_intif_write(struct yt921x_priv *priv, int port, int reg, u16 val)
{
	u32 mask;
	u32 ctrl;
	int res;

	res = yt921x_intif_wait(priv);
	if (res)
		return res;

	mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
	       YT921X_MBUS_CTRL_OP_M;
	ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
	       YT921X_MBUS_CTRL_WRITE;
	res = yt921x_reg_update_bits(priv, YT921X_INT_MBUS_CTRL, mask, ctrl);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_INT_MBUS_DOUT, val);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START);
	if (res)
		return res;

	return yt921x_intif_wait(priv);
}

static int yt921x_mbus_int_read(struct mii_bus *mbus, int port, int reg)
{
	struct yt921x_priv *priv = mbus->priv;
	u16 val;
	int res;

	if (port >= YT921X_PORT_NUM)
		return U16_MAX;

	mutex_lock(&priv->reg_lock);
	res = yt921x_intif_read(priv, port, reg, &val);
	mutex_unlock(&priv->reg_lock);

	if (res)
		return res;
	return val;
}

static int
yt921x_mbus_int_write(struct mii_bus *mbus, int port, int reg, u16 data)
{
	struct yt921x_priv *priv = mbus->priv;
	int res;

	if (port >= YT921X_PORT_NUM)
		return -ENODEV;

	mutex_lock(&priv->reg_lock);
	res = yt921x_intif_write(priv, port, reg, data);
	mutex_unlock(&priv->reg_lock);

	return res;
}

static int
yt921x_mbus_int_init(struct yt921x_priv *priv, struct device_node *mnp)
{
	struct device *dev = to_device(priv);
	struct mii_bus *mbus;
	int res;

	mbus = devm_mdiobus_alloc(dev);
	if (!mbus)
		return -ENOMEM;

	mbus->name = "YT921x internal MDIO bus";
	snprintf(mbus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
	mbus->priv = priv;
	mbus->read = yt921x_mbus_int_read;
	mbus->write = yt921x_mbus_int_write;
	mbus->parent = dev;
	mbus->phy_mask = (u32)~GENMASK(YT921X_PORT_NUM - 1, 0);

	res = devm_of_mdiobus_register(dev, mbus, mnp);
	if (res)
		return res;

	priv->mbus_int = mbus;

	return 0;
}

static int yt921x_extif_wait(struct yt921x_priv *priv)
{
	u32 val = 0;

	return yt921x_reg_wait(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START,
			       &val);
}

static int
yt921x_extif_read(struct yt921x_priv *priv, int port, int reg, u16 *valp)
{
	struct device *dev = to_device(priv);
	u32 mask;
	u32 ctrl;
	u32 val;
	int res;

	res = yt921x_extif_wait(priv);
	if (res)
		return res;
	mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
	       YT921X_MBUS_CTRL_TYPE_M | YT921X_MBUS_CTRL_OP_M;
	ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
	       YT921X_MBUS_CTRL_TYPE_C22 | YT921X_MBUS_CTRL_READ;
	res = yt921x_reg_update_bits(priv, YT921X_EXT_MBUS_CTRL, mask, ctrl);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START);
	if (res)
		return res;

	res = yt921x_extif_wait(priv);
	if (res)
		return res;
	res = yt921x_reg_read(priv, YT921X_EXT_MBUS_DIN, &val);
	if (res)
		return res;

	if ((u16)val != val)
		dev_info(dev,
			 "%s: port %d, reg 0x%x: Expected u16, got 0x%08x\n",
			 __func__, port, reg, val);
	*valp = (u16)val;
	return 0;
}

static int
yt921x_extif_write(struct yt921x_priv *priv, int port, int reg, u16 val)
{
	u32 mask;
	u32 ctrl;
	int res;

	res = yt921x_extif_wait(priv);
	if (res)
		return res;

	mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
	       YT921X_MBUS_CTRL_TYPE_M | YT921X_MBUS_CTRL_OP_M;
	ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
	       YT921X_MBUS_CTRL_TYPE_C22 | YT921X_MBUS_CTRL_WRITE;
	res = yt921x_reg_update_bits(priv, YT921X_EXT_MBUS_CTRL, mask, ctrl);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_EXT_MBUS_DOUT, val);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START);
	if (res)
		return res;

	return yt921x_extif_wait(priv);
}

static int yt921x_mbus_ext_read(struct mii_bus *mbus, int port, int reg)
{
	struct yt921x_priv *priv = mbus->priv;
	u16 val;
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_extif_read(priv, port, reg, &val);
	mutex_unlock(&priv->reg_lock);

	if (res)
		return res;
	return val;
}

static int
yt921x_mbus_ext_write(struct mii_bus *mbus, int port, int reg, u16 data)
{
	struct yt921x_priv *priv = mbus->priv;
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_extif_write(priv, port, reg, data);
	mutex_unlock(&priv->reg_lock);

	return res;
}

static int
yt921x_mbus_ext_init(struct yt921x_priv *priv, struct device_node *mnp)
{
	struct device *dev = to_device(priv);
	struct mii_bus *mbus;
	int res;

	mbus = devm_mdiobus_alloc(dev);
	if (!mbus)
		return -ENOMEM;

	mbus->name = "YT921x external MDIO bus";
	snprintf(mbus->id, MII_BUS_ID_SIZE, "%s@ext", dev_name(dev));
	mbus->priv = priv;
	/* TODO: c45? */
	mbus->read = yt921x_mbus_ext_read;
	mbus->write = yt921x_mbus_ext_write;
	mbus->parent = dev;

	res = devm_of_mdiobus_register(dev, mbus, mnp);
	if (res)
		return res;

	priv->mbus_ext = mbus;

	return 0;
}

/* Read and handle overflow of 32bit MIBs. MIB buffer must be zeroed beforehand. */
static int yt921x_read_mib(struct yt921x_priv *priv, int port)
{
	struct yt921x_port *pp = &priv->ports[port];
	struct device *dev = to_device(priv);
	struct yt921x_mib *mib = &pp->mib;
	int res = 0;

	/* Reading of yt921x_port::mib is not protected by a lock and it is
	 * pointless to try to keep it consistent, since we have to read the
	 * registers one by one and there is no way to snapshot the MIB stats.
	 *
	 * Writing (by this function only) is and should be protected by
	 * reg_lock.
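	 *
	 * 32-bit counters are widened here: the accumulated 64-bit value
	 * keeps the last hardware reading in its low half, and a wrap of the
	 * hardware counter adds 2^32 before the low half is replaced (see the
	 * size <= 1 branch below).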
	 */

	for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
		const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];
		u32 reg = YT921X_MIBn_DATA0(port) + desc->offset;
		u64 *valp = &((u64 *)mib)[i];
		u64 val = *valp;
		u32 val0;
		u32 val1;

		res = yt921x_reg_read(priv, reg, &val0);
		if (res)
			break;

		if (desc->size <= 1) {
			if (val0 < (u32)val)
				/* overflow */
				val += (u64)U32_MAX + 1;
			val &= ~(u64)U32_MAX;
			val |= val0;
		} else {
			res = yt921x_reg_read(priv, reg + 4, &val1);
			if (res)
				break;
			val = ((u64)val0 << 32) | val1;
		}

		WRITE_ONCE(*valp, val);
	}

	pp->rx_frames = mib->rx_64byte + mib->rx_65_127byte +
			mib->rx_128_255byte + mib->rx_256_511byte +
			mib->rx_512_1023byte + mib->rx_1024_1518byte +
			mib->rx_jumbo;
	pp->tx_frames = mib->tx_64byte + mib->tx_65_127byte +
			mib->tx_128_255byte + mib->tx_256_511byte +
			mib->tx_512_1023byte + mib->tx_1024_1518byte +
			mib->tx_jumbo;

	if (res)
		dev_err(dev, "Failed to %s port %d: %i\n", "read stats for",
			port, res);
	return res;
}

static void yt921x_poll_mib(struct work_struct *work)
{
	struct yt921x_port *pp = container_of_const(work, struct yt921x_port,
						    mib_read.work);
	struct yt921x_priv *priv = (void *)(pp - pp->index) -
				   offsetof(struct yt921x_priv, ports);
	unsigned long delay = YT921X_STATS_INTERVAL_JIFFIES;
	int port = pp->index;
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_read_mib(priv, port);
	mutex_unlock(&priv->reg_lock);
	if (res)
		delay *= 4;

	schedule_delayed_work(&pp->mib_read, delay);
}

static void
yt921x_dsa_get_strings(struct dsa_switch *ds, int port, u32 stringset,
		       uint8_t *data)
{
	if (stringset != ETH_SS_STATS)
		return;

	for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
		const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];

		if (desc->name)
			ethtool_puts(&data, desc->name);
	}
}

static void
yt921x_dsa_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct yt921x_port *pp = &priv->ports[port];
	struct yt921x_mib *mib = &pp->mib;
	size_t j;

	mutex_lock(&priv->reg_lock);
	yt921x_read_mib(priv, port);
	mutex_unlock(&priv->reg_lock);

	j = 0;
	for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
		const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];

		if (!desc->name)
			continue;

		data[j] = ((u64 *)mib)[i];
		j++;
	}
}

static int yt921x_dsa_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	int cnt = 0;

	if (sset != ETH_SS_STATS)
		return 0;

	for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
		const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];

		if (desc->name)
			cnt++;
	}

	return cnt;
}

static void
yt921x_dsa_get_eth_mac_stats(struct dsa_switch *ds, int port,
			     struct ethtool_eth_mac_stats *mac_stats)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct yt921x_port *pp = &priv->ports[port];
	struct yt921x_mib *mib = &pp->mib;

	mutex_lock(&priv->reg_lock);
	yt921x_read_mib(priv, port);
	mutex_unlock(&priv->reg_lock);

	mac_stats->FramesTransmittedOK = pp->tx_frames;
	mac_stats->SingleCollisionFrames = mib->tx_single_collisions;
	mac_stats->MultipleCollisionFrames = mib->tx_multiple_collisions;
	mac_stats->FramesReceivedOK = pp->rx_frames;
	mac_stats->FrameCheckSequenceErrors = mib->rx_crc_errors;
	mac_stats->AlignmentErrors = mib->rx_alignment_errors;
	mac_stats->OctetsTransmittedOK = mib->tx_good_bytes;
	mac_stats->FramesWithDeferredXmissions = mib->tx_deferred;
	mac_stats->LateCollisions = mib->tx_late_collisions;
	mac_stats->FramesAbortedDueToXSColls = mib->tx_aborted_errors;
	/* mac_stats->FramesLostDueToIntMACXmitError */
	/* mac_stats->CarrierSenseErrors */
	mac_stats->OctetsReceivedOK = mib->rx_good_bytes;
	/* mac_stats->FramesLostDueToIntMACRcvError */
	mac_stats->MulticastFramesXmittedOK = mib->tx_multicast;
	mac_stats->BroadcastFramesXmittedOK = mib->tx_broadcast;
	/* mac_stats->FramesWithExcessiveDeferral */
	mac_stats->MulticastFramesReceivedOK = mib->rx_multicast;
	mac_stats->BroadcastFramesReceivedOK = mib->rx_broadcast;
	/* mac_stats->InRangeLengthErrors */
	/* mac_stats->OutOfRangeLengthField */
	mac_stats->FrameTooLongErrors = mib->rx_oversize_errors;
}

static void
yt921x_dsa_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
			      struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct yt921x_port *pp = &priv->ports[port];
	struct yt921x_mib *mib = &pp->mib;

	mutex_lock(&priv->reg_lock);
	yt921x_read_mib(priv, port);
	mutex_unlock(&priv->reg_lock);

	ctrl_stats->MACControlFramesTransmitted = mib->tx_pause;
	ctrl_stats->MACControlFramesReceived = mib->rx_pause;
	/* ctrl_stats->UnsupportedOpcodesReceived */
}

static const struct ethtool_rmon_hist_range yt921x_rmon_ranges[] = {
	{ 0, 64 },
	{ 65, 127 },
	{ 128, 255 },
	{ 256, 511 },
	{ 512, 1023 },
	{ 1024, 1518 },
	{ 1519, YT921X_FRAME_SIZE_MAX },
	{}
};

static void
yt921x_dsa_get_rmon_stats(struct dsa_switch *ds, int port,
			  struct ethtool_rmon_stats *rmon_stats,
			  const struct ethtool_rmon_hist_range **ranges)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct yt921x_port *pp = &priv->ports[port];
	struct yt921x_mib *mib = &pp->mib;

	mutex_lock(&priv->reg_lock);
	yt921x_read_mib(priv, port);
	mutex_unlock(&priv->reg_lock);

	*ranges = yt921x_rmon_ranges;

	rmon_stats->undersize_pkts = mib->rx_undersize_errors;
	rmon_stats->oversize_pkts = mib->rx_oversize_errors;
	rmon_stats->fragments = mib->rx_alignment_errors;
	/* rmon_stats->jabbers */

	rmon_stats->hist[0] = mib->rx_64byte;
	rmon_stats->hist[1] = mib->rx_65_127byte;
	rmon_stats->hist[2] = mib->rx_128_255byte;
	rmon_stats->hist[3] = mib->rx_256_511byte;
	rmon_stats->hist[4] = mib->rx_512_1023byte;
	rmon_stats->hist[5] = mib->rx_1024_1518byte;
	rmon_stats->hist[6] = mib->rx_jumbo;

	rmon_stats->hist_tx[0] = mib->tx_64byte;
	rmon_stats->hist_tx[1] = mib->tx_65_127byte;
	rmon_stats->hist_tx[2] = mib->tx_128_255byte;
	rmon_stats->hist_tx[3] = mib->tx_256_511byte;
	rmon_stats->hist_tx[4] = mib->tx_512_1023byte;
	rmon_stats->hist_tx[5] = mib->tx_1024_1518byte;
	rmon_stats->hist_tx[6] = mib->tx_jumbo;
}

static void
yt921x_dsa_get_stats64(struct dsa_switch *ds, int port,
		       struct rtnl_link_stats64 *stats)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct yt921x_port *pp = &priv->ports[port];
	struct yt921x_mib *mib = &pp->mib;

	stats->rx_length_errors = mib->rx_undersize_errors +
				  mib->rx_fragment_errors;
	stats->rx_over_errors = mib->rx_oversize_errors;
	stats->rx_crc_errors = mib->rx_crc_errors;
	stats->rx_frame_errors = mib->rx_alignment_errors;
	/* stats->rx_fifo_errors */
	/* stats->rx_missed_errors */

	stats->tx_aborted_errors = mib->tx_aborted_errors;
	/* stats->tx_carrier_errors */
	stats->tx_fifo_errors = mib->tx_undersize_errors;
	/* stats->tx_heartbeat_errors */
	stats->tx_window_errors = mib->tx_late_collisions;

	stats->rx_packets = pp->rx_frames;
	stats->tx_packets = pp->tx_frames;
	stats->rx_bytes = mib->rx_good_bytes - ETH_FCS_LEN * stats->rx_packets;
	stats->tx_bytes = mib->tx_good_bytes - ETH_FCS_LEN * stats->tx_packets;
	stats->rx_errors = stats->rx_length_errors + stats->rx_over_errors +
			   stats->rx_crc_errors + stats->rx_frame_errors;
	stats->tx_errors = stats->tx_aborted_errors + stats->tx_fifo_errors +
			   stats->tx_window_errors;
	stats->rx_dropped = mib->rx_dropped;
	/* stats->tx_dropped */
	stats->multicast = mib->rx_multicast;
	stats->collisions = mib->tx_collisions;
}

static void
yt921x_dsa_get_pause_stats(struct dsa_switch *ds, int port,
			   struct ethtool_pause_stats *pause_stats)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct yt921x_port *pp = &priv->ports[port];
	struct yt921x_mib *mib = &pp->mib;

	mutex_lock(&priv->reg_lock);
	yt921x_read_mib(priv, port);
	mutex_unlock(&priv->reg_lock);

	pause_stats->tx_pause_frames = mib->tx_pause;
	pause_stats->rx_pause_frames = mib->rx_pause;
}

static int
yt921x_set_eee(struct yt921x_priv *priv, int port, struct ethtool_keee *e)
{
	/* Poor datasheet for EEE operations; don't ask if you are confused */

	bool enable = e->eee_enabled;
	u16 new_mask;
	int res;

	/* Enable / disable global EEE */
	new_mask = priv->eee_ports_mask;
	new_mask &= ~BIT(port);
	new_mask |= !enable ? 0 : BIT(port);

	if (!!new_mask != !!priv->eee_ports_mask) {
		res = yt921x_reg_toggle_bits(priv, YT921X_PON_STRAP_FUNC,
					     YT921X_PON_STRAP_EEE, !!new_mask);
		if (res)
			return res;
		res = yt921x_reg_toggle_bits(priv, YT921X_PON_STRAP_VAL,
					     YT921X_PON_STRAP_EEE, !!new_mask);
		if (res)
			return res;
	}

	priv->eee_ports_mask = new_mask;

	/* Enable / disable port EEE */
	res = yt921x_reg_toggle_bits(priv, YT921X_EEE_CTRL,
				     YT921X_EEE_CTRL_ENn(port), enable);
	if (res)
		return res;
	res = yt921x_reg_toggle_bits(priv, YT921X_EEEn_VAL(port),
				     YT921X_EEE_VAL_DATA, enable);
	if (res)
		return res;

	return 0;
}

static int
yt921x_dsa_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_set_eee(priv, port, e);
	mutex_unlock(&priv->reg_lock);

	return res;
}

static int
yt921x_dsa_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	/* Only serves as packet filter, since the frame size is always set to
	 * maximum after reset
	 */

	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct dsa_port *dp = dsa_to_port(ds, port);
	int frame_size;
	int res;

	frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	if (dsa_port_is_cpu(dp))
		frame_size += YT921X_TAG_LEN;

	mutex_lock(&priv->reg_lock);
	res = yt921x_reg_update_bits(priv, YT921X_MACn_FRAME(port),
				     YT921X_MAC_FRAME_SIZE_M,
				     YT921X_MAC_FRAME_SIZE(frame_size));
	mutex_unlock(&priv->reg_lock);

	return res;
}

static int yt921x_dsa_port_max_mtu(struct dsa_switch *ds, int port)
{
	/* Only called for user ports, exclude tag len here */
	return YT921X_FRAME_SIZE_MAX - ETH_HLEN - ETH_FCS_LEN - YT921X_TAG_LEN;
}

static int
yt921x_mirror_del(struct yt921x_priv *priv, int port, bool ingress)
{
	u32 mask;

	if (ingress)
		mask = YT921X_MIRROR_IGR_PORTn(port);
	else
		mask = YT921X_MIRROR_EGR_PORTn(port);
	return yt921x_reg_clear_bits(priv, YT921X_MIRROR, mask);
}

static int
yt921x_mirror_add(struct yt921x_priv *priv, int port, bool ingress,
		  int to_local_port, struct netlink_ext_ack *extack)
{
	u32 srcs;
	u32 ctrl;
	u32 val;
	u32 dst;
	int res;

	if (ingress)
		srcs = YT921X_MIRROR_IGR_PORTn(port);
	else
		srcs = YT921X_MIRROR_EGR_PORTn(port);
	dst = YT921X_MIRROR_PORT(to_local_port);

	res = yt921x_reg_read(priv, YT921X_MIRROR, &val);
	if (res)
		return res;

	/* other mirror tasks & different dst port -> conflict */
	if ((val & ~srcs & (YT921X_MIRROR_EGR_PORTS_M |
			    YT921X_MIRROR_IGR_PORTS_M)) &&
	    (val & YT921X_MIRROR_PORT_M) != dst) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Sniffer port is already configured, delete existing rules & retry");
		return -EBUSY;
	}

	ctrl = val & ~YT921X_MIRROR_PORT_M;
	ctrl |= srcs;
	ctrl |= dst;

	if (ctrl == val)
		return 0;

	return yt921x_reg_write(priv, YT921X_MIRROR, ctrl);
}

static void
yt921x_dsa_port_mirror_del(struct dsa_switch *ds, int port,
			   struct dsa_mall_mirror_tc_entry *mirror)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct device *dev = to_device(priv);
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_mirror_del(priv, port, mirror->ingress);
	mutex_unlock(&priv->reg_lock);

	if (res)
		dev_err(dev, "Failed to %s port %d: %i\n", "unmirror",
			port, res);
}

static int
yt921x_dsa_port_mirror_add(struct dsa_switch *ds, int port,
			   struct dsa_mall_mirror_tc_entry *mirror,
			   bool ingress, struct netlink_ext_ack *extack)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_mirror_add(priv, port, ingress,
				mirror->to_local_port, extack);
	mutex_unlock(&priv->reg_lock);

	return res;
}

static int yt921x_fdb_wait(struct yt921x_priv *priv, u32 *valp)
{
	struct device *dev = to_device(priv);
	u32 val = YT921X_FDB_RESULT_DONE;
	int res;

	res = yt921x_reg_wait(priv, YT921X_FDB_RESULT, YT921X_FDB_RESULT_DONE,
			      &val);
	if (res) {
		dev_err(dev, "FDB probably stuck\n");
		return res;
	}

	*valp = val;
	return 0;
}

static int
yt921x_fdb_in01(struct yt921x_priv *priv, const unsigned char *addr,
		u16 vid, u32 ctrl1)
{
	u32 ctrl;
	int res;

	ctrl = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	res = yt921x_reg_write(priv, YT921X_FDB_IN0, ctrl);
	if (res)
		return res;

	ctrl = ctrl1 | YT921X_FDB_IO1_FID(vid) | (addr[4] << 8) | addr[5];
	return yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl);
}

static int
yt921x_fdb_has(struct yt921x_priv *priv, const unsigned char *addr, u16 vid,
	       u16 *indexp)
{
	u32 ctrl;
	u32 val;
	int res;

	res = yt921x_fdb_in01(priv, addr, vid, 0);
	if (res)
		return res;

	ctrl = 0;
	res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl);
	if (res)
		return res;

	ctrl = YT921X_FDB_OP_OP_GET_ONE | YT921X_FDB_OP_START;
	res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
	if (res)
		return res;

	res = yt921x_fdb_wait(priv, &val);
	if (res)
		return res;
	if (val & YT921X_FDB_RESULT_NOTFOUND) {
		*indexp = YT921X_FDB_NUM;
		return 0;
	}

	*indexp = FIELD_GET(YT921X_FDB_RESULT_INDEX_M, val);
	return 0;
}

static int
yt921x_fdb_read(struct yt921x_priv *priv, unsigned char *addr, u16 *vidp,
		u16 *ports_maskp, u16 *indexp, u8 *statusp)
{
	struct device *dev = to_device(priv);
	u16 index;
	u32 data0;
	u32 data1;
	u32 data2;
	u32 val;
	int res;

	res = yt921x_fdb_wait(priv, &val);
	if (res)
		return res;
	if (val & YT921X_FDB_RESULT_NOTFOUND) {
		*ports_maskp = 0;
		return 0;
	}
	index = FIELD_GET(YT921X_FDB_RESULT_INDEX_M, val);

	res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &data1);
	if (res)
		return res;
	if ((data1 & YT921X_FDB_IO1_STATUS_M) ==
	    YT921X_FDB_IO1_STATUS_INVALID) {
		*ports_maskp = 0;
		return 0;
	}

	res = yt921x_reg_read(priv, YT921X_FDB_OUT0, &data0);
	if (res)
		return res;
	res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &data2);
	if (res)
		return res;

	addr[0] = data0 >> 24;
	addr[1] = data0 >> 16;
	addr[2] = data0 >> 8;
	addr[3] = data0;
	addr[4] = data1 >> 8;
	addr[5] = data1;
	*vidp = FIELD_GET(YT921X_FDB_IO1_FID_M, data1);
	*indexp = index;
	*ports_maskp = FIELD_GET(YT921X_FDB_IO2_EGR_PORTS_M, data2);
	*statusp = FIELD_GET(YT921X_FDB_IO1_STATUS_M, data1);

	dev_dbg(dev,
		"%s: index 0x%x, mac %02x:%02x:%02x:%02x:%02x:%02x, vid %d, ports 0x%x, status %d\n",
		__func__, *indexp, addr[0], addr[1], addr[2], addr[3],
		addr[4], addr[5], *vidp, *ports_maskp, *statusp);
	return 0;
}

static int
yt921x_fdb_dump(struct yt921x_priv *priv, u16 ports_mask,
		dsa_fdb_dump_cb_t *cb, void *data)
{
	unsigned char addr[ETH_ALEN];
	u8 status;
	u16 pmask;
	u16 index;
	u32 ctrl;
	u16 vid;
	int res;

	ctrl = YT921X_FDB_OP_INDEX(0) | YT921X_FDB_OP_MODE_INDEX |
	       YT921X_FDB_OP_OP_GET_ONE | YT921X_FDB_OP_START;
	res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
	if (res)
		return res;
	res = yt921x_fdb_read(priv, addr, &vid, &pmask, &index, &status);
	if (res)
		return res;
	if ((pmask & ports_mask) && !is_multicast_ether_addr(addr)) {
		res = cb(addr, vid,
			 status == YT921X_FDB_ENTRY_STATUS_STATIC, data);
		if (res)
			return res;
	}

	ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask);
	res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl);
	if (res)
		return res;

	index = 0;
	do {
		ctrl = YT921X_FDB_OP_INDEX(index) | YT921X_FDB_OP_MODE_INDEX |
		       YT921X_FDB_OP_NEXT_TYPE_UCAST_PORT |
		       YT921X_FDB_OP_OP_GET_NEXT | YT921X_FDB_OP_START;
		res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
		if (res)
			return res;

		res = yt921x_fdb_read(priv, addr, &vid, &pmask, &index,
				      &status);
		if (res)
			return res;
		if (!pmask)
			break;

		if ((pmask & ports_mask) && !is_multicast_ether_addr(addr)) {
			res = cb(addr, vid,
				 status == YT921X_FDB_ENTRY_STATUS_STATIC,
				 data);
			if (res)
				return res;
		}

		/* Never call GET_NEXT with 4095, otherwise it will hang
		 * forever until a reset!
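		 *
		 * (Hence the index < YT921X_FDB_NUM - 1 loop condition below.)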
1305 */ 1306 } while (index < YT921X_FDB_NUM - 1); 1307 1308 return 0; 1309 } 1310 1311 static int 1312 yt921x_fdb_flush_raw(struct yt921x_priv *priv, u16 ports_mask, u16 vid, 1313 bool flush_static) 1314 { 1315 u32 ctrl; 1316 u32 val; 1317 int res; 1318 1319 if (vid < 4096) { 1320 ctrl = YT921X_FDB_IO1_FID(vid); 1321 res = yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl); 1322 if (res) 1323 return res; 1324 } 1325 1326 ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask); 1327 res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl); 1328 if (res) 1329 return res; 1330 1331 ctrl = YT921X_FDB_OP_OP_FLUSH | YT921X_FDB_OP_START; 1332 if (vid >= 4096) 1333 ctrl |= YT921X_FDB_OP_FLUSH_PORT; 1334 else 1335 ctrl |= YT921X_FDB_OP_FLUSH_PORT_VID; 1336 if (flush_static) 1337 ctrl |= YT921X_FDB_OP_FLUSH_STATIC; 1338 res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl); 1339 if (res) 1340 return res; 1341 1342 res = yt921x_fdb_wait(priv, &val); 1343 if (res) 1344 return res; 1345 1346 return 0; 1347 } 1348 1349 static int 1350 yt921x_fdb_flush_port(struct yt921x_priv *priv, int port, bool flush_static) 1351 { 1352 return yt921x_fdb_flush_raw(priv, BIT(port), 4096, flush_static); 1353 } 1354 1355 static int 1356 yt921x_fdb_add_index_in12(struct yt921x_priv *priv, u16 index, u16 ctrl1, 1357 u16 ctrl2) 1358 { 1359 u32 ctrl; 1360 u32 val; 1361 int res; 1362 1363 res = yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl1); 1364 if (res) 1365 return res; 1366 res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl2); 1367 if (res) 1368 return res; 1369 1370 ctrl = YT921X_FDB_OP_INDEX(index) | YT921X_FDB_OP_MODE_INDEX | 1371 YT921X_FDB_OP_OP_ADD | YT921X_FDB_OP_START; 1372 res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl); 1373 if (res) 1374 return res; 1375 1376 return yt921x_fdb_wait(priv, &val); 1377 } 1378 1379 static int 1380 yt921x_fdb_add(struct yt921x_priv *priv, const unsigned char *addr, u16 vid, 1381 u16 ports_mask) 1382 { 1383 u32 ctrl; 1384 u32 val; 1385 int res; 1386 1387 ctrl = YT921X_FDB_IO1_STATUS_STATIC; 1388 res = yt921x_fdb_in01(priv, addr, vid, ctrl); 1389 if (res) 1390 return res; 1391 1392 ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask); 1393 res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl); 1394 if (res) 1395 return res; 1396 1397 ctrl = YT921X_FDB_OP_OP_ADD | YT921X_FDB_OP_START; 1398 res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl); 1399 if (res) 1400 return res; 1401 1402 return yt921x_fdb_wait(priv, &val); 1403 } 1404 1405 static int 1406 yt921x_fdb_leave(struct yt921x_priv *priv, const unsigned char *addr, 1407 u16 vid, u16 ports_mask) 1408 { 1409 u16 index; 1410 u32 ctrl1; 1411 u32 ctrl2; 1412 u32 ctrl; 1413 u32 val2; 1414 u32 val; 1415 int res; 1416 1417 /* Check for presence */ 1418 res = yt921x_fdb_has(priv, addr, vid, &index); 1419 if (res) 1420 return res; 1421 if (index >= YT921X_FDB_NUM) 1422 return 0; 1423 1424 /* Check if action required */ 1425 res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &val2); 1426 if (res) 1427 return res; 1428 1429 ctrl2 = val2 & ~YT921X_FDB_IO2_EGR_PORTS(ports_mask); 1430 if (ctrl2 == val2) 1431 return 0; 1432 if (!(ctrl2 & YT921X_FDB_IO2_EGR_PORTS_M)) { 1433 ctrl = YT921X_FDB_OP_OP_DEL | YT921X_FDB_OP_START; 1434 res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl); 1435 if (res) 1436 return res; 1437 1438 return yt921x_fdb_wait(priv, &val); 1439 } 1440 1441 res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &ctrl1); 1442 if (res) 1443 return res; 1444 1445 return yt921x_fdb_add_index_in12(priv, index, ctrl1, ctrl2); 1446 } 1447 1448 static int 1449 yt921x_fdb_join(struct yt921x_priv 
*priv, const unsigned char *addr, u16 vid, 1450 u16 ports_mask) 1451 { 1452 u16 index; 1453 u32 ctrl1; 1454 u32 ctrl2; 1455 u32 val1; 1456 u32 val2; 1457 int res; 1458 1459 /* Check for presence */ 1460 res = yt921x_fdb_has(priv, addr, vid, &index); 1461 if (res) 1462 return res; 1463 if (index >= YT921X_FDB_NUM) 1464 return yt921x_fdb_add(priv, addr, vid, ports_mask); 1465 1466 /* Check if action required */ 1467 res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &val1); 1468 if (res) 1469 return res; 1470 res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &val2); 1471 if (res) 1472 return res; 1473 1474 ctrl1 = val1 & ~YT921X_FDB_IO1_STATUS_M; 1475 ctrl1 |= YT921X_FDB_IO1_STATUS_STATIC; 1476 ctrl2 = val2 | YT921X_FDB_IO2_EGR_PORTS(ports_mask); 1477 if (ctrl1 == val1 && ctrl2 == val2) 1478 return 0; 1479 1480 return yt921x_fdb_add_index_in12(priv, index, ctrl1, ctrl2); 1481 } 1482 1483 static int 1484 yt921x_dsa_port_fdb_dump(struct dsa_switch *ds, int port, 1485 dsa_fdb_dump_cb_t *cb, void *data) 1486 { 1487 struct yt921x_priv *priv = to_yt921x_priv(ds); 1488 int res; 1489 1490 mutex_lock(&priv->reg_lock); 1491 /* Hardware FDB is shared for fdb and mdb, "bridge fdb show" 1492 * only wants to see unicast 1493 */ 1494 res = yt921x_fdb_dump(priv, BIT(port), cb, data); 1495 mutex_unlock(&priv->reg_lock); 1496 1497 return res; 1498 } 1499 1500 static void yt921x_dsa_port_fast_age(struct dsa_switch *ds, int port) 1501 { 1502 struct yt921x_priv *priv = to_yt921x_priv(ds); 1503 struct device *dev = to_device(priv); 1504 int res; 1505 1506 mutex_lock(&priv->reg_lock); 1507 res = yt921x_fdb_flush_port(priv, port, false); 1508 mutex_unlock(&priv->reg_lock); 1509 1510 if (res) 1511 dev_err(dev, "Failed to %s port %d: %i\n", "clear FDB for", 1512 port, res); 1513 } 1514 1515 static int 1516 yt921x_dsa_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) 1517 { 1518 struct yt921x_priv *priv = to_yt921x_priv(ds); 1519 u32 ctrl; 1520 int res; 1521 1522 /* AGEING reg is set in 5s step */ 1523 ctrl = clamp(msecs / 5000, 1, U16_MAX); 1524 1525 mutex_lock(&priv->reg_lock); 1526 res = yt921x_reg_write(priv, YT921X_AGEING, ctrl); 1527 mutex_unlock(&priv->reg_lock); 1528 1529 return res; 1530 } 1531 1532 static int 1533 yt921x_dsa_port_fdb_del(struct dsa_switch *ds, int port, 1534 const unsigned char *addr, u16 vid, struct dsa_db db) 1535 { 1536 struct yt921x_priv *priv = to_yt921x_priv(ds); 1537 int res; 1538 1539 mutex_lock(&priv->reg_lock); 1540 res = yt921x_fdb_leave(priv, addr, vid, BIT(port)); 1541 mutex_unlock(&priv->reg_lock); 1542 1543 return res; 1544 } 1545 1546 static int 1547 yt921x_dsa_port_fdb_add(struct dsa_switch *ds, int port, 1548 const unsigned char *addr, u16 vid, struct dsa_db db) 1549 { 1550 struct yt921x_priv *priv = to_yt921x_priv(ds); 1551 int res; 1552 1553 mutex_lock(&priv->reg_lock); 1554 res = yt921x_fdb_join(priv, addr, vid, BIT(port)); 1555 mutex_unlock(&priv->reg_lock); 1556 1557 return res; 1558 } 1559 1560 static int 1561 yt921x_dsa_port_mdb_del(struct dsa_switch *ds, int port, 1562 const struct switchdev_obj_port_mdb *mdb, 1563 struct dsa_db db) 1564 { 1565 struct yt921x_priv *priv = to_yt921x_priv(ds); 1566 const unsigned char *addr = mdb->addr; 1567 u16 vid = mdb->vid; 1568 int res; 1569 1570 mutex_lock(&priv->reg_lock); 1571 res = yt921x_fdb_leave(priv, addr, vid, BIT(port)); 1572 mutex_unlock(&priv->reg_lock); 1573 1574 return res; 1575 } 1576 1577 static int 1578 yt921x_dsa_port_mdb_add(struct dsa_switch *ds, int port, 1579 const struct switchdev_obj_port_mdb *mdb, 1580 
struct dsa_db db) 1581 { 1582 struct yt921x_priv *priv = to_yt921x_priv(ds); 1583 const unsigned char *addr = mdb->addr; 1584 u16 vid = mdb->vid; 1585 int res; 1586 1587 mutex_lock(&priv->reg_lock); 1588 res = yt921x_fdb_join(priv, addr, vid, BIT(port)); 1589 mutex_unlock(&priv->reg_lock); 1590 1591 return res; 1592 } 1593 1594 static int 1595 yt921x_port_set_pvid(struct yt921x_priv *priv, int port, u16 vid) 1596 { 1597 u32 mask; 1598 u32 ctrl; 1599 1600 mask = YT921X_PORT_VLAN_CTRL_CVID_M; 1601 ctrl = YT921X_PORT_VLAN_CTRL_CVID(vid); 1602 return yt921x_reg_update_bits(priv, YT921X_PORTn_VLAN_CTRL(port), 1603 mask, ctrl); 1604 } 1605 1606 static int 1607 yt921x_vlan_filtering(struct yt921x_priv *priv, int port, bool vlan_filtering) 1608 { 1609 struct dsa_port *dp = dsa_to_port(&priv->ds, port); 1610 struct net_device *bdev; 1611 u16 pvid; 1612 u32 mask; 1613 u32 ctrl; 1614 int res; 1615 1616 bdev = dsa_port_bridge_dev_get(dp); 1617 1618 if (!bdev || !vlan_filtering) 1619 pvid = YT921X_VID_UNWARE; 1620 else 1621 br_vlan_get_pvid(bdev, &pvid); 1622 res = yt921x_port_set_pvid(priv, port, pvid); 1623 if (res) 1624 return res; 1625 1626 mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_TAGGED | 1627 YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED; 1628 ctrl = 0; 1629 /* Do not drop tagged frames here; let VLAN_IGR_FILTER do it */ 1630 if (vlan_filtering && !pvid) 1631 ctrl |= YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED; 1632 res = yt921x_reg_update_bits(priv, YT921X_PORTn_VLAN_CTRL1(port), 1633 mask, ctrl); 1634 if (res) 1635 return res; 1636 1637 res = yt921x_reg_toggle_bits(priv, YT921X_VLAN_IGR_FILTER, 1638 YT921X_VLAN_IGR_FILTER_PORTn(port), 1639 vlan_filtering); 1640 if (res) 1641 return res; 1642 1643 /* Turn on / off VLAN awareness */ 1644 mask = YT921X_PORT_IGR_TPIDn_CTAG_M; 1645 if (!vlan_filtering) 1646 ctrl = 0; 1647 else 1648 ctrl = YT921X_PORT_IGR_TPIDn_CTAG(0); 1649 res = yt921x_reg_update_bits(priv, YT921X_PORTn_IGR_TPID(port), 1650 mask, ctrl); 1651 if (res) 1652 return res; 1653 1654 return 0; 1655 } 1656 1657 static int 1658 yt921x_vlan_del(struct yt921x_priv *priv, int port, u16 vid) 1659 { 1660 u64 mask64; 1661 1662 mask64 = YT921X_VLAN_CTRL_PORTS(port) | 1663 YT921X_VLAN_CTRL_UNTAG_PORTn(port); 1664 1665 return yt921x_reg64_clear_bits(priv, YT921X_VLANn_CTRL(vid), mask64); 1666 } 1667 1668 static int 1669 yt921x_vlan_add(struct yt921x_priv *priv, int port, u16 vid, bool untagged) 1670 { 1671 u64 mask64; 1672 u64 ctrl64; 1673 1674 mask64 = YT921X_VLAN_CTRL_PORTn(port) | 1675 YT921X_VLAN_CTRL_PORTS(priv->cpu_ports_mask); 1676 ctrl64 = mask64; 1677 1678 mask64 |= YT921X_VLAN_CTRL_UNTAG_PORTn(port); 1679 if (untagged) 1680 ctrl64 |= YT921X_VLAN_CTRL_UNTAG_PORTn(port); 1681 1682 return yt921x_reg64_update_bits(priv, YT921X_VLANn_CTRL(vid), 1683 mask64, ctrl64); 1684 } 1685 1686 static int 1687 yt921x_pvid_clear(struct yt921x_priv *priv, int port) 1688 { 1689 struct dsa_port *dp = dsa_to_port(&priv->ds, port); 1690 bool vlan_filtering; 1691 u32 mask; 1692 int res; 1693 1694 vlan_filtering = dsa_port_is_vlan_filtering(dp); 1695 1696 res = yt921x_port_set_pvid(priv, port, 1697 vlan_filtering ? 
0 : YT921X_VID_UNWARE); 1698 if (res) 1699 return res; 1700 1701 if (vlan_filtering) { 1702 mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED; 1703 res = yt921x_reg_set_bits(priv, YT921X_PORTn_VLAN_CTRL1(port), 1704 mask); 1705 if (res) 1706 return res; 1707 } 1708 1709 return 0; 1710 } 1711 1712 static int 1713 yt921x_pvid_set(struct yt921x_priv *priv, int port, u16 vid) 1714 { 1715 struct dsa_port *dp = dsa_to_port(&priv->ds, port); 1716 bool vlan_filtering; 1717 u32 mask; 1718 int res; 1719 1720 vlan_filtering = dsa_port_is_vlan_filtering(dp); 1721 1722 if (vlan_filtering) { 1723 res = yt921x_port_set_pvid(priv, port, vid); 1724 if (res) 1725 return res; 1726 } 1727 1728 mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED; 1729 res = yt921x_reg_clear_bits(priv, YT921X_PORTn_VLAN_CTRL1(port), mask); 1730 if (res) 1731 return res; 1732 1733 return 0; 1734 } 1735 1736 static int 1737 yt921x_dsa_port_vlan_filtering(struct dsa_switch *ds, int port, 1738 bool vlan_filtering, 1739 struct netlink_ext_ack *extack) 1740 { 1741 struct yt921x_priv *priv = to_yt921x_priv(ds); 1742 int res; 1743 1744 if (dsa_is_cpu_port(ds, port)) 1745 return 0; 1746 1747 mutex_lock(&priv->reg_lock); 1748 res = yt921x_vlan_filtering(priv, port, vlan_filtering); 1749 mutex_unlock(&priv->reg_lock); 1750 1751 return res; 1752 } 1753 1754 static int 1755 yt921x_dsa_port_vlan_del(struct dsa_switch *ds, int port, 1756 const struct switchdev_obj_port_vlan *vlan) 1757 { 1758 struct yt921x_priv *priv = to_yt921x_priv(ds); 1759 u16 vid = vlan->vid; 1760 u16 pvid; 1761 int res; 1762 1763 if (dsa_is_cpu_port(ds, port)) 1764 return 0; 1765 1766 mutex_lock(&priv->reg_lock); 1767 do { 1768 struct dsa_port *dp = dsa_to_port(ds, port); 1769 struct net_device *bdev; 1770 1771 res = yt921x_vlan_del(priv, port, vid); 1772 if (res) 1773 break; 1774 1775 bdev = dsa_port_bridge_dev_get(dp); 1776 if (bdev) { 1777 br_vlan_get_pvid(bdev, &pvid); 1778 if (pvid == vid) 1779 res = yt921x_pvid_clear(priv, port); 1780 } 1781 } while (0); 1782 mutex_unlock(&priv->reg_lock); 1783 1784 return res; 1785 } 1786 1787 static int 1788 yt921x_dsa_port_vlan_add(struct dsa_switch *ds, int port, 1789 const struct switchdev_obj_port_vlan *vlan, 1790 struct netlink_ext_ack *extack) 1791 { 1792 struct yt921x_priv *priv = to_yt921x_priv(ds); 1793 u16 vid = vlan->vid; 1794 u16 pvid; 1795 int res; 1796 1797 /* CPU port is supposed to be a member of every VLAN; see 1798 * yt921x_vlan_add() and yt921x_port_setup() 1799 */ 1800 if (dsa_is_cpu_port(ds, port)) 1801 return 0; 1802 1803 mutex_lock(&priv->reg_lock); 1804 do { 1805 struct dsa_port *dp = dsa_to_port(ds, port); 1806 struct net_device *bdev; 1807 1808 res = yt921x_vlan_add(priv, port, vid, 1809 vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED); 1810 if (res) 1811 break; 1812 1813 bdev = dsa_port_bridge_dev_get(dp); 1814 if (bdev) { 1815 if (vlan->flags & BRIDGE_VLAN_INFO_PVID) { 1816 res = yt921x_pvid_set(priv, port, vid); 1817 } else { 1818 br_vlan_get_pvid(bdev, &pvid); 1819 if (pvid == vid) 1820 res = yt921x_pvid_clear(priv, port); 1821 } 1822 } 1823 } while (0); 1824 mutex_unlock(&priv->reg_lock); 1825 1826 return res; 1827 } 1828 1829 static int yt921x_userport_standalone(struct yt921x_priv *priv, int port) 1830 { 1831 u32 mask; 1832 u32 ctrl; 1833 int res; 1834 1835 ctrl = ~priv->cpu_ports_mask; 1836 res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port), ctrl); 1837 if (res) 1838 return res; 1839 1840 /* Turn off FDB learning to prevent FDB pollution */ 1841 mask = YT921X_PORT_LEARN_DIS; 1842 res = 
yt921x_reg_set_bits(priv, YT921X_PORTn_LEARN(port), mask); 1843 if (res) 1844 return res; 1845 1846 /* Turn off VLAN awareness */ 1847 mask = YT921X_PORT_IGR_TPIDn_CTAG_M; 1848 res = yt921x_reg_clear_bits(priv, YT921X_PORTn_IGR_TPID(port), mask); 1849 if (res) 1850 return res; 1851 1852 /* Unrelated since learning is off and all packets are trapped; 1853 * set it anyway 1854 */ 1855 res = yt921x_port_set_pvid(priv, port, YT921X_VID_UNWARE); 1856 if (res) 1857 return res; 1858 1859 return 0; 1860 } 1861 1862 static int yt921x_userport_bridge(struct yt921x_priv *priv, int port) 1863 { 1864 u32 mask; 1865 int res; 1866 1867 mask = YT921X_PORT_LEARN_DIS; 1868 res = yt921x_reg_clear_bits(priv, YT921X_PORTn_LEARN(port), mask); 1869 if (res) 1870 return res; 1871 1872 return 0; 1873 } 1874 1875 static int yt921x_isolate(struct yt921x_priv *priv, int port) 1876 { 1877 u32 mask; 1878 int res; 1879 1880 mask = BIT(port); 1881 for (int i = 0; i < YT921X_PORT_NUM; i++) { 1882 if ((BIT(i) & priv->cpu_ports_mask) || i == port) 1883 continue; 1884 1885 res = yt921x_reg_set_bits(priv, YT921X_PORTn_ISOLATION(i), 1886 mask); 1887 if (res) 1888 return res; 1889 } 1890 1891 return 0; 1892 } 1893 1894 /* Make sure to include the CPU port in ports_mask, or your bridge will 1895 * not have it. 1896 */ 1897 static int yt921x_bridge(struct yt921x_priv *priv, u16 ports_mask) 1898 { 1899 unsigned long targets_mask = ports_mask & ~priv->cpu_ports_mask; 1900 u32 isolated_mask; 1901 u32 ctrl; 1902 int port; 1903 int res; 1904 1905 isolated_mask = 0; 1906 for_each_set_bit(port, &targets_mask, YT921X_PORT_NUM) { 1907 struct yt921x_port *pp = &priv->ports[port]; 1908 1909 if (pp->isolated) 1910 isolated_mask |= BIT(port); 1911 } 1912 1913 /* Block from non-cpu bridge ports ... */ 1914 for_each_set_bit(port, &targets_mask, YT921X_PORT_NUM) { 1915 struct yt921x_port *pp = &priv->ports[port]; 1916 1917 /* to non-bridge ports */ 1918 ctrl = ~ports_mask; 1919 /* to isolated ports when isolated */ 1920 if (pp->isolated) 1921 ctrl |= isolated_mask; 1922 /* to itself when non-hairpin */ 1923 if (!pp->hairpin) 1924 ctrl |= BIT(port); 1925 else 1926 ctrl &= ~BIT(port); 1927 1928 res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port), 1929 ctrl); 1930 if (res) 1931 return res; 1932 } 1933 1934 return 0; 1935 } 1936 1937 static int yt921x_bridge_leave(struct yt921x_priv *priv, int port) 1938 { 1939 int res; 1940 1941 res = yt921x_userport_standalone(priv, port); 1942 if (res) 1943 return res; 1944 1945 res = yt921x_isolate(priv, port); 1946 if (res) 1947 return res; 1948 1949 return 0; 1950 } 1951 1952 static int 1953 yt921x_bridge_join(struct yt921x_priv *priv, int port, u16 ports_mask) 1954 { 1955 int res; 1956 1957 res = yt921x_userport_bridge(priv, port); 1958 if (res) 1959 return res; 1960 1961 res = yt921x_bridge(priv, ports_mask); 1962 if (res) 1963 return res; 1964 1965 return 0; 1966 } 1967 1968 static u32 1969 dsa_bridge_ports(struct dsa_switch *ds, const struct net_device *bdev) 1970 { 1971 struct dsa_port *dp; 1972 u32 mask = 0; 1973 1974 dsa_switch_for_each_user_port(dp, ds) 1975 if (dsa_port_offloads_bridge_dev(dp, bdev)) 1976 mask |= BIT(dp->index); 1977 1978 return mask; 1979 } 1980 1981 static int 1982 yt921x_bridge_flags(struct yt921x_priv *priv, int port, 1983 struct switchdev_brport_flags flags) 1984 { 1985 struct yt921x_port *pp = &priv->ports[port]; 1986 bool do_flush; 1987 u32 mask; 1988 int res; 1989 1990 if (flags.mask & BR_LEARNING) { 1991 bool learning = flags.val & BR_LEARNING; 1992 1993 mask = 
YT921X_PORT_LEARN_DIS; 1994 res = yt921x_reg_toggle_bits(priv, YT921X_PORTn_LEARN(port), 1995 mask, !learning); 1996 if (res) 1997 return res; 1998 } 1999 2000 /* BR_FLOOD, BR_MCAST_FLOOD: see the comment where ACT_UNK_ACTn_TRAP 2001 * is set 2002 */ 2003 2004 /* BR_BCAST_FLOOD: we can filter bcast, but cannot trap them */ 2005 2006 do_flush = false; 2007 if (flags.mask & BR_HAIRPIN_MODE) { 2008 pp->hairpin = flags.val & BR_HAIRPIN_MODE; 2009 do_flush = true; 2010 } 2011 if (flags.mask & BR_ISOLATED) { 2012 pp->isolated = flags.val & BR_ISOLATED; 2013 do_flush = true; 2014 } 2015 if (do_flush) { 2016 struct dsa_switch *ds = &priv->ds; 2017 struct dsa_port *dp = dsa_to_port(ds, port); 2018 struct net_device *bdev; 2019 2020 bdev = dsa_port_bridge_dev_get(dp); 2021 if (bdev) { 2022 u32 ports_mask; 2023 2024 ports_mask = dsa_bridge_ports(ds, bdev); 2025 ports_mask |= priv->cpu_ports_mask; 2026 res = yt921x_bridge(priv, ports_mask); 2027 if (res) 2028 return res; 2029 } 2030 } 2031 2032 return 0; 2033 } 2034 2035 static int 2036 yt921x_dsa_port_pre_bridge_flags(struct dsa_switch *ds, int port, 2037 struct switchdev_brport_flags flags, 2038 struct netlink_ext_ack *extack) 2039 { 2040 if (flags.mask & ~(BR_HAIRPIN_MODE | BR_LEARNING | BR_FLOOD | 2041 BR_MCAST_FLOOD | BR_ISOLATED)) 2042 return -EINVAL; 2043 return 0; 2044 } 2045 2046 static int 2047 yt921x_dsa_port_bridge_flags(struct dsa_switch *ds, int port, 2048 struct switchdev_brport_flags flags, 2049 struct netlink_ext_ack *extack) 2050 { 2051 struct yt921x_priv *priv = to_yt921x_priv(ds); 2052 int res; 2053 2054 if (dsa_is_cpu_port(ds, port)) 2055 return 0; 2056 2057 mutex_lock(&priv->reg_lock); 2058 res = yt921x_bridge_flags(priv, port, flags); 2059 mutex_unlock(&priv->reg_lock); 2060 2061 return res; 2062 } 2063 2064 static void 2065 yt921x_dsa_port_bridge_leave(struct dsa_switch *ds, int port, 2066 struct dsa_bridge bridge) 2067 { 2068 struct yt921x_priv *priv = to_yt921x_priv(ds); 2069 struct device *dev = to_device(priv); 2070 int res; 2071 2072 if (dsa_is_cpu_port(ds, port)) 2073 return; 2074 2075 mutex_lock(&priv->reg_lock); 2076 res = yt921x_bridge_leave(priv, port); 2077 mutex_unlock(&priv->reg_lock); 2078 2079 if (res) 2080 dev_err(dev, "Failed to %s port %d: %i\n", "unbridge", 2081 port, res); 2082 } 2083 2084 static int 2085 yt921x_dsa_port_bridge_join(struct dsa_switch *ds, int port, 2086 struct dsa_bridge bridge, bool *tx_fwd_offload, 2087 struct netlink_ext_ack *extack) 2088 { 2089 struct yt921x_priv *priv = to_yt921x_priv(ds); 2090 u16 ports_mask; 2091 int res; 2092 2093 if (dsa_is_cpu_port(ds, port)) 2094 return 0; 2095 2096 ports_mask = dsa_bridge_ports(ds, bridge.dev); 2097 ports_mask |= priv->cpu_ports_mask; 2098 2099 mutex_lock(&priv->reg_lock); 2100 res = yt921x_bridge_join(priv, port, ports_mask); 2101 mutex_unlock(&priv->reg_lock); 2102 2103 return res; 2104 } 2105 2106 static int yt921x_port_down(struct yt921x_priv *priv, int port) 2107 { 2108 u32 mask; 2109 int res; 2110 2111 mask = YT921X_PORT_LINK | YT921X_PORT_RX_MAC_EN | YT921X_PORT_TX_MAC_EN; 2112 res = yt921x_reg_clear_bits(priv, YT921X_PORTn_CTRL(port), mask); 2113 if (res) 2114 return res; 2115 2116 if (yt921x_port_is_external(port)) { 2117 mask = YT921X_SERDES_LINK; 2118 res = yt921x_reg_clear_bits(priv, YT921X_SERDESn(port), mask); 2119 if (res) 2120 return res; 2121 2122 mask = YT921X_XMII_LINK; 2123 res = yt921x_reg_clear_bits(priv, YT921X_XMIIn(port), mask); 2124 if (res) 2125 return res; 2126 } 2127 2128 return 0; 2129 } 2130 2131 static int 2132 

static int
yt921x_port_up(struct yt921x_priv *priv, int port, unsigned int mode,
	       phy_interface_t interface, int speed, int duplex,
	       bool tx_pause, bool rx_pause)
{
	u32 mask;
	u32 ctrl;
	int res;

	switch (speed) {
	case SPEED_10:
		ctrl = YT921X_PORT_SPEED_10;
		break;
	case SPEED_100:
		ctrl = YT921X_PORT_SPEED_100;
		break;
	case SPEED_1000:
		ctrl = YT921X_PORT_SPEED_1000;
		break;
	case SPEED_2500:
		ctrl = YT921X_PORT_SPEED_2500;
		break;
	case SPEED_10000:
		ctrl = YT921X_PORT_SPEED_10000;
		break;
	default:
		return -EINVAL;
	}
	if (duplex == DUPLEX_FULL)
		ctrl |= YT921X_PORT_DUPLEX_FULL;
	if (tx_pause)
		ctrl |= YT921X_PORT_TX_PAUSE;
	if (rx_pause)
		ctrl |= YT921X_PORT_RX_PAUSE;
	ctrl |= YT921X_PORT_RX_MAC_EN | YT921X_PORT_TX_MAC_EN;
	res = yt921x_reg_write(priv, YT921X_PORTn_CTRL(port), ctrl);
	if (res)
		return res;

	if (yt921x_port_is_external(port)) {
		mask = YT921X_SERDES_SPEED_M;
		switch (speed) {
		case SPEED_10:
			ctrl = YT921X_SERDES_SPEED_10;
			break;
		case SPEED_100:
			ctrl = YT921X_SERDES_SPEED_100;
			break;
		case SPEED_1000:
			ctrl = YT921X_SERDES_SPEED_1000;
			break;
		case SPEED_2500:
			ctrl = YT921X_SERDES_SPEED_2500;
			break;
		case SPEED_10000:
			ctrl = YT921X_SERDES_SPEED_10000;
			break;
		default:
			return -EINVAL;
		}
		mask |= YT921X_SERDES_DUPLEX_FULL;
		if (duplex == DUPLEX_FULL)
			ctrl |= YT921X_SERDES_DUPLEX_FULL;
		mask |= YT921X_SERDES_TX_PAUSE;
		if (tx_pause)
			ctrl |= YT921X_SERDES_TX_PAUSE;
		mask |= YT921X_SERDES_RX_PAUSE;
		if (rx_pause)
			ctrl |= YT921X_SERDES_RX_PAUSE;
		mask |= YT921X_SERDES_LINK;
		ctrl |= YT921X_SERDES_LINK;
		res = yt921x_reg_update_bits(priv, YT921X_SERDESn(port),
					     mask, ctrl);
		if (res)
			return res;

		mask = YT921X_XMII_LINK;
		res = yt921x_reg_set_bits(priv, YT921X_XMIIn(port), mask);
		if (res)
			return res;

		switch (speed) {
		case SPEED_10:
			ctrl = YT921X_MDIO_POLLING_SPEED_10;
			break;
		case SPEED_100:
			ctrl = YT921X_MDIO_POLLING_SPEED_100;
			break;
		case SPEED_1000:
			ctrl = YT921X_MDIO_POLLING_SPEED_1000;
			break;
		case SPEED_2500:
			ctrl = YT921X_MDIO_POLLING_SPEED_2500;
			break;
		case SPEED_10000:
			ctrl = YT921X_MDIO_POLLING_SPEED_10000;
			break;
		default:
			return -EINVAL;
		}
		if (duplex == DUPLEX_FULL)
			ctrl |= YT921X_MDIO_POLLING_DUPLEX_FULL;
		ctrl |= YT921X_MDIO_POLLING_LINK;
		res = yt921x_reg_write(priv, YT921X_MDIO_POLLINGn(port), ctrl);
		if (res)
			return res;
	}

	return 0;
}
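
/* Interface selection for an external port, reached via the phylink
 * .mac_config path: mux the port onto the SERDES (rather than XMII) block
 * and program the SERDES protocol mode. Speed, duplex and pause are applied
 * later, in yt921x_port_up().
 */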

static int
yt921x_port_config(struct yt921x_priv *priv, int port, unsigned int mode,
		   phy_interface_t interface)
{
	struct device *dev = to_device(priv);
	u32 mask;
	u32 ctrl;
	int res;

	if (!yt921x_port_is_external(port)) {
		if (interface != PHY_INTERFACE_MODE_INTERNAL) {
			dev_err(dev, "Wrong mode %d on port %d\n",
				interface, port);
			return -EINVAL;
		}
		return 0;
	}

	switch (interface) {
	/* SERDES */
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_100BASEX:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mask = YT921X_SERDES_CTRL_PORTn(port);
		res = yt921x_reg_set_bits(priv, YT921X_SERDES_CTRL, mask);
		if (res)
			return res;

		mask = YT921X_XMII_CTRL_PORTn(port);
		res = yt921x_reg_clear_bits(priv, YT921X_XMII_CTRL, mask);
		if (res)
			return res;

		mask = YT921X_SERDES_MODE_M;
		switch (interface) {
		case PHY_INTERFACE_MODE_SGMII:
			ctrl = YT921X_SERDES_MODE_SGMII;
			break;
		case PHY_INTERFACE_MODE_100BASEX:
			ctrl = YT921X_SERDES_MODE_100BASEX;
			break;
		case PHY_INTERFACE_MODE_1000BASEX:
			ctrl = YT921X_SERDES_MODE_1000BASEX;
			break;
		case PHY_INTERFACE_MODE_2500BASEX:
			ctrl = YT921X_SERDES_MODE_2500BASEX;
			break;
		default:
			return -EINVAL;
		}
		res = yt921x_reg_update_bits(priv, YT921X_SERDESn(port),
					     mask, ctrl);
		if (res)
			return res;

		break;
	/* add XMII support here */
	default:
		return -EINVAL;
	}

	return 0;
}

static void
yt921x_phylink_mac_link_down(struct phylink_config *config, unsigned int mode,
			     phy_interface_t interface)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct yt921x_priv *priv = to_yt921x_priv(dp->ds);
	int port = dp->index;
	int res;

	/* No need to sync; the port control block is held until device
	 * removal
	 */
	cancel_delayed_work(&priv->ports[port].mib_read);

	mutex_lock(&priv->reg_lock);
	res = yt921x_port_down(priv, port);
	mutex_unlock(&priv->reg_lock);

	if (res)
		dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "bring down",
			port, res);
}

static void
yt921x_phylink_mac_link_up(struct phylink_config *config,
			   struct phy_device *phydev, unsigned int mode,
			   phy_interface_t interface, int speed, int duplex,
			   bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct yt921x_priv *priv = to_yt921x_priv(dp->ds);
	int port = dp->index;
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_port_up(priv, port, mode, interface, speed, duplex,
			     tx_pause, rx_pause);
	mutex_unlock(&priv->reg_lock);

	if (res)
		dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "bring up",
			port, res);

	schedule_delayed_work(&priv->ports[port].mib_read, 0);
}

static void
yt921x_phylink_mac_config(struct phylink_config *config, unsigned int mode,
			  const struct phylink_link_state *state)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct yt921x_priv *priv = to_yt921x_priv(dp->ds);
	int port = dp->index;
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_port_config(priv, port, mode, state->interface);
	mutex_unlock(&priv->reg_lock);

	if (res)
		dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "config",
			port, res);
}
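
/* phylink validates the configured interface against the masks filled in
 * below, so the SERDES modes declared for the external ports correspond to
 * the usual devicetree phy-mode strings, e.g. "sgmii", "100base-x",
 * "1000base-x" or "2500base-x" (illustration only).
 */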

static void
yt921x_dsa_phylink_get_caps(struct dsa_switch *ds, int port,
			    struct phylink_config *config)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	const struct yt921x_info *info = priv->info;

	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000;

	if (info->internal_mask & BIT(port)) {
		/* Port 10 for MCU should probably go here too. But since that
		 * is still untested, turn it down for the moment by letting it
		 * fall to the default branch.
		 */
		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
			  config->supported_interfaces);
	} else if (info->external_mask & BIT(port)) {
		/* TODO: external ports may support SERDES only, XMII only, or
		 * SERDES + XMII depending on the chip. However, we can't get
		 * the accurate config table due to lack of documentation, thus
		 * we simply declare SERDES + XMII and rely on the correctness
		 * of the devicetree for now.
		 */

		/* SERDES */
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  config->supported_interfaces);
		/* REVSGMII (SGMII in PHY role) should go here, once
		 * PHY_INTERFACE_MODE_REVSGMII is introduced.
		 */
		__set_bit(PHY_INTERFACE_MODE_100BASEX,
			  config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
			  config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
			  config->supported_interfaces);
		config->mac_capabilities |= MAC_2500FD;

		/* XMII */

		/* Not tested. To add support for XMII:
		 * - Add proper interface modes below
		 * - Handle them in yt921x_port_config()
		 */
	}
	/* No such port: an empty supported_interfaces causes phylink to turn
	 * it down
	 */
}

static int yt921x_port_setup(struct yt921x_priv *priv, int port)
{
	struct dsa_switch *ds = &priv->ds;
	u32 ctrl;
	int res;

	res = yt921x_userport_standalone(priv, port);
	if (res)
		return res;

	if (dsa_is_cpu_port(ds, port)) {
		/* Egress of the CPU port is supposed to be completely
		 * controlled via tagging, so set it to one-way isolated (drop
		 * all packets without a tag).
		 */
		ctrl = ~(u32)0;
		res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port),
				       ctrl);
		if (res)
			return res;

		/* To simplify FDB "isolation" simulation, we also disable
		 * learning on the CPU port, and let software identify packets
		 * going toward the CPU (either trapped or matching a static
		 * FDB entry, no matter which bridge that entry is for), which
		 * is already done by yt921x_userport_standalone(). As a
		 * result, VLAN awareness becomes irrelevant on the CPU port
		 * (set to VLAN-unaware by the way).
		 */
	}

	return 0;
}

static enum dsa_tag_protocol
yt921x_dsa_get_tag_protocol(struct dsa_switch *ds, int port,
			    enum dsa_tag_protocol m)
{
	return DSA_TAG_PROTO_YT921X;
}

static int yt921x_dsa_port_setup(struct dsa_switch *ds, int port)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_port_setup(priv, port);
	mutex_unlock(&priv->reg_lock);

	return res;
}

static int yt921x_edata_wait(struct yt921x_priv *priv, u32 *valp)
{
	u32 val = YT921X_EDATA_DATA_IDLE;
	int res;

	res = yt921x_reg_wait(priv, YT921X_EDATA_DATA,
			      YT921X_EDATA_DATA_STATUS_M, &val);
	if (res)
		return res;

	*valp = val;
	return 0;
}

static int
yt921x_edata_read_cont(struct yt921x_priv *priv, u8 addr, u8 *valp)
{
	u32 ctrl;
	u32 val;
	int res;

	ctrl = YT921X_EDATA_CTRL_ADDR(addr) | YT921X_EDATA_CTRL_READ;
	res = yt921x_reg_write(priv, YT921X_EDATA_CTRL, ctrl);
	if (res)
		return res;
	res = yt921x_edata_wait(priv, &val);
	if (res)
		return res;

	*valp = FIELD_GET(YT921X_EDATA_DATA_DATA_M, val);
	return 0;
}

static int yt921x_edata_read(struct yt921x_priv *priv, u8 addr, u8 *valp)
{
	u32 val;
	int res;

	res = yt921x_edata_wait(priv, &val);
	if (res)
		return res;
	return yt921x_edata_read_cont(priv, addr, valp);
}
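
/* Identify the exact chip variant. CHIP_ID only distinguishes the major
 * family, so the lookup over yt921x_infos runs in two stages: the first loop
 * checks that the major ID is known at all (for a precise error message),
 * then the search continues from that entry, requiring major, CHIP_MODE and
 * the extmode byte read from EDATA to all match.
 */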

static int yt921x_chip_detect(struct yt921x_priv *priv)
{
	struct device *dev = to_device(priv);
	const struct yt921x_info *info;
	u8 extmode;
	u32 chipid;
	u32 major;
	u32 mode;
	int res;

	res = yt921x_reg_read(priv, YT921X_CHIP_ID, &chipid);
	if (res)
		return res;

	major = FIELD_GET(YT921X_CHIP_ID_MAJOR, chipid);

	for (info = yt921x_infos; info->name; info++)
		if (info->major == major)
			break;
	if (!info->name) {
		dev_err(dev, "Unexpected chipid 0x%x\n", chipid);
		return -ENODEV;
	}

	res = yt921x_reg_read(priv, YT921X_CHIP_MODE, &mode);
	if (res)
		return res;
	res = yt921x_edata_read(priv, YT921X_EDATA_EXTMODE, &extmode);
	if (res)
		return res;

	for (; info->name; info++)
		if (info->major == major && info->mode == mode &&
		    info->extmode == extmode)
			break;
	if (!info->name) {
		dev_err(dev,
			"Unsupported chipid 0x%x with chipmode 0x%x 0x%x\n",
			chipid, mode, extmode);
		return -ENODEV;
	}

	/* Print chipid here since we are interested in the lower 16 bits */
	dev_info(dev,
		 "Motorcomm %s ethernet switch, chipid: 0x%x, chipmode: 0x%x 0x%x\n",
		 info->name, chipid, mode, extmode);

	priv->info = info;
	return 0;
}

static int yt921x_chip_reset(struct yt921x_priv *priv)
{
	struct device *dev = to_device(priv);
	u16 eth_p_tag;
	u32 val;
	int res;

	res = yt921x_chip_detect(priv);
	if (res)
		return res;

	/* Reset */
	res = yt921x_reg_write(priv, YT921X_RST, YT921X_RST_HW);
	if (res)
		return res;

	/* RST_HW is almost the same as a GPIO hard reset, so we need this
	 * delay.
	 */
	fsleep(YT921X_RST_DELAY_US);

	val = 0;
	res = yt921x_reg_wait(priv, YT921X_RST, ~0, &val);
	if (res)
		return res;

	/* Check for the tag EtherType; do it after reset in case you messed
	 * it up before.
	 */
	res = yt921x_reg_read(priv, YT921X_CPU_TAG_TPID, &val);
	if (res)
		return res;
	eth_p_tag = FIELD_GET(YT921X_CPU_TAG_TPID_TPID_M, val);
	if (eth_p_tag != ETH_P_YT921X) {
		dev_err(dev, "Tag type 0x%x != 0x%x\n", eth_p_tag,
			ETH_P_YT921X);
		/* Despite being possible, we choose not to set CPU_TAG_TPID,
		 * since there is no way it can be different unless you have
		 * the wrong chip.
		 */
		return -EINVAL;
	}

	return 0;
}

static int yt921x_chip_setup(struct yt921x_priv *priv)
{
	struct dsa_switch *ds = &priv->ds;
	unsigned long cpu_ports_mask;
	u64 ctrl64;
	u32 ctrl;
	int port;
	int res;

	/* Enable DSA */
	priv->cpu_ports_mask = dsa_cpu_ports(ds);

	ctrl = YT921X_EXT_CPU_PORT_TAG_EN | YT921X_EXT_CPU_PORT_PORT_EN |
	       YT921X_EXT_CPU_PORT_PORT(__ffs(priv->cpu_ports_mask));
	res = yt921x_reg_write(priv, YT921X_EXT_CPU_PORT, ctrl);
	if (res)
		return res;

	/* Enable and clear MIB */
	res = yt921x_reg_set_bits(priv, YT921X_FUNC, YT921X_FUNC_MIB);
	if (res)
		return res;

	ctrl = YT921X_MIB_CTRL_CLEAN | YT921X_MIB_CTRL_ALL_PORT;
	res = yt921x_reg_write(priv, YT921X_MIB_CTRL, ctrl);
	if (res)
		return res;

	/* Setup software switch */
	ctrl = YT921X_CPU_COPY_TO_EXT_CPU;
	res = yt921x_reg_write(priv, YT921X_CPU_COPY, ctrl);
	if (res)
		return res;

	ctrl = GENMASK(10, 0);
	res = yt921x_reg_write(priv, YT921X_FILTER_UNK_UCAST, ctrl);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_FILTER_UNK_MCAST, ctrl);
	if (res)
		return res;

	/* YT921x does not support native DSA port bridging, so we use port
	 * isolation to emulate it. However, be especially careful that port
	 * isolation takes effect _after_ FDB lookups, i.e. if an FDB entry
	 * (from another bridge) is matched and the destination port (in
	 * another bridge) is blocked, the packet will be dropped instead of
	 * being flooded to the "bridged" ports, thus we need to trap and
	 * handle those packets in software.
	 *
	 * If there is no more than one bridge, we might be able to drop them
	 * directly given some conditions are met, but we trap them in all
	 * cases for now.
	 */
	ctrl = 0;
	for (int i = 0; i < YT921X_PORT_NUM; i++)
		ctrl |= YT921X_ACT_UNK_ACTn_TRAP(i);
	/* Except for CPU ports: if any packets are sent via CPU ports without
	 * a tag, they should be dropped.
	 */
	cpu_ports_mask = priv->cpu_ports_mask;
	for_each_set_bit(port, &cpu_ports_mask, YT921X_PORT_NUM) {
		ctrl &= ~YT921X_ACT_UNK_ACTn_M(port);
		ctrl |= YT921X_ACT_UNK_ACTn_DROP(port);
	}
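	/* The resulting ctrl word holds a TRAP action for every user port and
	 * a DROP action for the CPU port(s): unknown-DA packets ingressing on
	 * user ports are handed to software, while untagged ones injected on
	 * a CPU port are dropped.
	 */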
	res = yt921x_reg_write(priv, YT921X_ACT_UNK_UCAST, ctrl);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_ACT_UNK_MCAST, ctrl);
	if (res)
		return res;

	/* Tagged VID 0 should be treated as untagged, which confuses the
	 * hardware a lot
	 */
	ctrl64 = YT921X_VLAN_CTRL_LEARN_DIS | YT921X_VLAN_CTRL_PORTS_M;
	res = yt921x_reg64_write(priv, YT921X_VLANn_CTRL(0), ctrl64);
	if (res)
		return res;

	/* Miscellaneous */
	res = yt921x_reg_set_bits(priv, YT921X_SENSOR, YT921X_SENSOR_TEMP);
	if (res)
		return res;

	return 0;
}
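
/* Rough shape of the expected devicetree (illustration only, not the
 * authoritative binding): the switch node sits on an MDIO bus and may carry
 * an "mdio" child for the internal PHYs plus an optional "mdio-external"
 * child; the reg address below is hypothetical.
 *
 *	switch@1d {
 *		compatible = "motorcomm,yt9215";
 *		reg = <0x1d>;
 *
 *		mdio {
 *			phy1: ethernet-phy@1 { reg = <1>; };
 *		};
 *
 *		// user port nodes then reference phy1 etc. via phy-handle
 *	};
 */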

static int yt921x_dsa_setup(struct dsa_switch *ds)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct device *dev = to_device(priv);
	struct device_node *np = dev->of_node;
	struct device_node *child;
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_chip_reset(priv);
	mutex_unlock(&priv->reg_lock);

	if (res)
		return res;

	/* Register the internal mdio bus. Nodes for internal ports should have
	 * a proper phy-handle pointing to their PHYs. Not enabling the
	 * internal bus is possible, though pretty weird, if the internal ports
	 * are not used.
	 */
	child = of_get_child_by_name(np, "mdio");
	if (child) {
		res = yt921x_mbus_int_init(priv, child);
		of_node_put(child);
		if (res)
			return res;
	}

	/* External mdio bus is optional */
	child = of_get_child_by_name(np, "mdio-external");
	if (child) {
		res = yt921x_mbus_ext_init(priv, child);
		of_node_put(child);
		if (res)
			return res;

		dev_err(dev, "Untested external mdio bus\n");
		return -ENODEV;
	}

	mutex_lock(&priv->reg_lock);
	res = yt921x_chip_setup(priv);
	mutex_unlock(&priv->reg_lock);

	if (res)
		return res;

	return 0;
}

static const struct phylink_mac_ops yt921x_phylink_mac_ops = {
	.mac_link_down = yt921x_phylink_mac_link_down,
	.mac_link_up = yt921x_phylink_mac_link_up,
	.mac_config = yt921x_phylink_mac_config,
};

static const struct dsa_switch_ops yt921x_dsa_switch_ops = {
	/* mib */
	.get_strings = yt921x_dsa_get_strings,
	.get_ethtool_stats = yt921x_dsa_get_ethtool_stats,
	.get_sset_count = yt921x_dsa_get_sset_count,
	.get_eth_mac_stats = yt921x_dsa_get_eth_mac_stats,
	.get_eth_ctrl_stats = yt921x_dsa_get_eth_ctrl_stats,
	.get_rmon_stats = yt921x_dsa_get_rmon_stats,
	.get_stats64 = yt921x_dsa_get_stats64,
	.get_pause_stats = yt921x_dsa_get_pause_stats,
	/* eee */
	.support_eee = dsa_supports_eee,
	.set_mac_eee = yt921x_dsa_set_mac_eee,
	/* mtu */
	.port_change_mtu = yt921x_dsa_port_change_mtu,
	.port_max_mtu = yt921x_dsa_port_max_mtu,
	/* mirror */
	.port_mirror_del = yt921x_dsa_port_mirror_del,
	.port_mirror_add = yt921x_dsa_port_mirror_add,
	/* fdb */
	.port_fdb_dump = yt921x_dsa_port_fdb_dump,
	.port_fast_age = yt921x_dsa_port_fast_age,
	.set_ageing_time = yt921x_dsa_set_ageing_time,
	.port_fdb_del = yt921x_dsa_port_fdb_del,
	.port_fdb_add = yt921x_dsa_port_fdb_add,
	.port_mdb_del = yt921x_dsa_port_mdb_del,
	.port_mdb_add = yt921x_dsa_port_mdb_add,
	/* vlan */
	.port_vlan_filtering = yt921x_dsa_port_vlan_filtering,
	.port_vlan_del = yt921x_dsa_port_vlan_del,
	.port_vlan_add = yt921x_dsa_port_vlan_add,
	/* bridge */
	.port_pre_bridge_flags = yt921x_dsa_port_pre_bridge_flags,
	.port_bridge_flags = yt921x_dsa_port_bridge_flags,
	.port_bridge_leave = yt921x_dsa_port_bridge_leave,
	.port_bridge_join = yt921x_dsa_port_bridge_join,
	/* port */
	.get_tag_protocol = yt921x_dsa_get_tag_protocol,
	.phylink_get_caps = yt921x_dsa_phylink_get_caps,
	.port_setup = yt921x_dsa_port_setup,
	/* chip */
	.setup = yt921x_dsa_setup,
};

static void yt921x_mdio_shutdown(struct mdio_device *mdiodev)
{
	struct yt921x_priv *priv = mdiodev_get_drvdata(mdiodev);

	if (!priv)
		return;

	dsa_switch_shutdown(&priv->ds);
}

static void yt921x_mdio_remove(struct mdio_device *mdiodev)
{
	struct yt921x_priv *priv = mdiodev_get_drvdata(mdiodev);

	if (!priv)
		return;

	for (size_t i = ARRAY_SIZE(priv->ports); i-- > 0; ) {
		struct yt921x_port *pp = &priv->ports[i];

		disable_delayed_work_sync(&pp->mib_read);
	}

	dsa_unregister_switch(&priv->ds);

	mutex_destroy(&priv->reg_lock);
}

static int yt921x_mdio_probe(struct mdio_device *mdiodev)
{
	struct device *dev = &mdiodev->dev;
	struct yt921x_reg_mdio *mdio;
	struct yt921x_priv *priv;
	struct dsa_switch *ds;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	mdio = devm_kzalloc(dev, sizeof(*mdio), GFP_KERNEL);
	if (!mdio)
		return -ENOMEM;

	mdio->bus = mdiodev->bus;
	mdio->addr = mdiodev->addr;
	mdio->switchid = 0;

	mutex_init(&priv->reg_lock);

	priv->reg_ops = &yt921x_reg_ops_mdio;
	priv->reg_ctx = mdio;

	for (size_t i = 0; i < ARRAY_SIZE(priv->ports); i++) {
		struct yt921x_port *pp = &priv->ports[i];

		pp->index = i;
		INIT_DELAYED_WORK(&pp->mib_read, yt921x_poll_mib);
	}

	ds = &priv->ds;
	ds->dev = dev;
	ds->assisted_learning_on_cpu_port = true;
	ds->priv = priv;
	ds->ops = &yt921x_dsa_switch_ops;
	ds->phylink_mac_ops = &yt921x_phylink_mac_ops;
	ds->num_ports = YT921X_PORT_NUM;

	mdiodev_set_drvdata(mdiodev, priv);

	return dsa_register_switch(ds);
}

static const struct of_device_id yt921x_of_match[] = {
	{ .compatible = "motorcomm,yt9215" },
	{}
};
MODULE_DEVICE_TABLE(of, yt921x_of_match);

static struct mdio_driver yt921x_mdio_driver = {
	.probe = yt921x_mdio_probe,
	.remove = yt921x_mdio_remove,
	.shutdown = yt921x_mdio_shutdown,
	.mdiodrv.driver = {
		.name = YT921X_NAME,
		.of_match_table = yt921x_of_match,
	},
};

mdio_module_driver(yt921x_mdio_driver);

MODULE_AUTHOR("David Yang <mmyangfl@gmail.com>");
MODULE_DESCRIPTION("Driver for Motorcomm YT921x Switch");
MODULE_LICENSE("GPL");