// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Motorcomm YT921x Switch
 *
 * Should work on YT9213/YT9214/YT9215/YT9218, but only tested on YT9215+SGMII,
 * be sure to do your own checks before porting to another chip.
 *
 * Copyright (c) 2025 David Yang
 */

#include <linux/dcbnl.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/if_hsr.h>
#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/sort.h>

#include <net/dsa.h>
#include <net/dscp.h>
#include <net/ieee8021q.h>

#include "yt921x.h"

/* Descriptor for one hardware MIB counter */
struct yt921x_mib_desc {
	/* Counter width, in 32-bit registers (1 or 2) */
	unsigned int size;
	/* Register offset from the per-port MIB data base */
	unsigned int offset;
	/* ethtool stat name; NULL when the counter is only exposed through
	 * the structured *_stats() callbacks
	 */
	const char *name;
};

#define MIB_DESC(_size, _offset, _name) \
	{_size, _offset, _name}

/* Must agree with yt921x_mib
 *
 * Unstructured fields (name != NULL) will appear in get_ethtool_stats(),
 * structured go to their *_stats() methods, but we need their sizes and offsets
 * to perform 32bit MIB overflow wraparound.
 */
static const struct yt921x_mib_desc yt921x_mib_descs[] = {
	MIB_DESC(1, YT921X_MIB_DATA_RX_BROADCAST, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_PAUSE, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_MULTICAST, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_CRC_ERR, NULL),

	MIB_DESC(1, YT921X_MIB_DATA_RX_ALIGN_ERR, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_UNDERSIZE_ERR, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_FRAG_ERR, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_64, NULL),

	MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_65_TO_127, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_128_TO_255, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_256_TO_511, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_512_TO_1023, NULL),

	MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_1024_TO_1518, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_1519_TO_MAX, NULL),
	MIB_DESC(2, YT921X_MIB_DATA_RX_GOOD_BYTES, NULL),

	MIB_DESC(2, YT921X_MIB_DATA_RX_BAD_BYTES, "RxBadBytes"),
	MIB_DESC(1, YT921X_MIB_DATA_RX_OVERSIZE_ERR, NULL),

	MIB_DESC(1, YT921X_MIB_DATA_RX_DROPPED, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_BROADCAST, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_PAUSE, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_MULTICAST, NULL),

	MIB_DESC(1, YT921X_MIB_DATA_TX_UNDERSIZE_ERR, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_64, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_65_TO_127, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_128_TO_255, NULL),

	MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_256_TO_511, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_512_TO_1023, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_1024_TO_1518, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_1519_TO_MAX, NULL),

	MIB_DESC(2, YT921X_MIB_DATA_TX_GOOD_BYTES, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_COLLISION, NULL),

	MIB_DESC(1, YT921X_MIB_DATA_TX_EXCESSIVE_COLLISION, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_MULTIPLE_COLLISION, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_SINGLE_COLLISION, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_PKT, NULL),

	MIB_DESC(1, YT921X_MIB_DATA_TX_DEFERRED, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_LATE_COLLISION, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_OAM, "RxOAM"),
	MIB_DESC(1, YT921X_MIB_DATA_TX_OAM, "TxOAM"),
};

/* Static description of one chip variant in the YT921x family */
struct yt921x_info {
	const char *name;
	u16 major;
	/* Unknown, seems to be plain enumeration */
	u8 mode;
	u8 extmode;
	/* Ports with integral GbE PHYs, not including MCU Port 10 */
	u16 internal_mask;
	/* TODO: see comments in yt921x_dsa_phylink_get_caps() */
	u16 external_mask;
};

#define YT921X_PORT_MASK_INTn(port)	BIT(port)
#define YT921X_PORT_MASK_INT0_n(n)	GENMASK((n) - 1, 0)
#define YT921X_PORT_MASK_EXT0		BIT(8)
#define YT921X_PORT_MASK_EXT1		BIT(9)

/* Known variants, terminated by an empty entry.
 * NOTE(review): presumably matched against chip ID registers by probe code
 * outside this chunk -- confirm against the detection logic.
 */
static const struct yt921x_info yt921x_infos[] = {
	{
		"YT9215SC", YT9215_MAJOR, 1, 0,
		YT921X_PORT_MASK_INT0_n(5),
		YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
	},
	{
		"YT9215S", YT9215_MAJOR, 2, 0,
		YT921X_PORT_MASK_INT0_n(5),
		YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
	},
	{
		"YT9215RB", YT9215_MAJOR, 3, 0,
		YT921X_PORT_MASK_INT0_n(5),
		YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
	},
	{
		"YT9214NB", YT9215_MAJOR, 3, 2,
		YT921X_PORT_MASK_INTn(1) | YT921X_PORT_MASK_INTn(3),
		YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
	},
	{
		"YT9213NB", YT9215_MAJOR, 3, 3,
		YT921X_PORT_MASK_INTn(1) | YT921X_PORT_MASK_INTn(3),
		YT921X_PORT_MASK_EXT1,
	},
	{
		"YT9218N", YT9218_MAJOR, 0, 0,
		YT921X_PORT_MASK_INT0_n(8),
		0,
	},
	{
		"YT9218MB", YT9218_MAJOR, 1, 0,
		YT921X_PORT_MASK_INT0_n(8),
		YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
	},
	{}
};

#define YT921X_NAME			"yt921x"

#define YT921X_VID_UNWARE		4095

#define YT921X_POLL_SLEEP_US		10000
#define YT921X_POLL_TIMEOUT_US		100000

/* The interval should be small enough to avoid overflow of 32bit MIBs.
 *
 * Until we can read MIBs from stats64 call directly (i.e. sleep
 * there), we have to poll stats more frequently than it is actually needed.
 * For overflow protection, normally, 100 sec interval should have been OK.
 */
#define YT921X_STATS_INTERVAL_JIFFIES	(3 * HZ)

/* Context for register access over the MDIO (SMI) transport */
struct yt921x_reg_mdio {
	struct mii_bus *bus;
	int addr;
	/* SWITCH_ID_1 / SWITCH_ID_0 of the device
	 *
	 * This is a way to multiplex multiple devices on the same MII phyaddr
	 * and should be configurable in DT. However, MDIO core simply doesn't
	 * allow multiple devices over one reg addr, so this is a fixed value
	 * for now until a solution is found.
	 *
	 * Keep this because we need switchid to form MII regaddrs anyway.
	 */
	unsigned char switchid;
};

/* TODO: SPI/I2C */

#define to_yt921x_priv(_ds)	container_of_const(_ds, struct yt921x_priv, ds)
#define to_device(priv)		((priv)->ds.dev)

/* Read a 32-bit switch register. Caller must hold priv->reg_lock. */
static int yt921x_reg_read(struct yt921x_priv *priv, u32 reg, u32 *valp)
{
	WARN_ON(!mutex_is_locked(&priv->reg_lock));

	return priv->reg_ops->read(priv->reg_ctx, reg, valp);
}

/* Write a 32-bit switch register. Caller must hold priv->reg_lock. */
static int yt921x_reg_write(struct yt921x_priv *priv, u32 reg, u32 val)
{
	WARN_ON(!mutex_is_locked(&priv->reg_lock));

	return priv->reg_ops->write(priv->reg_ctx, reg, val);
}

/* Poll @reg until (value & @mask) == *@valp or timeout.
 * On success the last full register value is stored back into *@valp.
 * Returns the poll timeout error or the read error, 0 on success.
 */
static int
yt921x_reg_wait(struct yt921x_priv *priv, u32 reg, u32 mask, u32 *valp)
{
	u32 val;
	int res;
	int ret;

	ret = read_poll_timeout(yt921x_reg_read, res,
				res || (val & mask) == *valp,
				YT921X_POLL_SLEEP_US, YT921X_POLL_TIMEOUT_US,
				false, priv, reg, &val);
	if (ret)
		return ret;
	if (res)
		return res;

	*valp = val;
	return 0;
}

/* Read-modify-write: clear @mask, OR in @val; the write is skipped when the
 * register already holds the target value.
 */
static int
yt921x_reg_update_bits(struct yt921x_priv *priv, u32 reg, u32 mask, u32 val)
{
	int res;
	u32 v;
	u32 u;

	res = yt921x_reg_read(priv, reg, &v);
	if (res)
		return res;

	u = v;
	u &= ~mask;
	u |= val;
	if (u == v)
		return 0;

	return yt921x_reg_write(priv, reg, u);
}

/* Set @mask bits (mask argument 0: nothing is cleared, @mask is ORed in) */
static int yt921x_reg_set_bits(struct yt921x_priv *priv, u32 reg, u32 mask)
{
	return yt921x_reg_update_bits(priv, reg, 0, mask);
}

static int yt921x_reg_clear_bits(struct yt921x_priv *priv, u32 reg, u32 mask)
{
	return yt921x_reg_update_bits(priv, reg, mask, 0);
}

/* Set (@set == true) or clear (@set == false) @mask in @reg */
static int
yt921x_reg_toggle_bits(struct yt921x_priv *priv, u32 reg, u32 mask, bool set)
{
	return yt921x_reg_update_bits(priv, reg, mask, !set ? 0 : mask);
}

/* Some registers, like VLANn_CTRL, should always be written in 64-bit, even if
 * you are to write only the lower / upper 32 bits.
 *
 * There is no such restriction for reading, but we still provide 64-bit read
 * wrappers so that we always handle u64 values.
 */

/* Read a 64-bit register pair: low word at @reg, high word at @reg + 4 */
static int yt921x_reg64_read(struct yt921x_priv *priv, u32 reg, u64 *valp)
{
	u32 lo;
	u32 hi;
	int res;

	res = yt921x_reg_read(priv, reg, &lo);
	if (res)
		return res;
	res = yt921x_reg_read(priv, reg + 4, &hi);
	if (res)
		return res;

	*valp = ((u64)hi << 32) | lo;
	return 0;
}

/* Write a 64-bit register pair, low word first */
static int yt921x_reg64_write(struct yt921x_priv *priv, u32 reg, u64 val)
{
	int res;

	res = yt921x_reg_write(priv, reg, (u32)val);
	if (res)
		return res;
	return yt921x_reg_write(priv, reg + 4, (u32)(val >> 32));
}

/* 64-bit read-modify-write; both words are rewritten when anything changes,
 * honoring the "always write in 64-bit" rule above.
 */
static int
yt921x_reg64_update_bits(struct yt921x_priv *priv, u32 reg, u64 mask, u64 val)
{
	int res;
	u64 v;
	u64 u;

	res = yt921x_reg64_read(priv, reg, &v);
	if (res)
		return res;

	u = v;
	u &= ~mask;
	u |= val;
	if (u == v)
		return 0;

	return yt921x_reg64_write(priv, reg, u);
}

static int yt921x_reg64_clear_bits(struct yt921x_priv *priv, u32 reg, u64 mask)
{
	return yt921x_reg64_update_bits(priv, reg, mask, 0);
}

static int
yt921x_reg_mdio_read(void *context, u32 reg, u32 *valp)
{
	struct yt921x_reg_mdio *mdio = context;
	struct mii_bus *bus = mdio->bus;
	int addr = mdio->addr;
	u32 reg_addr;
	u32 reg_data;
	u32 val;
	int res;

	/* Hold the mdio bus lock to avoid (un)locking for 4 times */
	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	/* Latch the 32-bit register address: high half first, then low half */
	reg_addr = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_ADDR |
		   YT921X_SMI_READ;
	res = __mdiobus_write(bus, addr, reg_addr, (u16)(reg >> 16));
	if (res)
		goto end;
	res = __mdiobus_write(bus, addr, reg_addr, (u16)reg);
	if (res)
		goto end;

	/* Read the 32-bit value back in two 16-bit halves, high half first */
	reg_data = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_DATA |
		   YT921X_SMI_READ;
	res = __mdiobus_read(bus, addr, reg_data);
	if (res < 0)
		goto end;
	val = (u16)res;
	res = __mdiobus_read(bus, addr, reg_data);
	if (res < 0)
		goto end;
	val = (val << 16) | (u16)res;

	*valp = val;
	res = 0;

end:
	mutex_unlock(&bus->mdio_lock);
	return res;
}

/* Write a 32-bit switch register over SMI: two 16-bit address writes followed
 * by two 16-bit data writes, high halves first.
 */
static int yt921x_reg_mdio_write(void *context, u32 reg, u32 val)
{
	struct yt921x_reg_mdio *mdio = context;
	struct mii_bus *bus = mdio->bus;
	int addr = mdio->addr;
	u32 reg_addr;
	u32 reg_data;
	int res;

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	reg_addr = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_ADDR |
		   YT921X_SMI_WRITE;
	res = __mdiobus_write(bus, addr, reg_addr, (u16)(reg >> 16));
	if (res)
		goto end;
	res = __mdiobus_write(bus, addr, reg_addr, (u16)reg);
	if (res)
		goto end;

	reg_data = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_DATA |
		   YT921X_SMI_WRITE;
	res = __mdiobus_write(bus, addr, reg_data, (u16)(val >> 16));
	if (res)
		goto end;
	res = __mdiobus_write(bus, addr, reg_data, (u16)val);
	if (res)
		goto end;

	res = 0;

end:
	mutex_unlock(&bus->mdio_lock);
	return res;
}

static
const struct yt921x_reg_ops yt921x_reg_ops_mdio = {
	.read = yt921x_reg_mdio_read,
	.write = yt921x_reg_mdio_write,
};

/* TODO: SPI/I2C */

/* Wait for the internal-PHY MBUS window to finish its current operation */
static int yt921x_intif_wait(struct yt921x_priv *priv)
{
	u32 val = 0;

	return yt921x_reg_wait(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START,
			       &val);
}

/* Read an internal PHY register through the MBUS indirection window.
 * Caller must hold priv->reg_lock.
 */
static int
yt921x_intif_read(struct yt921x_priv *priv, int port, int reg, u16 *valp)
{
	struct device *dev = to_device(priv);
	u32 mask;
	u32 ctrl;
	u32 val;
	int res;

	res = yt921x_intif_wait(priv);
	if (res)
		return res;

	mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
	       YT921X_MBUS_CTRL_OP_M;
	ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
	       YT921X_MBUS_CTRL_READ;
	res = yt921x_reg_update_bits(priv, YT921X_INT_MBUS_CTRL, mask, ctrl);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START);
	if (res)
		return res;

	res = yt921x_intif_wait(priv);
	if (res)
		return res;
	res = yt921x_reg_read(priv, YT921X_INT_MBUS_DIN, &val);
	if (res)
		return res;

	/* PHY registers are 16-bit; flag unexpected upper bits */
	if ((u16)val != val)
		dev_info(dev,
			 "%s: port %d, reg 0x%x: Expected u16, got 0x%08x\n",
			 __func__, port, reg, val);
	*valp = (u16)val;
	return 0;
}

/* Write an internal PHY register through the MBUS indirection window.
 * Caller must hold priv->reg_lock.
 */
static int
yt921x_intif_write(struct yt921x_priv *priv, int port, int reg, u16 val)
{
	u32 mask;
	u32 ctrl;
	int res;

	res = yt921x_intif_wait(priv);
	if (res)
		return res;

	mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
	       YT921X_MBUS_CTRL_OP_M;
	ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
	       YT921X_MBUS_CTRL_WRITE;
	res = yt921x_reg_update_bits(priv, YT921X_INT_MBUS_CTRL, mask, ctrl);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_INT_MBUS_DOUT, val);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_INT_MBUS_OP,
			       YT921X_MBUS_OP_START);
	if (res)
		return res;

	return yt921x_intif_wait(priv);
}

/* mii_bus .read for the internal MDIO bus; returns all-ones for
 * out-of-range addresses so the MDIO core sees "no device".
 */
static int yt921x_mbus_int_read(struct mii_bus *mbus, int port, int reg)
{
	struct yt921x_priv *priv = mbus->priv;
	u16 val;
	int res;

	if (port >= YT921X_PORT_NUM)
		return U16_MAX;

	mutex_lock(&priv->reg_lock);
	res = yt921x_intif_read(priv, port, reg, &val);
	mutex_unlock(&priv->reg_lock);

	if (res)
		return res;
	return val;
}

/* mii_bus .write for the internal MDIO bus */
static int
yt921x_mbus_int_write(struct mii_bus *mbus, int port, int reg, u16 data)
{
	struct yt921x_priv *priv = mbus->priv;
	int res;

	if (port >= YT921X_PORT_NUM)
		return -ENODEV;

	mutex_lock(&priv->reg_lock);
	res = yt921x_intif_write(priv, port, reg, data);
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* Allocate and register the MDIO bus for the integrated PHYs */
static int
yt921x_mbus_int_init(struct yt921x_priv *priv, struct device_node *mnp)
{
	struct device *dev = to_device(priv);
	struct mii_bus *mbus;
	int res;

	mbus = devm_mdiobus_alloc(dev);
	if (!mbus)
		return -ENOMEM;

	mbus->name = "YT921x internal MDIO bus";
	snprintf(mbus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
	mbus->priv = priv;
	mbus->read = yt921x_mbus_int_read;
	mbus->write = yt921x_mbus_int_write;
	mbus->parent = dev;
	/* Only addresses 0..YT921X_PORT_NUM-1 exist on this bus */
	mbus->phy_mask = (u32)~GENMASK(YT921X_PORT_NUM - 1, 0);

	res = devm_of_mdiobus_register(dev, mbus, mnp);
	if (res)
		return res;

	priv->mbus_int = mbus;

	return 0;
}

/* Wait for the external-PHY MBUS window to finish its current operation */
static int yt921x_extif_wait(struct yt921x_priv *priv)
{
	u32 val = 0;

	return yt921x_reg_wait(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START,
			       &val);
}

/* Read an external PHY register (clause 22) through the MBUS window.
 * Caller must hold priv->reg_lock.
 */
static int
yt921x_extif_read(struct yt921x_priv *priv, int port, int reg, u16 *valp)
{
	struct device *dev = to_device(priv);
	u32 mask;
	u32 ctrl;
	u32 val;
	int res;

	res = yt921x_extif_wait(priv);
	if (res)
		return res;

	mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
	       YT921X_MBUS_CTRL_TYPE_M | YT921X_MBUS_CTRL_OP_M;
	ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
	       YT921X_MBUS_CTRL_TYPE_C22 | YT921X_MBUS_CTRL_READ;
	res = yt921x_reg_update_bits(priv, YT921X_EXT_MBUS_CTRL, mask, ctrl);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START);
	if (res)
		return res;

	res = yt921x_extif_wait(priv);
	if (res)
		return res;
	res = yt921x_reg_read(priv, YT921X_EXT_MBUS_DIN, &val);
	if (res)
		return res;

	/* PHY registers are 16-bit; flag unexpected upper bits */
	if ((u16)val != val)
		dev_info(dev,
			 "%s: port %d, reg 0x%x: Expected u16, got 0x%08x\n",
			 __func__, port, reg, val);
	*valp = (u16)val;
	return 0;
}

/* Write an external PHY register (clause 22) through the MBUS window.
 * Caller must hold priv->reg_lock.
 */
static int
yt921x_extif_write(struct yt921x_priv *priv, int port, int reg, u16 val)
{
	u32 mask;
	u32 ctrl;
	int res;

	res = yt921x_extif_wait(priv);
	if (res)
		return res;

	mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
	       YT921X_MBUS_CTRL_TYPE_M | YT921X_MBUS_CTRL_OP_M;
	ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
	       YT921X_MBUS_CTRL_TYPE_C22 | YT921X_MBUS_CTRL_WRITE;
	res = yt921x_reg_update_bits(priv, YT921X_EXT_MBUS_CTRL, mask, ctrl);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_EXT_MBUS_DOUT, val);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START);
	if (res)
		return res;

	return yt921x_extif_wait(priv);
}

/* mii_bus .read for the external MDIO bus */
static int yt921x_mbus_ext_read(struct mii_bus *mbus, int port, int reg)
{
	struct yt921x_priv *priv = mbus->priv;
	u16 val;
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_extif_read(priv, port, reg, &val);
	mutex_unlock(&priv->reg_lock);

	if (res)
		return res;
	return val;
}

/* mii_bus .write for the external MDIO bus */
static int
yt921x_mbus_ext_write(struct mii_bus *mbus, int port, int reg, u16 data)
{
	struct yt921x_priv *priv = mbus->priv;
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_extif_write(priv, port, reg, data);
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* Allocate and register the MDIO bus for external PHYs */
static int
yt921x_mbus_ext_init(struct yt921x_priv *priv, struct device_node *mnp)
{
	struct device *dev = to_device(priv);
	struct mii_bus *mbus;
	int res;

	mbus = devm_mdiobus_alloc(dev);
	if (!mbus)
		return -ENOMEM;

	mbus->name = "YT921x external MDIO bus";
	snprintf(mbus->id, MII_BUS_ID_SIZE, "%s@ext", dev_name(dev));
	mbus->priv = priv;
	/* TODO: c45? */
	mbus->read = yt921x_mbus_ext_read;
	mbus->write = yt921x_mbus_ext_write;
	mbus->parent = dev;

	res = devm_of_mdiobus_register(dev, mbus, mnp);
	if (res)
		return res;

	priv->mbus_ext = mbus;

	return 0;
}

/* Read and handle overflow of 32bit MIBs. MIB buffer must be zeroed before. */
static int yt921x_read_mib(struct yt921x_priv *priv, int port)
{
	struct yt921x_port *pp = &priv->ports[port];
	struct device *dev = to_device(priv);
	struct yt921x_mib *mib = &pp->mib;
	int res = 0;

	/* Reading of yt921x_port::mib is not protected by a lock and it's vain
	 * to keep its consistency, since we have to read registers one by one
	 * and there is no way to make a snapshot of MIB stats.
	 *
	 * Writing (by this function only) is and should be protected by
	 * reg_lock.
	 */

	for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
		const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];
		u32 reg = YT921X_MIBn_DATA0(port) + desc->offset;
		/* Treats struct yt921x_mib as an array of u64 counters;
		 * relies on it agreeing field-for-field with yt921x_mib_descs
		 */
		u64 *valp = &((u64 *)mib)[i];
		u32 val0;
		u64 val;

		res = yt921x_reg_read(priv, reg, &val0);
		if (res)
			break;

		if (desc->size <= 1) {
			/* 32-bit counter: splice the fresh low word into the
			 * accumulated value; if it went backwards, the
			 * hardware counter wrapped, so add a carry.
			 */
			u64 old_val = *valp;

			val = (old_val & ~(u64)U32_MAX) | val0;
			if (val < old_val)
				val += 1ull << 32;
		} else {
			/* Native 64-bit counter in two registers */
			u32 val1;

			res = yt921x_reg_read(priv, reg + 4, &val1);
			if (res)
				break;
			val = ((u64)val1 << 32) | val0;
		}

		WRITE_ONCE(*valp, val);
	}

	/* Frame totals are derived from the size-bucket histograms */
	pp->rx_frames = mib->rx_64byte + mib->rx_65_127byte +
			mib->rx_128_255byte + mib->rx_256_511byte +
			mib->rx_512_1023byte + mib->rx_1024_1518byte +
			mib->rx_jumbo;
	pp->tx_frames = mib->tx_64byte + mib->tx_65_127byte +
			mib->tx_128_255byte + mib->tx_256_511byte +
			mib->tx_512_1023byte + mib->tx_1024_1518byte +
			mib->tx_jumbo;

	if (res)
		dev_err(dev, "Failed to %s port %d: %i\n", "read stats for",
			port, res);
	return res;
}

/* Periodic worker keeping 32-bit MIBs from wrapping unnoticed; backs off
 * (x4 interval) after a read failure.
 */
static void yt921x_poll_mib(struct work_struct *work)
{
	struct yt921x_port *pp = container_of_const(work, struct yt921x_port,
						    mib_read.work);
	/* Recover priv from the port's position in priv->ports[] */
	struct yt921x_priv *priv = (void *)(pp - pp->index) -
				   offsetof(struct yt921x_priv, ports);
	unsigned long delay = YT921X_STATS_INTERVAL_JIFFIES;
	int port = pp->index;
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_read_mib(priv, port);
	mutex_unlock(&priv->reg_lock);
	if (res)
		delay *= 4;

	schedule_delayed_work(&pp->mib_read, delay);
}

/* ethtool: emit names of the unstructured (name != NULL) counters only */
static void
yt921x_dsa_get_strings(struct dsa_switch *ds, int port, u32 stringset,
		       uint8_t *data)
{
	if (stringset != ETH_SS_STATS)
		return;

	for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
		const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];

		if (desc->name)
			ethtool_puts(&data, desc->name);
	}
}

/* ethtool: values for the counters named in yt921x_dsa_get_strings(),
 * in matching order
 */
static void
yt921x_dsa_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct yt921x_port *pp = &priv->ports[port];
	struct yt921x_mib *mib = &pp->mib;
	size_t j;

	mutex_lock(&priv->reg_lock);
	yt921x_read_mib(priv, port);
	mutex_unlock(&priv->reg_lock);

	j = 0;
	for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
		const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];

		if (!desc->name)
			continue;

		data[j] = ((u64 *)mib)[i];
		j++;
	}
}

/* ethtool: number of named counters */
static int yt921x_dsa_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	int cnt = 0;

	if (sset != ETH_SS_STATS)
		return 0;

	for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
		const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];

		if (desc->name)
			cnt++;
	}

	return cnt;
}

/* ethtool: standard IEEE 802.3 MAC statistics from the cached MIB */
static void
yt921x_dsa_get_eth_mac_stats(struct dsa_switch *ds, int port,
			     struct ethtool_eth_mac_stats *mac_stats)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct yt921x_port *pp = &priv->ports[port];
	struct yt921x_mib *mib = &pp->mib;

	mutex_lock(&priv->reg_lock);
	yt921x_read_mib(priv, port);
	mutex_unlock(&priv->reg_lock);

	mac_stats->FramesTransmittedOK = pp->tx_frames;
	mac_stats->SingleCollisionFrames = mib->tx_single_collisions;
	mac_stats->MultipleCollisionFrames = mib->tx_multiple_collisions;
	mac_stats->FramesReceivedOK = pp->rx_frames;
	mac_stats->FrameCheckSequenceErrors = mib->rx_crc_errors;
	mac_stats->AlignmentErrors = mib->rx_alignment_errors;
	mac_stats->OctetsTransmittedOK = mib->tx_good_bytes;
	mac_stats->FramesWithDeferredXmissions = mib->tx_deferred;
	mac_stats->LateCollisions = mib->tx_late_collisions;
	mac_stats->FramesAbortedDueToXSColls =
		mib->tx_aborted_errors;
	/* mac_stats->FramesLostDueToIntMACXmitError */
	/* mac_stats->CarrierSenseErrors */
	mac_stats->OctetsReceivedOK = mib->rx_good_bytes;
	/* mac_stats->FramesLostDueToIntMACRcvError */
	mac_stats->MulticastFramesXmittedOK = mib->tx_multicast;
	mac_stats->BroadcastFramesXmittedOK = mib->tx_broadcast;
	/* mac_stats->FramesWithExcessiveDeferral */
	mac_stats->MulticastFramesReceivedOK = mib->rx_multicast;
	mac_stats->BroadcastFramesReceivedOK = mib->rx_broadcast;
	/* mac_stats->InRangeLengthErrors */
	/* mac_stats->OutOfRangeLengthField */
	mac_stats->FrameTooLongErrors = mib->rx_oversize_errors;
}

/* ethtool: MAC control (pause) frame statistics */
static void
yt921x_dsa_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
			      struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct yt921x_port *pp = &priv->ports[port];
	struct yt921x_mib *mib = &pp->mib;

	mutex_lock(&priv->reg_lock);
	yt921x_read_mib(priv, port);
	mutex_unlock(&priv->reg_lock);

	ctrl_stats->MACControlFramesTransmitted = mib->tx_pause;
	ctrl_stats->MACControlFramesReceived = mib->rx_pause;
	/* ctrl_stats->UnsupportedOpcodesReceived */
}

/* Size buckets matching the hardware's RX/TX packet-size histogram MIBs */
static const struct ethtool_rmon_hist_range yt921x_rmon_ranges[] = {
	{ 0, 64 },
	{ 65, 127 },
	{ 128, 255 },
	{ 256, 511 },
	{ 512, 1023 },
	{ 1024, 1518 },
	{ 1519, YT921X_FRAME_SIZE_MAX },
	{}
};

/* ethtool: RMON histogram statistics */
static void
yt921x_dsa_get_rmon_stats(struct dsa_switch *ds, int port,
			  struct ethtool_rmon_stats *rmon_stats,
			  const struct ethtool_rmon_hist_range **ranges)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct yt921x_port *pp = &priv->ports[port];
	struct yt921x_mib *mib = &pp->mib;

	mutex_lock(&priv->reg_lock);
	yt921x_read_mib(priv, port);
	mutex_unlock(&priv->reg_lock);

	*ranges = yt921x_rmon_ranges;

	rmon_stats->undersize_pkts = mib->rx_undersize_errors;
	rmon_stats->oversize_pkts = mib->rx_oversize_errors;
	rmon_stats->fragments = mib->rx_alignment_errors;
	/* rmon_stats->jabbers */

	rmon_stats->hist[0] = mib->rx_64byte;
	rmon_stats->hist[1] = mib->rx_65_127byte;
	rmon_stats->hist[2] = mib->rx_128_255byte;
	rmon_stats->hist[3] = mib->rx_256_511byte;
	rmon_stats->hist[4] = mib->rx_512_1023byte;
	rmon_stats->hist[5] = mib->rx_1024_1518byte;
	rmon_stats->hist[6] = mib->rx_jumbo;

	rmon_stats->hist_tx[0] = mib->tx_64byte;
	rmon_stats->hist_tx[1] = mib->tx_65_127byte;
	rmon_stats->hist_tx[2] = mib->tx_128_255byte;
	rmon_stats->hist_tx[3] = mib->tx_256_511byte;
	rmon_stats->hist_tx[4] = mib->tx_512_1023byte;
	rmon_stats->hist_tx[5] = mib->tx_1024_1518byte;
	rmon_stats->hist_tx[6] = mib->tx_jumbo;
}

/* Fill rtnl_link_stats64 from the cached MIB snapshot; does not touch the
 * hardware (unlike the other *_stats() callbacks, no fresh MIB read here --
 * see the polling comment near YT921X_STATS_INTERVAL_JIFFIES).
 */
static void
yt921x_dsa_get_stats64(struct dsa_switch *ds, int port,
		       struct rtnl_link_stats64 *stats)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct yt921x_port *pp = &priv->ports[port];
	struct yt921x_mib *mib = &pp->mib;

	stats->rx_length_errors = mib->rx_undersize_errors +
				  mib->rx_fragment_errors;
	stats->rx_over_errors = mib->rx_oversize_errors;
	stats->rx_crc_errors = mib->rx_crc_errors;
	stats->rx_frame_errors = mib->rx_alignment_errors;
	/* stats->rx_fifo_errors */
	/* stats->rx_missed_errors */

	stats->tx_aborted_errors = mib->tx_aborted_errors;
	/* stats->tx_carrier_errors */
	stats->tx_fifo_errors = mib->tx_undersize_errors;
	/* stats->tx_heartbeat_errors */
	stats->tx_window_errors = mib->tx_late_collisions;

	stats->rx_packets = pp->rx_frames;
	stats->tx_packets = pp->tx_frames;
	/* Hardware byte counters include the FCS; stats64 excludes it */
	stats->rx_bytes = mib->rx_good_bytes - ETH_FCS_LEN * stats->rx_packets;
	stats->tx_bytes = mib->tx_good_bytes - ETH_FCS_LEN * stats->tx_packets;
	stats->rx_errors = stats->rx_length_errors + stats->rx_over_errors +
			   stats->rx_crc_errors + stats->rx_frame_errors;
	stats->tx_errors = stats->tx_aborted_errors + stats->tx_fifo_errors +
			   stats->tx_window_errors;
	stats->rx_dropped = mib->rx_dropped;
	/* stats->tx_dropped */
	stats->multicast = mib->rx_multicast;
	stats->collisions = mib->tx_collisions;
}

/* ethtool: pause frame statistics */
static void
yt921x_dsa_get_pause_stats(struct dsa_switch *ds, int port,
			   struct ethtool_pause_stats *pause_stats)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct yt921x_port *pp = &priv->ports[port];
	struct yt921x_mib *mib = &pp->mib;

	mutex_lock(&priv->reg_lock);
	yt921x_read_mib(priv, port);
	mutex_unlock(&priv->reg_lock);

	pause_stats->tx_pause_frames = mib->tx_pause;
	pause_stats->rx_pause_frames = mib->rx_pause;
}

/* Enable/disable EEE for @port; the global EEE strap is toggled when the
 * set of EEE-enabled ports transitions between empty and non-empty.
 * Caller must hold priv->reg_lock.
 */
static int
yt921x_set_eee(struct yt921x_priv *priv, int port, struct ethtool_keee *e)
{
	/* Poor datasheet for EEE operations; don't ask if you are confused */

	bool enable = e->eee_enabled;
	u16 new_mask;
	int res;

	/* Enable / disable global EEE */
	new_mask = priv->eee_ports_mask;
	new_mask &= ~BIT(port);
	new_mask |= !enable ?
		    0 : BIT(port);

	if (!!new_mask != !!priv->eee_ports_mask) {
		res = yt921x_reg_toggle_bits(priv, YT921X_PON_STRAP_FUNC,
					     YT921X_PON_STRAP_EEE, !!new_mask);
		if (res)
			return res;
		res = yt921x_reg_toggle_bits(priv, YT921X_PON_STRAP_VAL,
					     YT921X_PON_STRAP_EEE, !!new_mask);
		if (res)
			return res;
	}

	priv->eee_ports_mask = new_mask;

	/* Enable / disable port EEE */
	res = yt921x_reg_toggle_bits(priv, YT921X_EEE_CTRL,
				     YT921X_EEE_CTRL_ENn(port), enable);
	if (res)
		return res;
	res = yt921x_reg_toggle_bits(priv, YT921X_EEEn_VAL(port),
				     YT921X_EEE_VAL_DATA, enable);
	if (res)
		return res;

	return 0;
}

/* DSA .set_mac_eee: locked wrapper around yt921x_set_eee() */
static int
yt921x_dsa_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_set_eee(priv, port, e);
	mutex_unlock(&priv->reg_lock);

	return res;
}

static int
yt921x_dsa_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	/* Only serves as packet filter, since the frame size is always set to
	 * maximum after reset
	 */

	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct dsa_port *dp = dsa_to_port(ds, port);
	int frame_size;
	int res;

	/* CPU port must also accommodate the DSA tag */
	frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	if (dsa_port_is_cpu(dp))
		frame_size += YT921X_TAG_LEN;

	mutex_lock(&priv->reg_lock);
	res = yt921x_reg_update_bits(priv, YT921X_MACn_FRAME(port),
				     YT921X_MAC_FRAME_SIZE_M,
				     YT921X_MAC_FRAME_SIZE(frame_size));
	mutex_unlock(&priv->reg_lock);

	return res;
}

static int yt921x_dsa_port_max_mtu(struct dsa_switch *ds, int port)
{
	/* Only called for user ports, exclude tag len here */
	return YT921X_FRAME_SIZE_MAX - ETH_HLEN - ETH_FCS_LEN - YT921X_TAG_LEN;
}

/* Remove @port from the ingress or egress mirror source mask.
 * Caller must hold priv->reg_lock.
 */
static int
yt921x_mirror_del(struct yt921x_priv *priv, int port, bool ingress)
{
	u32 mask;

	if (ingress)
		mask = YT921X_MIRROR_IGR_PORTn(port);
	else
		mask = YT921X_MIRROR_EGR_PORTn(port);
	return yt921x_reg_clear_bits(priv, YT921X_MIRROR, mask);
}

/* Add @port as a mirror source towards @to_local_port. The hardware has a
 * single sniffer port, so mirroring to a second destination is rejected.
 * Caller must hold priv->reg_lock.
 */
static int
yt921x_mirror_add(struct yt921x_priv *priv, int port, bool ingress,
		  int to_local_port, struct netlink_ext_ack *extack)
{
	u32 srcs;
	u32 ctrl;
	u32 val;
	u32 dst;
	int res;

	if (ingress)
		srcs = YT921X_MIRROR_IGR_PORTn(port);
	else
		srcs = YT921X_MIRROR_EGR_PORTn(port);
	dst = YT921X_MIRROR_PORT(to_local_port);

	res = yt921x_reg_read(priv, YT921X_MIRROR, &val);
	if (res)
		return res;

	/* other mirror tasks & different dst port -> conflict */
	if ((val & ~srcs & (YT921X_MIRROR_EGR_PORTS_M |
			    YT921X_MIRROR_IGR_PORTS_M)) &&
	    (val & YT921X_MIRROR_PORT_M) != dst) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Sniffer port is already configured, delete existing rules & retry");
		return -EBUSY;
	}

	ctrl = val & ~YT921X_MIRROR_PORT_M;
	ctrl |= srcs;
	ctrl |= dst;

	if (ctrl == val)
		return 0;

	return yt921x_reg_write(priv, YT921X_MIRROR, ctrl);
}

/* DSA .port_mirror_del: locked wrapper; failures are only logged since the
 * callback returns void
 */
static void
yt921x_dsa_port_mirror_del(struct dsa_switch *ds, int port,
			   struct dsa_mall_mirror_tc_entry *mirror)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct device *dev = to_device(priv);
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_mirror_del(priv, port, mirror->ingress);
	mutex_unlock(&priv->reg_lock);

	if (res)
		dev_err(dev, "Failed to %s port %d: %i\n", "unmirror",
			port, res);
}

/* DSA .port_mirror_add: locked wrapper around yt921x_mirror_add() */
static int
yt921x_dsa_port_mirror_add(struct dsa_switch *ds, int port,
			   struct dsa_mall_mirror_tc_entry *mirror,
			   bool ingress, struct netlink_ext_ack *extack)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_mirror_add(priv, port, ingress,
				mirror->to_local_port, extack);
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* Program the global LAG hash mode. Caller must hold priv->reg_lock. */
static int yt921x_lag_hash(struct yt921x_priv *priv, u32 ctrl, bool unique_lag,
			   struct netlink_ext_ack *extack)
{
	u32 val;
	int res;

	/* Hash Mode is global. Make sure the same Hash Mode is set to all the
	 * 2 possible lags.
	 * If we are the unique LAG we can set whatever hash mode we want.
	 * To change hash mode it's needed to remove all LAG and change the mode
	 * with the latest.
	 */
	if (unique_lag) {
		res = yt921x_reg_write(priv, YT921X_LAG_HASH, ctrl);
		if (res)
			return res;
	} else {
		res = yt921x_reg_read(priv, YT921X_LAG_HASH, &val);
		if (res)
			return res;

		if (val != ctrl) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Mismatched Hash Mode across different lags is not supported");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

/* Write LAG group @index's member list and port mask from @ports_mask.
 * Caller must hold priv->reg_lock.
 */
static int yt921x_lag_set(struct yt921x_priv *priv, u8 index, u16 ports_mask)
{
	unsigned long targets_mask = ports_mask;
	unsigned int cnt;
	u32 ctrl;
	int port;
	int res;

	cnt = 0;
	for_each_set_bit(port, &targets_mask, YT921X_PORT_NUM) {
		ctrl = YT921X_LAG_MEMBER_PORT(port);
		res = yt921x_reg_write(priv, YT921X_LAG_MEMBERnm(index, cnt),
				       ctrl);
		if (res)
			return res;

		cnt++;
	}

	ctrl = YT921X_LAG_GROUP_PORTS(ports_mask) |
	       YT921X_LAG_GROUP_MEMBER_NUM(cnt);
	return yt921x_reg_write(priv, YT921X_LAG_GROUPn(index), ctrl);
}

/* DSA .port_lag_leave: reprogram the group with the remaining members */
static int
yt921x_dsa_port_lag_leave(struct dsa_switch *ds, int port, struct dsa_lag lag)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct dsa_port *dp;
	u32 ctrl;
	int res;

	if (!lag.id)
		return -EINVAL;

	ctrl = 0;
	dsa_lag_foreach_port(dp, ds->dst, &lag)
		ctrl |= BIT(dp->index);

	mutex_lock(&priv->reg_lock);
	res = yt921x_lag_set(priv, lag.id - 1, ctrl);
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* Validate a LAG join request: member count, TX type and hash type */
static int
yt921x_dsa_port_lag_check(struct dsa_switch *ds, struct dsa_lag lag,
			  struct netdev_lag_upper_info *info,
			  struct netlink_ext_ack *extack)
{
	unsigned int members;
	struct dsa_port *dp;

	if (!lag.id)
		return -EINVAL;

	members = 0;
	dsa_lag_foreach_port(dp, ds->dst, &lag)
		/* Includes the port joining the LAG */
		members++;

	if (members > YT921X_LAG_PORT_NUM) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot offload more than 4 LAG ports");
		return -EOPNOTSUPP;
	}

	if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can only offload LAG using hash TX type");
		return -EOPNOTSUPP;
	}

	if (info->hash_type != NETDEV_LAG_HASH_L2 &&
	    info->hash_type != NETDEV_LAG_HASH_L23 &&
	    info->hash_type != NETDEV_LAG_HASH_L34) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can only offload L2 or L2+L3 or L3+L4 TX hash");
		return -EOPNOTSUPP;
	}

	return 0;
}

/* DSA .port_lag_join: build the hash-key selection from the requested
 * hash type, then program hash mode and member set.
 */
static int
yt921x_dsa_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
			 struct netdev_lag_upper_info *info,
			 struct netlink_ext_ack *extack)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct dsa_port *dp;
	bool unique_lag;
	unsigned int i;
	u32 ctrl;
	int res;

	res = yt921x_dsa_port_lag_check(ds, lag, info, extack);
	if (res)
		return res;

	ctrl = 0;
	switch (info->hash_type) {
	case NETDEV_LAG_HASH_L34:
		ctrl |= YT921X_LAG_HASH_IP_DST;
		ctrl |= YT921X_LAG_HASH_IP_SRC;
		ctrl |= YT921X_LAG_HASH_IP_PROTO;

		ctrl |= YT921X_LAG_HASH_L4_DPORT;
		ctrl |= YT921X_LAG_HASH_L4_SPORT;
		break;
	case
	     NETDEV_LAG_HASH_L23:
		ctrl |= YT921X_LAG_HASH_MAC_DA;
		ctrl |= YT921X_LAG_HASH_MAC_SA;

		ctrl |= YT921X_LAG_HASH_IP_DST;
		ctrl |= YT921X_LAG_HASH_IP_SRC;
		ctrl |= YT921X_LAG_HASH_IP_PROTO;
		break;
	case NETDEV_LAG_HASH_L2:
		ctrl |= YT921X_LAG_HASH_MAC_DA;
		ctrl |= YT921X_LAG_HASH_MAC_SA;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Check if we are the unique configured LAG */
	unique_lag = true;
	dsa_lags_foreach_id(i, ds->dst)
		if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
			unique_lag = false;
			break;
		}

	mutex_lock(&priv->reg_lock);
	do {
		res = yt921x_lag_hash(priv, ctrl, unique_lag, extack);
		if (res)
			break;

		ctrl = 0;
		dsa_lag_foreach_port(dp, ds->dst, &lag)
			ctrl |= BIT(dp->index);
		res = yt921x_lag_set(priv, lag.id - 1, ctrl);
	} while (0);
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* Wait for the pending FDB operation; on success *valp receives the
 * FDB_RESULT register value.
 */
static int yt921x_fdb_wait(struct yt921x_priv *priv, u32 *valp)
{
	struct device *dev = to_device(priv);
	u32 val = YT921X_FDB_RESULT_DONE;
	int res;

	res = yt921x_reg_wait(priv, YT921X_FDB_RESULT, YT921X_FDB_RESULT_DONE,
			      &val);
	if (res) {
		dev_err(dev, "FDB probably stuck\n");
		return res;
	}

	*valp = val;
	return 0;
}

/* Load MAC @addr and FID @vid (plus extra IO1 bits from @ctrl1, e.g.
 * the entry status) into the FDB_IN0/FDB_IN1 staging registers.
 * Note IO1 holds addr[4..5] in its low 16 bits with the FID above them.
 */
static int
yt921x_fdb_in01(struct yt921x_priv *priv, const unsigned char *addr,
		u16 vid, u32 ctrl1)
{
	u32 ctrl;
	int res;

	ctrl = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	res = yt921x_reg_write(priv, YT921X_FDB_IN0, ctrl);
	if (res)
		return res;

	ctrl = ctrl1 | YT921X_FDB_IO1_FID(vid) | (addr[4] << 8) | addr[5];
	return yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl);
}

/* Look up the entry for @addr/@vid; *indexp is set to YT921X_FDB_NUM
 * (an out-of-range index) when the entry does not exist.
 */
static int
yt921x_fdb_has(struct yt921x_priv *priv, const unsigned char *addr, u16 vid,
	       u16 *indexp)
{
	u32 ctrl;
	u32 val;
	int res;

	res = yt921x_fdb_in01(priv, addr, vid, 0);
	if (res)
		return res;

	ctrl = 0;
	res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl);
	if (res)
		return res;

	ctrl = YT921X_FDB_OP_OP_GET_ONE | YT921X_FDB_OP_START;
	res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
	if (res)
		return res;

	res = yt921x_fdb_wait(priv, &val);
	if (res)
		return res;
	if (val & YT921X_FDB_RESULT_NOTFOUND) {
		*indexp = YT921X_FDB_NUM;
		return 0;
	}

	*indexp = FIELD_GET(YT921X_FDB_RESULT_INDEX_M, val);
	return 0;
}

/* Read back the result of the last FDB lookup into @addr/@vidp/
 * @ports_maskp/@indexp/@statusp.  *ports_maskp is set to 0 when no
 * (valid) entry was found; the other outputs are then left untouched.
 */
static int
yt921x_fdb_read(struct yt921x_priv *priv, unsigned char *addr, u16 *vidp,
		u16 *ports_maskp, u16 *indexp, u8 *statusp)
{
	struct device *dev = to_device(priv);
	u16 index;
	u32 data0;
	u32 data1;
	u32 data2;
	u32 val;
	int res;

	res = yt921x_fdb_wait(priv, &val);
	if (res)
		return res;
	if (val & YT921X_FDB_RESULT_NOTFOUND) {
		*ports_maskp = 0;
		return 0;
	}
	index = FIELD_GET(YT921X_FDB_RESULT_INDEX_M, val);

	res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &data1);
	if (res)
		return res;
	if ((data1 & YT921X_FDB_IO1_STATUS_M) ==
	    YT921X_FDB_IO1_STATUS_INVALID) {
		*ports_maskp = 0;
		return 0;
	}

	res = yt921x_reg_read(priv, YT921X_FDB_OUT0, &data0);
	if (res)
		return res;
	res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &data2);
	if (res)
		return res;

	/* OUT0 holds addr[0..3], OUT1 holds addr[4..5] in its low bits */
	addr[0] = data0 >> 24;
	addr[1] = data0 >> 16;
	addr[2] = data0 >> 8;
	addr[3] = data0;
	addr[4] = data1 >> 8;
	addr[5] = data1;
	*vidp = FIELD_GET(YT921X_FDB_IO1_FID_M, data1);
	*indexp = index;
	*ports_maskp = FIELD_GET(YT921X_FDB_IO2_EGR_PORTS_M, data2);
	*statusp = FIELD_GET(YT921X_FDB_IO1_STATUS_M, data1);

	dev_dbg(dev,
		"%s: index 0x%x, mac %02x:%02x:%02x:%02x:%02x:%02x, vid %d, ports 0x%x, status %d\n",
		__func__, *indexp, addr[0], addr[1], addr[2], addr[3],
		addr[4], addr[5], *vidp, *ports_maskp, *statusp);
	return 0;
}

/* Walk the hardware FDB and report every unicast entry whose egress set
 * intersects @ports_mask through @cb.  Entry 0 is fetched explicitly
 * with GET_ONE first, then GET_NEXT iterates from index 0 upwards.
 */
static int
yt921x_fdb_dump(struct yt921x_priv *priv, u16 ports_mask,
		dsa_fdb_dump_cb_t *cb, void *data)
{
	unsigned char addr[ETH_ALEN];
	u8 status;
	u16 pmask;
	u16 index;
	u32 ctrl;
	u16 vid;
	int res;

	ctrl = YT921X_FDB_OP_INDEX(0) | YT921X_FDB_OP_MODE_INDEX |
	       YT921X_FDB_OP_OP_GET_ONE | YT921X_FDB_OP_START;
	res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
	if (res)
		return res;
	res = yt921x_fdb_read(priv, addr, &vid, &pmask, &index, &status);
	if (res)
		return res;
	if ((pmask & ports_mask) && !is_multicast_ether_addr(addr)) {
		res = cb(addr, vid,
			 status == YT921X_FDB_ENTRY_STATUS_STATIC, data);
		if (res)
			return res;
	}

	/* Restrict GET_NEXT to entries pointing at @ports_mask */
	ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask);
	res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl);
	if (res)
		return res;

	index = 0;
	do {
		ctrl = YT921X_FDB_OP_INDEX(index) | YT921X_FDB_OP_MODE_INDEX |
		       YT921X_FDB_OP_NEXT_TYPE_UCAST_PORT |
		       YT921X_FDB_OP_OP_GET_NEXT | YT921X_FDB_OP_START;
		res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
		if (res)
			return res;

		res = yt921x_fdb_read(priv, addr, &vid, &pmask, &index,
				      &status);
		if (res)
			return res;
		if (!pmask)
			break;

		if ((pmask & ports_mask) && !is_multicast_ether_addr(addr)) {
			res = cb(addr, vid,
				 status == YT921X_FDB_ENTRY_STATUS_STATIC,
				 data);
			if (res)
				return res;
		}

		/* Never call GET_NEXT with 4095, otherwise it will hang
		 * forever until a reset!
1487 */ 1488 } while (index < YT921X_FDB_NUM - 1); 1489 1490 return 0; 1491 } 1492 1493 static int 1494 yt921x_fdb_flush_raw(struct yt921x_priv *priv, u16 ports_mask, u16 vid, 1495 bool flush_static) 1496 { 1497 u32 ctrl; 1498 u32 val; 1499 int res; 1500 1501 if (vid < 4096) { 1502 ctrl = YT921X_FDB_IO1_FID(vid); 1503 res = yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl); 1504 if (res) 1505 return res; 1506 } 1507 1508 ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask); 1509 res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl); 1510 if (res) 1511 return res; 1512 1513 ctrl = YT921X_FDB_OP_OP_FLUSH | YT921X_FDB_OP_START; 1514 if (vid >= 4096) 1515 ctrl |= YT921X_FDB_OP_FLUSH_PORT; 1516 else 1517 ctrl |= YT921X_FDB_OP_FLUSH_PORT_VID; 1518 if (flush_static) 1519 ctrl |= YT921X_FDB_OP_FLUSH_STATIC; 1520 res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl); 1521 if (res) 1522 return res; 1523 1524 res = yt921x_fdb_wait(priv, &val); 1525 if (res) 1526 return res; 1527 1528 return 0; 1529 } 1530 1531 static int 1532 yt921x_fdb_flush_port(struct yt921x_priv *priv, int port, bool flush_static) 1533 { 1534 return yt921x_fdb_flush_raw(priv, BIT(port), 4096, flush_static); 1535 } 1536 1537 static int 1538 yt921x_fdb_add_index_in12(struct yt921x_priv *priv, u16 index, u16 ctrl1, 1539 u16 ctrl2) 1540 { 1541 u32 ctrl; 1542 u32 val; 1543 int res; 1544 1545 res = yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl1); 1546 if (res) 1547 return res; 1548 res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl2); 1549 if (res) 1550 return res; 1551 1552 ctrl = YT921X_FDB_OP_INDEX(index) | YT921X_FDB_OP_MODE_INDEX | 1553 YT921X_FDB_OP_OP_ADD | YT921X_FDB_OP_START; 1554 res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl); 1555 if (res) 1556 return res; 1557 1558 return yt921x_fdb_wait(priv, &val); 1559 } 1560 1561 static int 1562 yt921x_fdb_add(struct yt921x_priv *priv, const unsigned char *addr, u16 vid, 1563 u16 ports_mask) 1564 { 1565 u32 ctrl; 1566 u32 val; 1567 int res; 1568 1569 ctrl = 
	       YT921X_FDB_IO1_STATUS_STATIC;
	res = yt921x_fdb_in01(priv, addr, vid, ctrl);
	if (res)
		return res;

	ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask);
	res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl);
	if (res)
		return res;

	ctrl = YT921X_FDB_OP_OP_ADD | YT921X_FDB_OP_START;
	res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
	if (res)
		return res;

	return yt921x_fdb_wait(priv, &val);
}

/* Remove @ports_mask from the egress set of the entry for @addr/@vid;
 * delete the entry entirely once no egress port remains.
 */
static int
yt921x_fdb_leave(struct yt921x_priv *priv, const unsigned char *addr,
		 u16 vid, u16 ports_mask)
{
	u16 index;
	u32 ctrl1;
	u32 ctrl2;
	u32 ctrl;
	u32 val2;
	u32 val;
	int res;

	/* Check for presence */
	res = yt921x_fdb_has(priv, addr, vid, &index);
	if (res)
		return res;
	if (index >= YT921X_FDB_NUM)
		return 0;

	/* Check if action required */
	res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &val2);
	if (res)
		return res;

	ctrl2 = val2 & ~YT921X_FDB_IO2_EGR_PORTS(ports_mask);
	if (ctrl2 == val2)
		return 0;
	if (!(ctrl2 & YT921X_FDB_IO2_EGR_PORTS_M)) {
		/* Last egress port gone: drop the whole entry.  The DEL op
		 * reuses the IN0/IN1 key loaded by yt921x_fdb_has() above.
		 */
		ctrl = YT921X_FDB_OP_OP_DEL | YT921X_FDB_OP_START;
		res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
		if (res)
			return res;

		return yt921x_fdb_wait(priv, &val);
	}

	res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &ctrl1);
	if (res)
		return res;

	return yt921x_fdb_add_index_in12(priv, index, ctrl1, ctrl2);
}

/* Add @ports_mask to the egress set of the (static) entry for
 * @addr/@vid, creating the entry if it does not exist yet.
 */
static int
yt921x_fdb_join(struct yt921x_priv *priv, const unsigned char *addr, u16 vid,
		u16 ports_mask)
{
	u16 index;
	u32 ctrl1;
	u32 ctrl2;
	u32 val1;
	u32 val2;
	int res;

	/* Check for presence */
	res = yt921x_fdb_has(priv, addr, vid, &index);
	if (res)
		return res;
	if (index >= YT921X_FDB_NUM)
		return yt921x_fdb_add(priv, addr, vid, ports_mask);

	/* Check if action required */
	res =
	      yt921x_reg_read(priv, YT921X_FDB_OUT1, &val1);
	if (res)
		return res;
	res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &val2);
	if (res)
		return res;

	/* Force static status and OR in the new egress ports */
	ctrl1 = val1 & ~YT921X_FDB_IO1_STATUS_M;
	ctrl1 |= YT921X_FDB_IO1_STATUS_STATIC;
	ctrl2 = val2 | YT921X_FDB_IO2_EGR_PORTS(ports_mask);
	if (ctrl1 == val1 && ctrl2 == val2)
		return 0;

	return yt921x_fdb_add_index_in12(priv, index, ctrl1, ctrl2);
}

/* DSA .port_fdb_dump */
static int
yt921x_dsa_port_fdb_dump(struct dsa_switch *ds, int port,
			 dsa_fdb_dump_cb_t *cb, void *data)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	int res;

	mutex_lock(&priv->reg_lock);
	/* Hardware FDB is shared for fdb and mdb, "bridge fdb show"
	 * only wants to see unicast
	 */
	res = yt921x_fdb_dump(priv, BIT(port), cb, data);
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* DSA .port_fast_age: flush dynamic entries of @port */
static void yt921x_dsa_port_fast_age(struct dsa_switch *ds, int port)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct device *dev = to_device(priv);
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_fdb_flush_port(priv, port, false);
	mutex_unlock(&priv->reg_lock);

	if (res)
		dev_err(dev, "Failed to %s port %d: %i\n", "clear FDB for",
			port, res);
}

/* DSA .set_ageing_time */
static int
yt921x_dsa_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	u32 ctrl;
	int res;

	/* AGEING reg is set in 5s step */
	ctrl = clamp(msecs / 5000, 1, U16_MAX);

	mutex_lock(&priv->reg_lock);
	res = yt921x_reg_write(priv, YT921X_AGEING, ctrl);
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* DSA .port_fdb_del */
static int
yt921x_dsa_port_fdb_del(struct dsa_switch *ds, int port,
			const unsigned char *addr, u16 vid, struct dsa_db db)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_fdb_leave(priv, addr, vid, BIT(port));
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* DSA .port_fdb_add */
static int
yt921x_dsa_port_fdb_add(struct dsa_switch *ds, int port,
			const unsigned char *addr, u16 vid, struct dsa_db db)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_fdb_join(priv, addr, vid, BIT(port));
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* DSA .port_mdb_del: the hardware FDB is shared between unicast and
 * multicast, so mdb operations reuse the fdb helpers.
 */
static int
yt921x_dsa_port_mdb_del(struct dsa_switch *ds, int port,
			const struct switchdev_obj_port_mdb *mdb,
			struct dsa_db db)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	const unsigned char *addr = mdb->addr;
	u16 vid = mdb->vid;
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_fdb_leave(priv, addr, vid, BIT(port));
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* DSA .port_mdb_add */
static int
yt921x_dsa_port_mdb_add(struct dsa_switch *ds, int port,
			const struct switchdev_obj_port_mdb *mdb,
			struct dsa_db db)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	const unsigned char *addr = mdb->addr;
	u16 vid = mdb->vid;
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_fdb_join(priv, addr, vid, BIT(port));
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* Select how ingress VLAN tags are parsed on @port: CTAG TPID when
 * VLAN-aware, STAG TPID otherwise.
 */
static int
yt921x_vlan_aware_set(struct yt921x_priv *priv, int port, bool vlan_aware)
{
	u32 ctrl;

	/* Abuse SVLAN for PCP parsing without polluting the FDB - it just works
	 * despite YT921X_VLAN_CTRL_SVLAN_EN never being set
	 */
	if (!vlan_aware)
		ctrl = YT921X_PORT_IGR_TPIDn_STAG(0);
	else
		ctrl = YT921X_PORT_IGR_TPIDn_CTAG(0);
	return yt921x_reg_write(priv, YT921X_PORTn_IGR_TPID(port), ctrl);
}

/* Set the port-based default VLAN id (PVID) of @port */
static int
yt921x_port_set_pvid(struct yt921x_priv *priv, int port,
u16 vid) 1793 { 1794 u32 mask; 1795 u32 ctrl; 1796 1797 mask = YT921X_PORT_VLAN_CTRL_CVID_M; 1798 ctrl = YT921X_PORT_VLAN_CTRL_CVID(vid); 1799 return yt921x_reg_update_bits(priv, YT921X_PORTn_VLAN_CTRL(port), 1800 mask, ctrl); 1801 } 1802 1803 static int 1804 yt921x_vlan_filtering(struct yt921x_priv *priv, int port, bool vlan_filtering) 1805 { 1806 struct dsa_port *dp = dsa_to_port(&priv->ds, port); 1807 struct net_device *bdev; 1808 u16 pvid; 1809 u32 mask; 1810 u32 ctrl; 1811 int res; 1812 1813 bdev = dsa_port_bridge_dev_get(dp); 1814 1815 if (!bdev || !vlan_filtering) 1816 pvid = YT921X_VID_UNWARE; 1817 else 1818 br_vlan_get_pvid(bdev, &pvid); 1819 res = yt921x_port_set_pvid(priv, port, pvid); 1820 if (res) 1821 return res; 1822 1823 mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_TAGGED | 1824 YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED; 1825 ctrl = 0; 1826 /* Do not drop tagged frames here; let VLAN_IGR_FILTER do it */ 1827 if (vlan_filtering && !pvid) 1828 ctrl |= YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED; 1829 res = yt921x_reg_update_bits(priv, YT921X_PORTn_VLAN_CTRL1(port), 1830 mask, ctrl); 1831 if (res) 1832 return res; 1833 1834 res = yt921x_reg_toggle_bits(priv, YT921X_VLAN_IGR_FILTER, 1835 YT921X_VLAN_IGR_FILTER_PORTn(port), 1836 vlan_filtering); 1837 if (res) 1838 return res; 1839 1840 res = yt921x_vlan_aware_set(priv, port, vlan_filtering); 1841 if (res) 1842 return res; 1843 1844 return 0; 1845 } 1846 1847 static int 1848 yt921x_vlan_del(struct yt921x_priv *priv, int port, u16 vid) 1849 { 1850 u64 mask64; 1851 1852 mask64 = YT921X_VLAN_CTRL_PORTS(port) | 1853 YT921X_VLAN_CTRL_UNTAG_PORTn(port); 1854 1855 return yt921x_reg64_clear_bits(priv, YT921X_VLANn_CTRL(vid), mask64); 1856 } 1857 1858 static int 1859 yt921x_vlan_add(struct yt921x_priv *priv, int port, u16 vid, bool untagged) 1860 { 1861 u64 mask64; 1862 u64 ctrl64; 1863 1864 mask64 = YT921X_VLAN_CTRL_PORTn(port) | 1865 YT921X_VLAN_CTRL_PORTS(priv->cpu_ports_mask); 1866 ctrl64 = mask64; 1867 1868 
	mask64 |= YT921X_VLAN_CTRL_UNTAG_PORTn(port);
	if (untagged)
		ctrl64 |= YT921X_VLAN_CTRL_UNTAG_PORTn(port);

	return yt921x_reg64_update_bits(priv, YT921X_VLANn_CTRL(vid),
					mask64, ctrl64);
}

/* Clear the PVID of @port; while VLAN filtering, untagged ingress is
 * dropped since there is no PVID to classify it into.
 */
static int
yt921x_pvid_clear(struct yt921x_priv *priv, int port)
{
	struct dsa_port *dp = dsa_to_port(&priv->ds, port);
	bool vlan_filtering;
	u32 mask;
	int res;

	vlan_filtering = dsa_port_is_vlan_filtering(dp);

	res = yt921x_port_set_pvid(priv, port,
				   vlan_filtering ? 0 : YT921X_VID_UNWARE);
	if (res)
		return res;

	if (vlan_filtering) {
		mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED;
		res = yt921x_reg_set_bits(priv, YT921X_PORTn_VLAN_CTRL1(port),
					  mask);
		if (res)
			return res;
	}

	return 0;
}

/* Set the PVID of @port to @vid and re-accept untagged ingress */
static int
yt921x_pvid_set(struct yt921x_priv *priv, int port, u16 vid)
{
	struct dsa_port *dp = dsa_to_port(&priv->ds, port);
	bool vlan_filtering;
	u32 mask;
	int res;

	vlan_filtering = dsa_port_is_vlan_filtering(dp);

	if (vlan_filtering) {
		res = yt921x_port_set_pvid(priv, port, vid);
		if (res)
			return res;
	}

	mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED;
	res = yt921x_reg_clear_bits(priv, YT921X_PORTn_VLAN_CTRL1(port), mask);
	if (res)
		return res;

	return 0;
}

/* DSA .port_vlan_filtering */
static int
yt921x_dsa_port_vlan_filtering(struct dsa_switch *ds, int port,
			       bool vlan_filtering,
			       struct netlink_ext_ack *extack)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	int res;

	if (dsa_is_cpu_port(ds, port))
		return 0;

	mutex_lock(&priv->reg_lock);
	res = yt921x_vlan_filtering(priv, port, vlan_filtering);
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* DSA .port_vlan_del */
static int
yt921x_dsa_port_vlan_del(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_vlan *vlan)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	u16 vid = vlan->vid;
	u16 pvid;
	int res;

	if (dsa_is_cpu_port(ds, port))
		return 0;

	mutex_lock(&priv->reg_lock);
	do {
		struct dsa_port *dp = dsa_to_port(ds, port);
		struct net_device *bdev;

		res = yt921x_vlan_del(priv, port, vid);
		if (res)
			break;

		/* If the deleted VLAN was the bridge PVID, clear ours too */
		bdev = dsa_port_bridge_dev_get(dp);
		if (bdev) {
			br_vlan_get_pvid(bdev, &pvid);
			if (pvid == vid)
				res = yt921x_pvid_clear(priv, port);
		}
	} while (0);
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* DSA .port_vlan_add */
static int
yt921x_dsa_port_vlan_add(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_vlan *vlan,
			 struct netlink_ext_ack *extack)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	u16 vid = vlan->vid;
	u16 pvid;
	int res;

	/* CPU port is supposed to be a member of every VLAN; see
	 * yt921x_vlan_add() and yt921x_port_setup()
	 */
	if (dsa_is_cpu_port(ds, port))
		return 0;

	mutex_lock(&priv->reg_lock);
	do {
		struct dsa_port *dp = dsa_to_port(ds, port);
		struct net_device *bdev;

		res = yt921x_vlan_add(priv, port, vid,
				      vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
		if (res)
			break;

		bdev = dsa_port_bridge_dev_get(dp);
		if (bdev) {
			if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
				res = yt921x_pvid_set(priv, port, vid);
			} else {
				/* No longer the PVID: drop ours if it was */
				br_vlan_get_pvid(bdev, &pvid);
				if (pvid == vid)
					res = yt921x_pvid_clear(priv, port);
			}
		}
	} while (0);
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* Put @port into standalone (non-bridged) operation: isolated from all
 * but the CPU port(s), no learning, VLAN-unaware.
 */
static int yt921x_userport_standalone(struct yt921x_priv *priv, int port)
{
	u32 mask;
	u32 ctrl;
	int res;

	ctrl = ~priv->cpu_ports_mask;
	res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port), ctrl);
	if (res)
		return res;

	/*
	   Turn off FDB learning to prevent FDB pollution */
	mask = YT921X_PORT_LEARN_DIS;
	res = yt921x_reg_set_bits(priv, YT921X_PORTn_LEARN(port), mask);
	if (res)
		return res;

	/* Turn off VLAN awareness */
	res = yt921x_vlan_aware_set(priv, port, false);
	if (res)
		return res;

	/* Unrelated since learning is off and all packets are trapped;
	 * set it anyway
	 */
	res = yt921x_port_set_pvid(priv, port, YT921X_VID_UNWARE);
	if (res)
		return res;

	return 0;
}

/* Re-enable FDB learning when @port joins a bridge */
static int yt921x_userport_bridge(struct yt921x_priv *priv, int port)
{
	u32 mask;
	int res;

	mask = YT921X_PORT_LEARN_DIS;
	res = yt921x_reg_clear_bits(priv, YT921X_PORTn_LEARN(port), mask);
	if (res)
		return res;

	return 0;
}

/* Block traffic from every other non-CPU port towards @port */
static int yt921x_isolate(struct yt921x_priv *priv, int port)
{
	u32 mask;
	int res;

	mask = BIT(port);
	for (int i = 0; i < YT921X_PORT_NUM; i++) {
		if ((BIT(i) & priv->cpu_ports_mask) || i == port)
			continue;

		res = yt921x_reg_set_bits(priv, YT921X_PORTn_ISOLATION(i),
					  mask);
		if (res)
			return res;
	}

	return 0;
}

/* Make sure to include the CPU port in ports_mask, or your bridge will
 * not have it.
 */
static int yt921x_bridge(struct yt921x_priv *priv, u16 ports_mask)
{
	unsigned long targets_mask = ports_mask & ~priv->cpu_ports_mask;
	u32 isolated_mask;
	u32 ctrl;
	int port;
	int res;

	/* Collect the bridge members flagged BR_ISOLATED first */
	isolated_mask = 0;
	for_each_set_bit(port, &targets_mask, YT921X_PORT_NUM) {
		struct yt921x_port *pp = &priv->ports[port];

		if (pp->isolated)
			isolated_mask |= BIT(port);
	}

	/* Block from non-cpu bridge ports ...
	 */
	for_each_set_bit(port, &targets_mask, YT921X_PORT_NUM) {
		struct yt921x_port *pp = &priv->ports[port];

		/* to non-bridge ports */
		ctrl = ~ports_mask;
		/* to isolated ports when isolated */
		if (pp->isolated)
			ctrl |= isolated_mask;
		/* to itself when non-hairpin */
		if (!pp->hairpin)
			ctrl |= BIT(port);
		else
			ctrl &= ~BIT(port);

		res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port),
				       ctrl);
		if (res)
			return res;
	}

	return 0;
}

/* Return @port to standalone mode and isolate it from the other ports */
static int yt921x_bridge_leave(struct yt921x_priv *priv, int port)
{
	int res;

	res = yt921x_userport_standalone(priv, port);
	if (res)
		return res;

	res = yt921x_isolate(priv, port);
	if (res)
		return res;

	return 0;
}

/* Join @port to the bridge described by @ports_mask (CPU port included) */
static int
yt921x_bridge_join(struct yt921x_priv *priv, int port, u16 ports_mask)
{
	int res;

	res = yt921x_userport_bridge(priv, port);
	if (res)
		return res;

	res = yt921x_bridge(priv, ports_mask);
	if (res)
		return res;

	return 0;
}

/* Apply switchdev bridge port flags to @port.  Hairpin/isolated changes
 * require rewriting the isolation matrix of the whole bridge.
 */
static int
yt921x_bridge_flags(struct yt921x_priv *priv, int port,
		    struct switchdev_brport_flags flags)
{
	struct yt921x_port *pp = &priv->ports[port];
	bool do_flush;
	u32 mask;
	int res;

	if (flags.mask & BR_LEARNING) {
		bool learning = flags.val & BR_LEARNING;

		mask = YT921X_PORT_LEARN_DIS;
		res = yt921x_reg_toggle_bits(priv, YT921X_PORTn_LEARN(port),
					     mask, !learning);
		if (res)
			return res;
	}

	/* BR_FLOOD, BR_MCAST_FLOOD: see the comment where ACT_UNK_ACTn_TRAP
	 * is set
	 */

	/* BR_BCAST_FLOOD: we can filter bcast, but cannot trap them */

	do_flush = false;
	if (flags.mask & BR_HAIRPIN_MODE) {
		pp->hairpin = flags.val & BR_HAIRPIN_MODE;
		do_flush = true;
	}
	if (flags.mask & BR_ISOLATED) {
		pp->isolated =
			       flags.val & BR_ISOLATED;
		do_flush = true;
	}
	if (do_flush) {
		struct dsa_switch *ds = &priv->ds;
		struct dsa_port *dp = dsa_to_port(ds, port);
		struct net_device *bdev;

		bdev = dsa_port_bridge_dev_get(dp);
		if (bdev) {
			u32 ports_mask;

			ports_mask = dsa_bridge_ports(ds, bdev);
			ports_mask |= priv->cpu_ports_mask;
			res = yt921x_bridge(priv, ports_mask);
			if (res)
				return res;
		}
	}

	return 0;
}

/* DSA .port_pre_bridge_flags: report which flags we can offload */
static int
yt921x_dsa_port_pre_bridge_flags(struct dsa_switch *ds, int port,
				 struct switchdev_brport_flags flags,
				 struct netlink_ext_ack *extack)
{
	if (flags.mask & ~(BR_HAIRPIN_MODE | BR_LEARNING | BR_FLOOD |
			   BR_MCAST_FLOOD | BR_ISOLATED))
		return -EINVAL;
	return 0;
}

/* DSA .port_bridge_flags */
static int
yt921x_dsa_port_bridge_flags(struct dsa_switch *ds, int port,
			     struct switchdev_brport_flags flags,
			     struct netlink_ext_ack *extack)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	int res;

	if (dsa_is_cpu_port(ds, port))
		return 0;

	mutex_lock(&priv->reg_lock);
	res = yt921x_bridge_flags(priv, port, flags);
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* DSA .port_bridge_leave: op returns void, so failures are only logged */
static void
yt921x_dsa_port_bridge_leave(struct dsa_switch *ds, int port,
			     struct dsa_bridge bridge)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct device *dev = to_device(priv);
	int res;

	if (dsa_is_cpu_port(ds, port))
		return;

	mutex_lock(&priv->reg_lock);
	res = yt921x_bridge_leave(priv, port);
	mutex_unlock(&priv->reg_lock);

	if (res)
		dev_err(dev, "Failed to %s port %d: %i\n", "unbridge",
			port, res);
}

/* DSA .port_bridge_join */
static int
yt921x_dsa_port_bridge_join(struct dsa_switch *ds, int port,
			    struct dsa_bridge bridge, bool *tx_fwd_offload,
			    struct netlink_ext_ack *extack)
{
	struct yt921x_priv *priv =
		to_yt921x_priv(ds);
	u16 ports_mask;
	int res;

	if (dsa_is_cpu_port(ds, port))
		return 0;

	ports_mask = dsa_bridge_ports(ds, bridge.dev);
	ports_mask |= priv->cpu_ports_mask;

	mutex_lock(&priv->reg_lock);
	res = yt921x_bridge_join(priv, port, ports_mask);
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* DSA .port_mst_state_set: map the bridge MST state onto the per-MSTI
 * STP register for @port.
 */
static int
yt921x_dsa_port_mst_state_set(struct dsa_switch *ds, int port,
			      const struct switchdev_mst_state *st)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	u32 mask;
	u32 ctrl;
	int res;

	mask = YT921X_STP_PORTn_M(port);
	switch (st->state) {
	case BR_STATE_DISABLED:
		ctrl = YT921X_STP_PORTn_DISABLED(port);
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_LEARNING:
		ctrl = YT921X_STP_PORTn_LEARNING(port);
		break;
	case BR_STATE_FORWARDING:
	default:
		ctrl = YT921X_STP_PORTn_FORWARD(port);
		break;
	case BR_STATE_BLOCKING:
		ctrl = YT921X_STP_PORTn_BLOCKING(port);
		break;
	}

	mutex_lock(&priv->reg_lock);
	res = yt921x_reg_update_bits(priv, YT921X_STPn(st->msti), mask, ctrl);
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* DSA .vlan_msti_set: bind VLAN @msti->vid to MST instance @msti->msti */
static int
yt921x_dsa_vlan_msti_set(struct dsa_switch *ds, struct dsa_bridge bridge,
			 const struct switchdev_vlan_msti *msti)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	u64 mask64;
	u64 ctrl64;
	int res;

	if (!msti->vid)
		return -EINVAL;
	if (!msti->msti || msti->msti >= YT921X_MSTI_NUM)
		return -EINVAL;

	mask64 = YT921X_VLAN_CTRL_STP_ID_M;
	ctrl64 = YT921X_VLAN_CTRL_STP_ID(msti->msti);

	mutex_lock(&priv->reg_lock);
	res = yt921x_reg64_update_bits(priv, YT921X_VLANn_CTRL(msti->vid),
				       mask64, ctrl64);
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* DSA .port_stp_state_set: STP state in MSTI 0 plus learning on/off */
static void
yt921x_dsa_port_stp_state_set(struct dsa_switch *ds,
			      int port, u8 state)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct device *dev = to_device(priv);
	bool learning;
	u32 mask;
	u32 ctrl;
	int res;

	/* Map the bridge STP state onto STP instance 0 and, separately,
	 * enable address learning only in LEARNING/FORWARDING (and only if
	 * the bridge wants learning on this port).
	 */
	mask = YT921X_STP_PORTn_M(port);
	learning = false;
	switch (state) {
	case BR_STATE_DISABLED:
		ctrl = YT921X_STP_PORTn_DISABLED(port);
		break;
	case BR_STATE_LISTENING:
		ctrl = YT921X_STP_PORTn_LEARNING(port);
		break;
	case BR_STATE_LEARNING:
		ctrl = YT921X_STP_PORTn_LEARNING(port);
		learning = dp->learning;
		break;
	case BR_STATE_FORWARDING:
	default:
		/* Unknown states fall back to forwarding */
		ctrl = YT921X_STP_PORTn_FORWARD(port);
		learning = dp->learning;
		break;
	case BR_STATE_BLOCKING:
		ctrl = YT921X_STP_PORTn_BLOCKING(port);
		break;
	}

	mutex_lock(&priv->reg_lock);
	do {
		res = yt921x_reg_update_bits(priv, YT921X_STPn(0), mask, ctrl);
		if (res)
			break;

		/* LEARN_DIS is inverted relative to "learning" */
		mask = YT921X_PORT_LEARN_DIS;
		ctrl = !learning ? YT921X_PORT_LEARN_DIS : 0;
		res = yt921x_reg_update_bits(priv, YT921X_PORTn_LEARN(port),
					     mask, ctrl);
	} while (0);
	mutex_unlock(&priv->reg_lock);

	/* void hook: log only */
	if (res)
		dev_err(dev, "Failed to %s port %d: %i\n", "set STP state for",
			port, res);
}

/* Read back the default (port-based) priority from the port QoS register */
static int __maybe_unused
yt921x_dsa_port_get_default_prio(struct dsa_switch *ds, int port)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	u32 val;
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_reg_read(priv, YT921X_PORTn_QOS(port), &val);
	mutex_unlock(&priv->reg_lock);

	if (res)
		return res;

	return FIELD_GET(YT921X_PORT_QOS_PRIO_M, val);
}

/* Set and enable the default (port-based) priority for @port */
static int __maybe_unused
yt921x_dsa_port_set_default_prio(struct dsa_switch *ds, int port, u8 prio)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	u32 mask;
	u32 ctrl;
	int res;

	if (prio >= YT921X_PRIO_NUM)
		return -EINVAL;

	mutex_lock(&priv->reg_lock);
	mask = YT921X_PORT_QOS_PRIO_M | YT921X_PORT_QOS_PRIO_EN;
	ctrl = YT921X_PORT_QOS_PRIO(prio) | YT921X_PORT_QOS_PRIO_EN;
	res = yt921x_reg_update_bits(priv, YT921X_PORTn_QOS(port), mask, ctrl);
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* sort() comparator: order {selector, priority} pairs by descending
 * priority (element [1]).
 */
static int __maybe_unused appprios_cmp(const void *a, const void *b)
{
	return ((const u8 *)b)[1] - ((const u8 *)a)[1];
}

/* Report the trusted app selectors (DSCP/PCP) of @port, most-trusted
 * first, derived from the hardware priority-order register.
 */
static int __maybe_unused
yt921x_dsa_port_get_apptrust(struct dsa_switch *ds, int port, u8 *sel,
			     int *nselp)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	u8 appprios[2][2] = {};	/* {selector, hw trust level} pairs */
	int nsel;
	u32 val;
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_reg_read(priv, YT921X_PORTn_PRIO_ORD(port), &val);
	mutex_unlock(&priv->reg_lock);

	if (res)
		return res;

	/* Each selector occupies a 3-bit trust-level field; 0 = untrusted */
	appprios[0][0] = IEEE_8021QAZ_APP_SEL_DSCP;
	appprios[0][1] = (val >> (3 * YT921X_APP_SEL_DSCP)) & 7;
	appprios[1][0] = DCB_APP_SEL_PCP;
	appprios[1][1] = (val >> (3 * YT921X_APP_SEL_CVLAN_PCP)) & 7;
	/* Highest hardware trust level first */
	sort(appprios, ARRAY_SIZE(appprios), sizeof(appprios[0]), appprios_cmp,
	     NULL);

	/* Emit selectors until the first untrusted (level 0) entry */
	nsel = 0;
	for (int i = 0; i < ARRAY_SIZE(appprios) && appprios[i][1]; i++) {
		sel[nsel] = appprios[i][0];
		nsel++;
	}
	*nselp = nsel;

	return 0;
}

/* Program the trusted app selectors of @port. sel[] is ordered most-trusted
 * first; entry i is written with hardware trust level 7 - i. PCP trust is
 * applied to both C-VLAN and S-VLAN PCP fields.
 */
static int __maybe_unused
yt921x_dsa_port_set_apptrust(struct dsa_switch *ds, int port, const u8 *sel,
			     int nsel)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct device *dev = to_device(priv);
	u32 ctrl;
	int res;

	if (nsel > YT921X_APP_SEL_NUM)
		return -EINVAL;

	ctrl = 0;
	for (int i = 0; i < nsel; i++) {
		switch (sel[i]) {
		case IEEE_8021QAZ_APP_SEL_DSCP:
			ctrl |= YT921X_PORT_PRIO_ORD_APPm(YT921X_APP_SEL_DSCP,
							  7 - i);
			break;
		case DCB_APP_SEL_PCP:
			ctrl |= YT921X_PORT_PRIO_ORD_APPm(YT921X_APP_SEL_CVLAN_PCP,
							  7 - i);
			ctrl |= YT921X_PORT_PRIO_ORD_APPm(YT921X_APP_SEL_SVLAN_PCP,
							  7 - i);
			break;
		default:
			dev_err(dev,
				"Invalid apptrust selector (at %d-th). 
Supported: dscp, pcp\n",
				i + 1);
			return -EOPNOTSUPP;
		}
	}

	mutex_lock(&priv->reg_lock);
	res = yt921x_reg_write(priv, YT921X_PORTn_PRIO_ORD(port), ctrl);
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* Take the MAC of @port down: drop link and disable RX/TX; on external
 * ports also clear the SERDES and XMII link bits.
 * Caller must hold priv->reg_lock.
 */
static int yt921x_port_down(struct yt921x_priv *priv, int port)
{
	u32 mask;
	int res;

	mask = YT921X_PORT_LINK | YT921X_PORT_RX_MAC_EN | YT921X_PORT_TX_MAC_EN;
	res = yt921x_reg_clear_bits(priv, YT921X_PORTn_CTRL(port), mask);
	if (res)
		return res;

	if (yt921x_port_is_external(port)) {
		mask = YT921X_SERDES_LINK;
		res = yt921x_reg_clear_bits(priv, YT921X_SERDESn(port), mask);
		if (res)
			return res;

		mask = YT921X_XMII_LINK;
		res = yt921x_reg_clear_bits(priv, YT921X_XMIIn(port), mask);
		if (res)
			return res;
	}

	return 0;
}

/* Bring the MAC of @port up with the resolved link parameters: speed,
 * duplex and pause are written to the port control register; external
 * ports additionally get their SERDES, XMII and MDIO-polling registers
 * programmed to match. Caller must hold priv->reg_lock.
 * Returns 0 or a negative error (-EINVAL for unsupported speeds).
 */
static int
yt921x_port_up(struct yt921x_priv *priv, int port, unsigned int mode,
	       phy_interface_t interface, int speed, int duplex,
	       bool tx_pause, bool rx_pause)
{
	u32 mask;
	u32 ctrl;
	int res;

	switch (speed) {
	case SPEED_10:
		ctrl = YT921X_PORT_SPEED_10;
		break;
	case SPEED_100:
		ctrl = YT921X_PORT_SPEED_100;
		break;
	case SPEED_1000:
		ctrl = YT921X_PORT_SPEED_1000;
		break;
	case SPEED_2500:
		ctrl = YT921X_PORT_SPEED_2500;
		break;
	case SPEED_10000:
		ctrl = YT921X_PORT_SPEED_10000;
		break;
	default:
		return -EINVAL;
	}
	if (duplex == DUPLEX_FULL)
		ctrl |= YT921X_PORT_DUPLEX_FULL;
	if (tx_pause)
		ctrl |= YT921X_PORT_TX_PAUSE;
	if (rx_pause)
		ctrl |= YT921X_PORT_RX_PAUSE;
	ctrl |= YT921X_PORT_RX_MAC_EN | YT921X_PORT_TX_MAC_EN;
	res = yt921x_reg_write(priv, YT921X_PORTn_CTRL(port), ctrl);
	if (res)
		return res;

	if (yt921x_port_is_external(port)) {
		/* Mirror the same link parameters into the SERDES block */
		mask = YT921X_SERDES_SPEED_M;
		switch (speed) {
		case
SPEED_10:
			ctrl = YT921X_SERDES_SPEED_10;
			break;
		case SPEED_100:
			ctrl = YT921X_SERDES_SPEED_100;
			break;
		case SPEED_1000:
			ctrl = YT921X_SERDES_SPEED_1000;
			break;
		case SPEED_2500:
			ctrl = YT921X_SERDES_SPEED_2500;
			break;
		case SPEED_10000:
			ctrl = YT921X_SERDES_SPEED_10000;
			break;
		default:
			return -EINVAL;
		}
		mask |= YT921X_SERDES_DUPLEX_FULL;
		if (duplex == DUPLEX_FULL)
			ctrl |= YT921X_SERDES_DUPLEX_FULL;
		mask |= YT921X_SERDES_TX_PAUSE;
		if (tx_pause)
			ctrl |= YT921X_SERDES_TX_PAUSE;
		mask |= YT921X_SERDES_RX_PAUSE;
		if (rx_pause)
			ctrl |= YT921X_SERDES_RX_PAUSE;
		mask |= YT921X_SERDES_LINK;
		ctrl |= YT921X_SERDES_LINK;
		res = yt921x_reg_update_bits(priv, YT921X_SERDESn(port),
					     mask, ctrl);
		if (res)
			return res;

		mask = YT921X_XMII_LINK;
		res = yt921x_reg_set_bits(priv, YT921X_XMIIn(port), mask);
		if (res)
			return res;

		/* Finally report the link state to the MDIO polling unit */
		switch (speed) {
		case SPEED_10:
			ctrl = YT921X_MDIO_POLLING_SPEED_10;
			break;
		case SPEED_100:
			ctrl = YT921X_MDIO_POLLING_SPEED_100;
			break;
		case SPEED_1000:
			ctrl = YT921X_MDIO_POLLING_SPEED_1000;
			break;
		case SPEED_2500:
			ctrl = YT921X_MDIO_POLLING_SPEED_2500;
			break;
		case SPEED_10000:
			ctrl = YT921X_MDIO_POLLING_SPEED_10000;
			break;
		default:
			return -EINVAL;
		}
		if (duplex == DUPLEX_FULL)
			ctrl |= YT921X_MDIO_POLLING_DUPLEX_FULL;
		ctrl |= YT921X_MDIO_POLLING_LINK;
		res = yt921x_reg_write(priv, YT921X_MDIO_POLLINGn(port), ctrl);
		if (res)
			return res;
	}

	return 0;
}

/* Configure the interface mode of @port. Internal ports only accept
 * PHY_INTERFACE_MODE_INTERNAL; external ports are switched into the
 * requested SERDES mode (XMII is not implemented yet).
 * Caller must hold priv->reg_lock.
 */
static int
yt921x_port_config(struct yt921x_priv *priv, int port, unsigned int mode,
		   phy_interface_t interface)
{
	struct device *dev = to_device(priv);
	u32 mask;
	u32 ctrl;
	int res;

	if (!yt921x_port_is_external(port)) {
		if (interface !=
PHY_INTERFACE_MODE_INTERNAL) {
			dev_err(dev, "Wrong mode %d on port %d\n",
				interface, port);
			return -EINVAL;
		}
		return 0;
	}

	switch (interface) {
	/* SERDES */
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_100BASEX:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		/* Route the port to the SERDES block, not XMII */
		mask = YT921X_SERDES_CTRL_PORTn(port);
		res = yt921x_reg_set_bits(priv, YT921X_SERDES_CTRL, mask);
		if (res)
			return res;

		mask = YT921X_XMII_CTRL_PORTn(port);
		res = yt921x_reg_clear_bits(priv, YT921X_XMII_CTRL, mask);
		if (res)
			return res;

		mask = YT921X_SERDES_MODE_M;
		switch (interface) {
		case PHY_INTERFACE_MODE_SGMII:
			ctrl = YT921X_SERDES_MODE_SGMII;
			break;
		case PHY_INTERFACE_MODE_100BASEX:
			ctrl = YT921X_SERDES_MODE_100BASEX;
			break;
		case PHY_INTERFACE_MODE_1000BASEX:
			ctrl = YT921X_SERDES_MODE_1000BASEX;
			break;
		case PHY_INTERFACE_MODE_2500BASEX:
			ctrl = YT921X_SERDES_MODE_2500BASEX;
			break;
		default:
			return -EINVAL;
		}
		res = yt921x_reg_update_bits(priv, YT921X_SERDESn(port),
					     mask, ctrl);
		if (res)
			return res;

		break;
	/* add XMII support here */
	default:
		return -EINVAL;
	}

	return 0;
}

/* phylink: link went down — stop MIB polling for the port and disable
 * its MAC.
 */
static void
yt921x_phylink_mac_link_down(struct phylink_config *config, unsigned int mode,
			     phy_interface_t interface)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct yt921x_priv *priv = to_yt921x_priv(dp->ds);
	int port = dp->index;
	int res;

	/* No need to sync; port control block is held until device remove */
	cancel_delayed_work(&priv->ports[port].mib_read);

	mutex_lock(&priv->reg_lock);
	res = yt921x_port_down(priv, port);
	mutex_unlock(&priv->reg_lock);

	if (res)
		dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "bring down",
			port, res);
}

/* phylink: link came up — program the MAC with the resolved parameters
 * and (re)start MIB polling for the port.
 */
static void
yt921x_phylink_mac_link_up(struct phylink_config *config,
			   struct phy_device *phydev, unsigned int mode,
			   phy_interface_t interface, int speed, int duplex,
			   bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct yt921x_priv *priv = to_yt921x_priv(dp->ds);
	int port = dp->index;
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_port_up(priv, port, mode, interface, speed, duplex,
			     tx_pause, rx_pause);
	mutex_unlock(&priv->reg_lock);

	if (res)
		dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "bring up",
			port, res);

	schedule_delayed_work(&priv->ports[port].mib_read, 0);
}

/* phylink: apply the negotiated interface mode to the port */
static void
yt921x_phylink_mac_config(struct phylink_config *config, unsigned int mode,
			  const struct phylink_link_state *state)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct yt921x_priv *priv = to_yt921x_priv(dp->ds);
	int port = dp->index;
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_port_config(priv, port, mode, state->interface);
	mutex_unlock(&priv->reg_lock);

	if (res)
		dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "config",
			port, res);
}

/* Declare the interfaces and MAC capabilities each port supports, based
 * on the chip info's internal/external port masks.
 */
static void
yt921x_dsa_phylink_get_caps(struct dsa_switch *ds, int port,
			    struct phylink_config *config)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	const struct yt921x_info *info = priv->info;

	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000;

	if (info->internal_mask & BIT(port)) {
		/* Port 10 for MCU should probably go here too. But since that
		 * is untested yet, turn it down for the moment by letting it
		 * fall to the default branch.
		 */
		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
			  config->supported_interfaces);
	} else if (info->external_mask & BIT(port)) {
		/* TODO: external ports may support SERDES only, XMII only, or
		 * SERDES + XMII depending on the chip. However, we can't get
		 * the accurate config table due to lack of document, thus
		 * we simply declare SERDES + XMII and rely on the correctness
		 * of devicetree for now.
		 */

		/* SERDES */
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  config->supported_interfaces);
		/* REVSGMII (SGMII in PHY role) should go here, once
		 * PHY_INTERFACE_MODE_REVSGMII is introduced.
		 */
		__set_bit(PHY_INTERFACE_MODE_100BASEX,
			  config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
			  config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
			  config->supported_interfaces);
		config->mac_capabilities |= MAC_2500FD;

		/* XMII */

		/* Not tested. To add support for XMII:
		 * - Add proper interface modes below
		 * - Handle them in yt921x_port_config()
		 */
	}
	/* no such port: empty supported_interfaces causes phylink to turn it
	 * down
	 */
}

/* Per-port initialization: start every port standalone (not bridged),
 * clear its QoS priority order, and lock down the CPU port so only
 * tagged traffic may egress through it. Caller must hold priv->reg_lock.
 */
static int yt921x_port_setup(struct yt921x_priv *priv, int port)
{
	struct dsa_switch *ds = &priv->ds;
	u32 ctrl;
	int res;

	res = yt921x_userport_standalone(priv, port);
	if (res)
		return res;

	/* Clear prio order (even if DCB is not enabled) to avoid unsolicited
	 * priorities
	 */
	res = yt921x_reg_write(priv, YT921X_PORTn_PRIO_ORD(port), 0);
	if (res)
		return res;

	if (dsa_is_cpu_port(ds, port)) {
		/* Egress of CPU port is supposed to be completely controlled
		 * via tagging, so set to oneway isolated (drop all packets
		 * without tag).
		 */
		ctrl = ~(u32)0;
		res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port),
				       ctrl);
		if (res)
			return res;

		/* To simplify FDB "isolation" simulation, we also disable
		 * learning on the CPU port, and let software identify packets
		 * toward the CPU (either trapped or a static FDB entry is
		 * matched, no matter which bridge that entry is for), which is
		 * already done by yt921x_userport_standalone(). As a result,
		 * VLAN-awareness becomes unrelated on the CPU port (set to
		 * VLAN-unaware by the way).
		 */
	}

	return 0;
}

/* This switch always uses the YT921x tagging protocol */
static enum dsa_tag_protocol
yt921x_dsa_get_tag_protocol(struct dsa_switch *ds, int port,
			    enum dsa_tag_protocol m)
{
	return DSA_TAG_PROTO_YT921X;
}

/* DSA wrapper around yt921x_port_setup(), taking the register lock */
static int yt921x_dsa_port_setup(struct dsa_switch *ds, int port)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_port_setup(priv, port);
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* Not "port" - DSCP mapping is global */
static int __maybe_unused
yt921x_dsa_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	u32 val;
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_reg_read(priv, YT921X_IPM_DSCPn(dscp), &val);
	mutex_unlock(&priv->reg_lock);

	if (res)
		return res;

	return FIELD_GET(YT921X_IPM_PRIO_M, val);
}

/* Remove a DSCP-to-priority mapping (global, like the getter above) */
static int __maybe_unused
yt921x_dsa_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	u32 val;
	int res;

	mutex_lock(&priv->reg_lock);
	/* During a "dcb app replace" command, the new app table entry will be
	 * added first, then the old one will be deleted.
But the hardware only
	 * supports one QoS class per DSCP value (duh), so if we blindly delete
	 * the app table entry for this DSCP value, we end up deleting the
	 * entry with the new priority. Avoid that by checking whether user
	 * space wants to delete the priority which is currently configured, or
	 * something else which is no longer current.
	 */
	res = yt921x_reg_read(priv, YT921X_IPM_DSCPn(dscp), &val);
	if (!res && FIELD_GET(YT921X_IPM_PRIO_M, val) == prio)
		/* Reset to the default (best-effort) traffic class */
		res = yt921x_reg_write(priv, YT921X_IPM_DSCPn(dscp),
				       YT921X_IPM_PRIO(IEEE8021Q_TT_BK));
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* Install a (global) DSCP-to-priority mapping */
static int __maybe_unused
yt921x_dsa_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	int res;

	if (prio >= YT921X_PRIO_NUM)
		return -EINVAL;

	mutex_lock(&priv->reg_lock);
	res = yt921x_reg_write(priv, YT921X_IPM_DSCPn(dscp),
			       YT921X_IPM_PRIO(prio));
	mutex_unlock(&priv->reg_lock);

	return res;
}

/* Poll the EDATA (e-fuse data) status register until it goes idle; on
 * success *valp holds the last read register value.
 */
static int yt921x_edata_wait(struct yt921x_priv *priv, u32 *valp)
{
	u32 val = YT921X_EDATA_DATA_IDLE;
	int res;

	res = yt921x_reg_wait(priv, YT921X_EDATA_DATA,
			      YT921X_EDATA_DATA_STATUS_M, &val);
	if (res)
		return res;

	*valp = val;
	return 0;
}

/* Issue an EDATA read for @addr without waiting for prior idleness
 * (the "_cont" variant assumes the engine is already idle).
 */
static int
yt921x_edata_read_cont(struct yt921x_priv *priv, u8 addr, u8 *valp)
{
	u32 ctrl;
	u32 val;
	int res;

	ctrl = YT921X_EDATA_CTRL_ADDR(addr) | YT921X_EDATA_CTRL_READ;
	res = yt921x_reg_write(priv, YT921X_EDATA_CTRL, ctrl);
	if (res)
		return res;
	res = yt921x_edata_wait(priv, &val);
	if (res)
		return res;

	*valp = FIELD_GET(YT921X_EDATA_DATA_DATA_M, val);
	return 0;
}

/* Read one EDATA byte at @addr, waiting for the engine to be idle first */
static int yt921x_edata_read(struct yt921x_priv *priv, u8 addr, u8 *valp)
{
	u32 val;
	int res;

	res = yt921x_edata_wait(priv, &val);
	if (res)
		return res;
	return yt921x_edata_read_cont(priv, addr, valp);
}

/* Identify the chip: match the CHIP_ID major against the yt921x_infos
 * table, then narrow the match with CHIP_MODE and the EDATA extmode
 * byte. On success priv->info points at the matching entry.
 */
static int yt921x_chip_detect(struct yt921x_priv *priv)
{
	struct device *dev = to_device(priv);
	const struct yt921x_info *info;
	u8 extmode;
	u32 chipid;
	u32 major;
	u32 mode;
	int res;

	res = yt921x_reg_read(priv, YT921X_CHIP_ID, &chipid);
	if (res)
		return res;

	major = FIELD_GET(YT921X_CHIP_ID_MAJOR, chipid);

	/* First pass: find any entry with a matching major */
	for (info = yt921x_infos; info->name; info++)
		if (info->major == major)
			break;
	if (!info->name) {
		dev_err(dev, "Unexpected chipid 0x%x\n", chipid);
		return -ENODEV;
	}

	res = yt921x_reg_read(priv, YT921X_CHIP_MODE, &mode);
	if (res)
		return res;
	res = yt921x_edata_read(priv, YT921X_EDATA_EXTMODE, &extmode);
	if (res)
		return res;

	/* Second pass (continuing from the first hit): exact variant match */
	for (; info->name; info++)
		if (info->major == major && info->mode == mode &&
		    info->extmode == extmode)
			break;
	if (!info->name) {
		dev_err(dev,
			"Unsupported chipid 0x%x with chipmode 0x%x 0x%x\n",
			chipid, mode, extmode);
		return -ENODEV;
	}

	/* Print chipid here since we are interested in lower 16 bits */
	dev_info(dev,
		 "Motorcomm %s ethernet switch, chipid: 0x%x, chipmode: 0x%x 0x%x\n",
		 info->name, chipid, mode, extmode);

	priv->info = info;
	return 0;
}

/* Detect the chip, perform a full hardware reset, and sanity-check the
 * CPU tag EtherType. Caller must hold priv->reg_lock.
 */
static int yt921x_chip_reset(struct yt921x_priv *priv)
{
	struct device *dev = to_device(priv);
	u16 eth_p_tag;
	u32 val;
	int res;

	res = yt921x_chip_detect(priv);
	if (res)
		return res;

	/* Reset */
	res = yt921x_reg_write(priv, YT921X_RST, YT921X_RST_HW);
	if (res)
		return res;

	/* RST_HW is almost same as GPIO hard reset, so we need this delay.
	 */
	fsleep(YT921X_RST_DELAY_US);

	/* Wait for the reset register to read back as all-zero */
	val = 0;
	res = yt921x_reg_wait(priv, YT921X_RST, ~0, &val);
	if (res)
		return res;

	/* Check for tag EtherType; do it after reset in case you messed it up
	 * before.
	 */
	res = yt921x_reg_read(priv, YT921X_CPU_TAG_TPID, &val);
	if (res)
		return res;
	eth_p_tag = FIELD_GET(YT921X_CPU_TAG_TPID_TPID_M, val);
	if (eth_p_tag != ETH_P_YT921X) {
		dev_err(dev, "Tag type 0x%x != 0x%x\n", eth_p_tag,
			ETH_P_YT921X);
		/* Despite being possible, we choose not to set CPU_TAG_TPID,
		 * since there is no way it can be different unless you have the
		 * wrong chip.
		 */
		return -EINVAL;
	}

	return 0;
}

/* Global DSA-related chip configuration: designate the CPU port, enable
 * tagging towards it, and set the unknown-unicast/multicast policies.
 * Caller must hold priv->reg_lock.
 */
static int yt921x_chip_setup_dsa(struct yt921x_priv *priv)
{
	struct dsa_switch *ds = &priv->ds;
	unsigned long cpu_ports_mask;
	u64 ctrl64;
	u32 ctrl;
	int port;
	int res;

	/* Enable DSA */
	priv->cpu_ports_mask = dsa_cpu_ports(ds);

	ctrl = YT921X_EXT_CPU_PORT_TAG_EN | YT921X_EXT_CPU_PORT_PORT_EN |
	       YT921X_EXT_CPU_PORT_PORT(__ffs(priv->cpu_ports_mask));
	res = yt921x_reg_write(priv, YT921X_EXT_CPU_PORT, ctrl);
	if (res)
		return res;

	/* Setup software switch */
	ctrl = YT921X_CPU_COPY_TO_EXT_CPU;
	res = yt921x_reg_write(priv, YT921X_CPU_COPY, ctrl);
	if (res)
		return res;

	ctrl = GENMASK(10, 0);
	res = yt921x_reg_write(priv, YT921X_FILTER_UNK_UCAST, ctrl);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_FILTER_UNK_MCAST, ctrl);
	if (res)
		return res;

	/* YT921x does not support native DSA port bridging, so we use port
	 * isolation to emulate it. However, be especially careful that port
	 * isolation takes _after_ FDB lookups, i.e.
if an FDB entry (from
	 * another bridge) is matched and the destination port (in another
	 * bridge) is blocked, the packet will be dropped instead of flooding to
	 * the "bridged" ports, thus we need to trap and handle those packets by
	 * software.
	 *
	 * If there is no more than one bridge, we might be able to drop them
	 * directly given some conditions are met, but we trap them in all cases
	 * for now.
	 */
	ctrl = 0;
	for (int i = 0; i < YT921X_PORT_NUM; i++)
		ctrl |= YT921X_ACT_UNK_ACTn_TRAP(i);
	/* Except for CPU ports, if any packets are sent via CPU ports without
	 * tag, they should be dropped.
	 */
	cpu_ports_mask = priv->cpu_ports_mask;
	for_each_set_bit(port, &cpu_ports_mask, YT921X_PORT_NUM) {
		ctrl &= ~YT921X_ACT_UNK_ACTn_M(port);
		ctrl |= YT921X_ACT_UNK_ACTn_DROP(port);
	}
	res = yt921x_reg_write(priv, YT921X_ACT_UNK_UCAST, ctrl);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_ACT_UNK_MCAST, ctrl);
	if (res)
		return res;

	/* Tagged VID 0 should be treated as untagged, which confuses the
	 * hardware a lot
	 */
	ctrl64 = YT921X_VLAN_CTRL_LEARN_DIS | YT921X_VLAN_CTRL_PORTS_M;
	res = yt921x_reg64_write(priv, YT921X_VLANn_CTRL(0), ctrl64);
	if (res)
		return res;

	return 0;
}

/* Initialize the default QoS maps: DSCP values map to IEEE 802.1Q
 * traffic types, and VLAN PCP/DEI map straight to internal priorities
 * (DEI marks the frame "yellow"). Caller must hold priv->reg_lock.
 */
static int __maybe_unused yt921x_chip_setup_qos(struct yt921x_priv *priv)
{
	u32 ctrl;
	int res;

	/* DSCP to internal priorities */
	for (u8 dscp = 0; dscp < DSCP_MAX; dscp++) {
		int prio = ietf_dscp_to_ieee8021q_tt(dscp);

		if (prio < 0)
			return prio;

		res = yt921x_reg_write(priv, YT921X_IPM_DSCPn(dscp),
				       YT921X_IPM_PRIO(prio));
		if (res)
			return res;
	}

	/* 802.1Q QoS to internal priorities */
	for (u8 pcp = 0; pcp < 8; pcp++)
		for (u8 dei = 0; dei < 2; dei++) {
			ctrl = YT921X_IPM_PRIO(pcp);
			if (dei)
				/* "Red" almost means
drop, so it's not that
				 * useful. Note that tc police does not support
				 * Three-Color very well
				 */
				ctrl |= YT921X_IPM_COLOR_YELLOW;

			/* Same mapping for both C-VLAN and S-VLAN PCP */
			for (u8 svlan = 0; svlan < 2; svlan++) {
				u32 reg = YT921X_IPM_PCPn(svlan, dei, pcp);

				res = yt921x_reg_write(priv, reg, ctrl);
				if (res)
					return res;
			}
		}

	return 0;
}

/* One-time chip configuration after reset: enable the MIB unit, set up
 * DSA and (if CONFIG_DCB) QoS defaults, clear all MIB counters, and
 * enable the temperature sensor. Caller must hold priv->reg_lock.
 */
static int yt921x_chip_setup(struct yt921x_priv *priv)
{
	u32 ctrl;
	int res;

	ctrl = YT921X_FUNC_MIB;
	res = yt921x_reg_set_bits(priv, YT921X_FUNC, ctrl);
	if (res)
		return res;

	res = yt921x_chip_setup_dsa(priv);
	if (res)
		return res;

#if IS_ENABLED(CONFIG_DCB)
	res = yt921x_chip_setup_qos(priv);
	if (res)
		return res;
#endif

	/* Clear MIB */
	ctrl = YT921X_MIB_CTRL_CLEAN | YT921X_MIB_CTRL_ALL_PORT;
	res = yt921x_reg_write(priv, YT921X_MIB_CTRL, ctrl);
	if (res)
		return res;

	/* Miscellaneous */
	res = yt921x_reg_set_bits(priv, YT921X_SENSOR, YT921X_SENSOR_TEMP);
	if (res)
		return res;

	return 0;
}

/* DSA .setup hook: reset the chip, register the internal (and optional
 * external) MDIO buses from devicetree, then run the chip setup.
 */
static int yt921x_dsa_setup(struct dsa_switch *ds)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct device *dev = to_device(priv);
	struct device_node *np = dev->of_node;
	struct device_node *child;
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_chip_reset(priv);
	mutex_unlock(&priv->reg_lock);

	if (res)
		return res;

	/* Register the internal mdio bus. Nodes for internal ports should have
	 * proper phy-handle pointing to their PHYs. Not enabling the internal
	 * bus is possible, though pretty weird, if internal ports are not used.
	 */
	child = of_get_child_by_name(np, "mdio");
	if (child) {
		res = yt921x_mbus_int_init(priv, child);
		of_node_put(child);
		if (res)
			return res;
	}

	/* External mdio bus is optional */
	child = of_get_child_by_name(np, "mdio-external");
	if (child) {
		res = yt921x_mbus_ext_init(priv, child);
		of_node_put(child);
		if (res)
			return res;

		/* Deliberately refuse to continue: this path has never been
		 * exercised on real hardware.
		 */
		dev_err(dev, "Untested external mdio bus\n");
		return -ENODEV;
	}

	mutex_lock(&priv->reg_lock);
	res = yt921x_chip_setup(priv);
	mutex_unlock(&priv->reg_lock);

	if (res)
		return res;

	return 0;
}

static const struct phylink_mac_ops yt921x_phylink_mac_ops = {
	.mac_link_down = yt921x_phylink_mac_link_down,
	.mac_link_up = yt921x_phylink_mac_link_up,
	.mac_config = yt921x_phylink_mac_config,
};

static const struct dsa_switch_ops yt921x_dsa_switch_ops = {
	/* mib */
	.get_strings = yt921x_dsa_get_strings,
	.get_ethtool_stats = yt921x_dsa_get_ethtool_stats,
	.get_sset_count = yt921x_dsa_get_sset_count,
	.get_eth_mac_stats = yt921x_dsa_get_eth_mac_stats,
	.get_eth_ctrl_stats = yt921x_dsa_get_eth_ctrl_stats,
	.get_rmon_stats = yt921x_dsa_get_rmon_stats,
	.get_stats64 = yt921x_dsa_get_stats64,
	.get_pause_stats = yt921x_dsa_get_pause_stats,
	/* eee */
	.support_eee = dsa_supports_eee,
	.set_mac_eee = yt921x_dsa_set_mac_eee,
	/* mtu */
	.port_change_mtu = yt921x_dsa_port_change_mtu,
	.port_max_mtu = yt921x_dsa_port_max_mtu,
	/* hsr */
	.port_hsr_leave = dsa_port_simple_hsr_leave,
	.port_hsr_join = dsa_port_simple_hsr_join,
	/* mirror */
	.port_mirror_del = yt921x_dsa_port_mirror_del,
	.port_mirror_add = yt921x_dsa_port_mirror_add,
	/* lag */
	.port_lag_leave = yt921x_dsa_port_lag_leave,
	.port_lag_join = yt921x_dsa_port_lag_join,
	/* fdb */
	.port_fdb_dump =
yt921x_dsa_port_fdb_dump,
	.port_fast_age = yt921x_dsa_port_fast_age,
	.set_ageing_time = yt921x_dsa_set_ageing_time,
	.port_fdb_del = yt921x_dsa_port_fdb_del,
	.port_fdb_add = yt921x_dsa_port_fdb_add,
	.port_mdb_del = yt921x_dsa_port_mdb_del,
	.port_mdb_add = yt921x_dsa_port_mdb_add,
	/* vlan */
	.port_vlan_filtering = yt921x_dsa_port_vlan_filtering,
	.port_vlan_del = yt921x_dsa_port_vlan_del,
	.port_vlan_add = yt921x_dsa_port_vlan_add,
	/* bridge */
	.port_pre_bridge_flags = yt921x_dsa_port_pre_bridge_flags,
	.port_bridge_flags = yt921x_dsa_port_bridge_flags,
	.port_bridge_leave = yt921x_dsa_port_bridge_leave,
	.port_bridge_join = yt921x_dsa_port_bridge_join,
	/* mst */
	.port_mst_state_set = yt921x_dsa_port_mst_state_set,
	.vlan_msti_set = yt921x_dsa_vlan_msti_set,
	.port_stp_state_set = yt921x_dsa_port_stp_state_set,
#if IS_ENABLED(CONFIG_DCB)
	/* dcb */
	.port_get_default_prio = yt921x_dsa_port_get_default_prio,
	.port_set_default_prio = yt921x_dsa_port_set_default_prio,
	.port_get_apptrust = yt921x_dsa_port_get_apptrust,
	.port_set_apptrust = yt921x_dsa_port_set_apptrust,
#endif
	/* port */
	.get_tag_protocol = yt921x_dsa_get_tag_protocol,
	.phylink_get_caps = yt921x_dsa_phylink_get_caps,
	.port_setup = yt921x_dsa_port_setup,
#if IS_ENABLED(CONFIG_DCB)
	/* dscp */
	.port_get_dscp_prio = yt921x_dsa_port_get_dscp_prio,
	.port_del_dscp_prio = yt921x_dsa_port_del_dscp_prio,
	.port_add_dscp_prio = yt921x_dsa_port_add_dscp_prio,
#endif
	/* chip */
	.setup = yt921x_dsa_setup,
};

/* mdio_driver .shutdown: delegate to the DSA core. drvdata may be NULL
 * if probe never completed.
 */
static void yt921x_mdio_shutdown(struct mdio_device *mdiodev)
{
	struct yt921x_priv *priv = mdiodev_get_drvdata(mdiodev);

	if (!priv)
		return;

	dsa_switch_shutdown(&priv->ds);
}

/* mdio_driver .remove: stop MIB polling on all ports, unregister the
 * switch, and tear down the register lock.
 */
static void yt921x_mdio_remove(struct mdio_device *mdiodev)
{
	struct yt921x_priv *priv =
mdiodev_get_drvdata(mdiodev);

	if (!priv)
		return;

	/* Cancel in reverse order of INIT_DELAYED_WORK in probe */
	for (size_t i = ARRAY_SIZE(priv->ports); i-- > 0; ) {
		struct yt921x_port *pp = &priv->ports[i];

		disable_delayed_work_sync(&pp->mib_read);
	}

	dsa_unregister_switch(&priv->ds);

	mutex_destroy(&priv->reg_lock);
}

/* mdio_driver .probe: allocate driver state, bind the MDIO register
 * backend, initialize per-port MIB polling work, fill in the dsa_switch
 * and register it with the DSA core.
 */
static int yt921x_mdio_probe(struct mdio_device *mdiodev)
{
	struct device *dev = &mdiodev->dev;
	struct yt921x_reg_mdio *mdio;
	struct yt921x_priv *priv;
	struct dsa_switch *ds;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	mdio = devm_kzalloc(dev, sizeof(*mdio), GFP_KERNEL);
	if (!mdio)
		return -ENOMEM;

	mdio->bus = mdiodev->bus;
	mdio->addr = mdiodev->addr;
	mdio->switchid = 0;

	mutex_init(&priv->reg_lock);

	priv->reg_ops = &yt921x_reg_ops_mdio;
	priv->reg_ctx = mdio;

	for (size_t i = 0; i < ARRAY_SIZE(priv->ports); i++) {
		struct yt921x_port *pp = &priv->ports[i];

		pp->index = i;
		INIT_DELAYED_WORK(&pp->mib_read, yt921x_poll_mib);
	}

	ds = &priv->ds;
	ds->dev = dev;
	ds->assisted_learning_on_cpu_port = true;
	ds->dscp_prio_mapping_is_global = true;
	ds->priv = priv;
	ds->ops = &yt921x_dsa_switch_ops;
	/* Hardware ageing tick is 5000 ms per unit of a 16-bit counter */
	ds->ageing_time_min = 1 * 5000;
	ds->ageing_time_max = U16_MAX * 5000;
	ds->phylink_mac_ops = &yt921x_phylink_mac_ops;
	ds->num_lag_ids = YT921X_LAG_NUM;
	ds->num_ports = YT921X_PORT_NUM;

	mdiodev_set_drvdata(mdiodev, priv);

	return dsa_register_switch(ds);
}

static const struct of_device_id yt921x_of_match[] = {
	{ .compatible = "motorcomm,yt9215" },
	{}
};
MODULE_DEVICE_TABLE(of, yt921x_of_match);

static struct mdio_driver yt921x_mdio_driver = {
	.probe = yt921x_mdio_probe,
	.remove = yt921x_mdio_remove,
	.shutdown = yt921x_mdio_shutdown,
	.mdiodrv.driver = {
		.name = YT921X_NAME,
		.of_match_table = yt921x_of_match,
	},
};

mdio_module_driver(yt921x_mdio_driver);

MODULE_AUTHOR("David Yang <mmyangfl@gmail.com>");
MODULE_DESCRIPTION("Driver for Motorcomm YT921x Switch");
MODULE_LICENSE("GPL");