// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip KSZ9477 switch driver main logic
 *
 * Copyright (C) 2017-2019 Microchip Technology Inc.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include <net/switchdev.h>

#include "ksz9477_reg.h"
#include "ksz_common.h"
#include "ksz9477.h"

static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
	regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0);
}

static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
			 bool set)
{
	regmap_update_bits(ksz_regmap_8(dev), PORT_CTRL_ADDR(port, offset),
			   bits, set ? bits : 0);
}

static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
{
	regmap_update_bits(ksz_regmap_32(dev), addr, bits, set ? bits : 0);
}

static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
			       u32 bits, bool set)
{
	regmap_update_bits(ksz_regmap_32(dev), PORT_CTRL_ADDR(port, offset),
			   bits, set ? bits : 0);
}

int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
{
	u16 frame_size;

	if (!dsa_is_cpu_port(dev->ds, port))
		return 0;

	frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;

	return regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2,
				  REG_SW_MTU_MASK, frame_size);
}

/**
 * ksz9477_handle_wake_reason - Handle wake reason on a specified port.
 * @dev: The device structure.
 * @port: The port number.
 *
 * This function reads the PME (Power Management Event) status register of a
 * specified port to determine the wake reason. If there is no wake event, it
 * returns early. Otherwise, it logs the wake reason which could be due to a
 * "Magic Packet", "Link Up", or "Energy Detect" event. The PME status register
 * is then cleared to acknowledge the handling of the wake event.
 *
 * Return: 0 on success, or an error code on failure.
 */
static int ksz9477_handle_wake_reason(struct ksz_device *dev, int port)
{
	u8 pme_status;
	int ret;

	ret = ksz_pread8(dev, port, REG_PORT_PME_STATUS, &pme_status);
	if (ret)
		return ret;

	if (!pme_status)
		return 0;

	dev_dbg(dev->dev, "Wake event on port %d due to:%s%s%s\n", port,
		pme_status & PME_WOL_MAGICPKT ? " \"Magic Packet\"" : "",
		pme_status & PME_WOL_LINKUP ? " \"Link Up\"" : "",
		pme_status & PME_WOL_ENERGY ? " \"Energy detect\"" : "");

	return ksz_pwrite8(dev, port, REG_PORT_PME_STATUS, pme_status);
}

/**
 * ksz9477_get_wol - Get Wake-on-LAN settings for a specified port.
 * @dev: The device structure.
 * @port: The port number.
 * @wol: Pointer to ethtool Wake-on-LAN settings structure.
 *
 * This function checks the PME Pin Control Register to see if PME Pin Output
 * Enable is set, indicating PME is enabled. If enabled, it sets the supported
 * and active WoL flags.
 */
void ksz9477_get_wol(struct ksz_device *dev, int port,
		     struct ethtool_wolinfo *wol)
{
	u8 pme_ctrl;
	int ret;

	if (!dev->wakeup_source)
		return;

	wol->supported = WAKE_PHY;

	/* Check if the current MAC address on this port can be set
	 * as global for WAKE_MAGIC support. The result may vary
	 * dynamically based on other ports configurations.
	 */
	if (ksz_is_port_mac_global_usable(dev->ds, port))
		wol->supported |= WAKE_MAGIC;

	ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl);
	if (ret)
		return;

	if (pme_ctrl & PME_WOL_MAGICPKT)
		wol->wolopts |= WAKE_MAGIC;
	if (pme_ctrl & (PME_WOL_LINKUP | PME_WOL_ENERGY))
		wol->wolopts |= WAKE_PHY;
}

/**
 * ksz9477_set_wol - Set Wake-on-LAN settings for a specified port.
 * @dev: The device structure.
 * @port: The port number.
 * @wol: Pointer to ethtool Wake-on-LAN settings structure.
 *
 * This function configures Wake-on-LAN (WoL) settings for a specified port.
 * It validates the provided WoL options, checks if PME is enabled via the
 * switch's PME Pin Control Register, clears any previous wake reasons,
 * and sets the Magic Packet flag in the port's PME control register if
 * specified.
 *
 * Return: 0 on success, or other error codes on failure.
 */
int ksz9477_set_wol(struct ksz_device *dev, int port,
		    struct ethtool_wolinfo *wol)
{
	u8 pme_ctrl = 0, pme_ctrl_old = 0;
	bool magic_switched_off;
	bool magic_switched_on;
	int ret;

	if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
		return -EINVAL;

	if (!dev->wakeup_source)
		return -EOPNOTSUPP;

	ret = ksz9477_handle_wake_reason(dev, port);
	if (ret)
		return ret;

	if (wol->wolopts & WAKE_MAGIC)
		pme_ctrl |= PME_WOL_MAGICPKT;
	if (wol->wolopts & WAKE_PHY)
		pme_ctrl |= PME_WOL_LINKUP | PME_WOL_ENERGY;

	ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl_old);
	if (ret)
		return ret;

	if (pme_ctrl_old == pme_ctrl)
		return 0;

	magic_switched_off = (pme_ctrl_old & PME_WOL_MAGICPKT) &&
			     !(pme_ctrl & PME_WOL_MAGICPKT);
	magic_switched_on = !(pme_ctrl_old & PME_WOL_MAGICPKT) &&
			    (pme_ctrl & PME_WOL_MAGICPKT);

	/* To keep reference count of MAC address, we should do this
	 * operation only on change of WOL settings.
	 */
	if (magic_switched_on) {
		ret = ksz_switch_macaddr_get(dev->ds, port, NULL);
		if (ret)
			return ret;
	} else if (magic_switched_off) {
		ksz_switch_macaddr_put(dev->ds);
	}

	ret = ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, pme_ctrl);
	if (ret) {
		if (magic_switched_on)
			ksz_switch_macaddr_put(dev->ds);
		return ret;
	}

	return 0;
}

/**
 * ksz9477_wol_pre_shutdown - Prepares the switch device for shutdown while
 *			      considering Wake-on-LAN (WoL) settings.
 * @dev: The switch device structure.
 * @wol_enabled: Pointer to a boolean which will be set to true if WoL is
 *		 enabled on any port.
 *
 * This function prepares the switch device for a safe shutdown while taking
 * into account the Wake-on-LAN (WoL) settings on the user ports. It updates
 * the wol_enabled flag accordingly to reflect whether WoL is active on any
 * port.
 */
void ksz9477_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled)
{
	struct dsa_port *dp;
	int ret;

	*wol_enabled = false;

	if (!dev->wakeup_source)
		return;

	dsa_switch_for_each_user_port(dp, dev->ds) {
		u8 pme_ctrl = 0;

		ret = ksz_pread8(dev, dp->index, REG_PORT_PME_CTRL, &pme_ctrl);
		if (!ret && pme_ctrl)
			*wol_enabled = true;

		/* make sure there are no pending wake events which would
		 * prevent the device from going to sleep/shutdown.
		 */
		ksz9477_handle_wake_reason(dev, dp->index);
	}

	/* Now we are safe to enable the PME pin. */
	if (*wol_enabled)
		ksz_write8(dev, REG_SW_PME_CTRL, PME_ENABLE);
}

static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
{
	unsigned int val;

	return regmap_read_poll_timeout(ksz_regmap_8(dev), REG_SW_VLAN_CTRL,
					val, !(val & VLAN_START), 10, 1000);
}

static int ksz9477_get_vlan_table(struct ksz_device *dev, u16 vid,
				  u32 *vlan_table)
{
	int ret;

	mutex_lock(&dev->vlan_mutex);

	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);

	/* wait to be cleared */
	ret = ksz9477_wait_vlan_ctrl_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read vlan table\n");
		goto exit;
	}

	ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
	ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
	ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);

	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);

exit:
	mutex_unlock(&dev->vlan_mutex);

	return ret;
}

static int ksz9477_set_vlan_table(struct ksz_device *dev, u16 vid,
				  u32 *vlan_table)
{
	int ret;

	mutex_lock(&dev->vlan_mutex);

	ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
	ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
	ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);

	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);

	/* wait to be cleared */
	ret = ksz9477_wait_vlan_ctrl_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to write vlan table\n");
		goto exit;
	}

	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);

	/* update vlan cache table */
	dev->vlan_cache[vid].table[0] = vlan_table[0];
	dev->vlan_cache[vid].table[1] = vlan_table[1];
	dev->vlan_cache[vid].table[2] = vlan_table[2];

exit:
	mutex_unlock(&dev->vlan_mutex);

	return ret;
}

static void ksz9477_read_table(struct ksz_device *dev, u32 *table)
{
	ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
	ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
	ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
	ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
}

static void ksz9477_write_table(struct ksz_device *dev, u32 *table)
{
	ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
	ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
	ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
	ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
}

static int ksz9477_wait_alu_ready(struct ksz_device *dev)
{
	unsigned int val;

	return regmap_read_poll_timeout(ksz_regmap_32(dev), REG_SW_ALU_CTRL__4,
					val, !(val & ALU_START), 10, 1000);
}

static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
{
	unsigned int val;

	return regmap_read_poll_timeout(ksz_regmap_32(dev),
					REG_SW_ALU_STAT_CTRL__4,
					val, !(val & ALU_STAT_START),
					10, 1000);
}

int ksz9477_reset_switch(struct ksz_device *dev)
{
	u8 data8;
	u32 data32;

	/* reset switch */
	ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);

	/* turn off SPI DO Edge select */
	regmap_update_bits(ksz_regmap_8(dev), REG_SW_GLOBAL_SERIAL_CTRL_0,
			   SPI_AUTO_EDGE_DETECTION, 0);

	/* default configuration */
	ksz_write8(dev, REG_SW_LUE_CTRL_1,
		   SW_AGING_ENABLE | SW_LINK_AUTO_AGING | SW_SRC_ADDR_FILTER);

	/* disable interrupts */
	ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
	ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
	ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);

	/* KSZ9893 compatible chips do not support refclk configuration */
	if (dev->chip_id == KSZ9893_CHIP_ID ||
	    dev->chip_id == KSZ8563_CHIP_ID ||
	    dev->chip_id == KSZ9563_CHIP_ID)
		return 0;

	data8 = SW_ENABLE_REFCLKO;
	if (dev->synclko_disable)
		data8 = 0;
	else if (dev->synclko_125)
		data8 = SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ;
	ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1, data8);

	return 0;
}

void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
	struct ksz_port *p = &dev->ports[port];
	unsigned int val;
	u32 data;
	int ret;

	/* retain the flush/freeze bit */
	data = p->freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
	data |= MIB_COUNTER_READ;
	data |= (addr << MIB_COUNTER_INDEX_S);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);

	ret = regmap_read_poll_timeout(ksz_regmap_32(dev),
			PORT_CTRL_ADDR(port, REG_PORT_MIB_CTRL_STAT__4),
			val, !(val & MIB_COUNTER_READ), 10, 1000);
	/* failed to read MIB. get out of loop */
	if (ret) {
		dev_dbg(dev->dev, "Failed to get MIB\n");
		return;
	}

	/* count resets upon read */
	ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
	*cnt += data;
}

void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
		       u64 *dropped, u64 *cnt)
{
	addr = dev->info->mib_names[addr].index;
	ksz9477_r_mib_cnt(dev, port, addr, cnt);
}

void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
	u32 val = freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
	struct ksz_port *p = &dev->ports[port];

	/* enable/disable the port for flush/freeze function */
	mutex_lock(&p->mib.cnt_mutex);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, val);

	/* used by MIB counter reading code to know freeze is enabled */
	p->freeze = freeze;
	mutex_unlock(&p->mib.cnt_mutex);
}

int ksz9477_errata_monitor(struct ksz_device *dev, int port,
			   u64 tx_late_col)
{
	u32 pmavbc;
	u8 status;
	u16 pqm;
	int ret;

	ret = ksz_pread8(dev, port, REG_PORT_STATUS_0, &status);
	if (ret)
		return ret;
	if (!(FIELD_GET(PORT_INTF_SPEED_MASK, status) == PORT_INTF_SPEED_NONE) &&
	    !(status & PORT_INTF_FULL_DUPLEX)) {
		/* Errata DS80000754 recommends monitoring potential faults in
		 * half-duplex mode. The switch might not be able to communicate anymore
		 * in these states.
		 * If you see this message, please read the errata-sheet for more information:
		 * https://ww1.microchip.com/downloads/aemDocuments/documents/UNG/ProductDocuments/Errata/KSZ9477S-Errata-DS80000754.pdf
		 * To workaround this issue, half-duplex mode should be avoided.
		 * A software reset could be implemented to recover from this state.
		 */
		dev_warn_once(dev->dev,
			      "Half-duplex detected on port %d, transmission halt may occur\n",
			      port);
		if (tx_late_col != 0) {
			/* Transmission halt with late collisions */
			dev_crit_once(dev->dev,
				      "TX late collisions detected, transmission may be halted on port %d\n",
				      port);
		}
		ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &status);
		if (ret)
			return ret;
		if (status & SW_VLAN_ENABLE) {
			ret = ksz_pread16(dev, port, REG_PORT_QM_TX_CNT_0__4, &pqm);
			if (ret)
				return ret;
			ret = ksz_read32(dev, REG_PMAVBC, &pmavbc);
			if (ret)
				return ret;
			if ((FIELD_GET(PMAVBC_MASK, pmavbc) <= PMAVBC_MIN) ||
			    (FIELD_GET(PORT_QM_TX_CNT_M, pqm) >= PORT_QM_TX_CNT_MAX)) {
				/* Transmission halt with Half-Duplex and VLAN */
				dev_crit_once(dev->dev,
					      "resources out of limits, transmission may be halted\n");
			}
		}
	}
	return ret;
}

void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
{
	struct ksz_port_mib *mib = &dev->ports[port].mib;

	/* flush all enabled port MIB counters */
	mutex_lock(&mib->cnt_mutex);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
		     MIB_COUNTER_FLUSH_FREEZE);
	ksz_write8(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FLUSH);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, 0);
	mutex_unlock(&mib->cnt_mutex);
}

static void ksz9477_r_phy_quirks(struct ksz_device *dev, u16 addr, u16 reg,
				 u16 *data)
{
	/* KSZ8563R does not have extended registers but the BMSR_ESTATEN and
	 * BMSR_ERCAP bits are set.
	 */
	if (dev->chip_id == KSZ8563_CHIP_ID && reg == MII_BMSR)
		*data &= ~(BMSR_ESTATEN | BMSR_ERCAP);
}

int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
	u16 val = 0xffff;
	int ret;

	/* No real PHY after this. Simulate the PHY.
	 * A fixed PHY can be setup in the device tree, but this function is
	 * still called for that port during initialization.
	 * For RGMII PHY there is no way to access it so the fixed PHY should
	 * be used. For SGMII PHY the supporting code will be added later.
	 */
	if (!dev->info->internal_phy[addr]) {
		struct ksz_port *p = &dev->ports[addr];

		switch (reg) {
		case MII_BMCR:
			val = 0x1140;
			break;
		case MII_BMSR:
			val = 0x796d;
			break;
		case MII_PHYSID1:
			val = 0x0022;
			break;
		case MII_PHYSID2:
			val = 0x1631;
			break;
		case MII_ADVERTISE:
			val = 0x05e1;
			break;
		case MII_LPA:
			val = 0xc5e1;
			break;
		case MII_CTRL1000:
			val = 0x0700;
			break;
		case MII_STAT1000:
			if (p->phydev.speed == SPEED_1000)
				val = 0x3800;
			else
				val = 0;
			break;
		}
	} else {
		ret = ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
		if (ret)
			return ret;

		ksz9477_r_phy_quirks(dev, addr, reg, &val);
	}

	*data = val;

	return 0;
}

int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
	u32 mask, val32;

	/* No real PHY after this. */
	if (!dev->info->internal_phy[addr])
		return 0;

	if (reg < 0x10)
		return ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);

	/* Errata: When using SPI, I2C, or in-band register access,
	 * writes to certain PHY registers should be performed as
	 * 32-bit writes instead of 16-bit writes.
	 */
	val32 = val;
	mask = 0xffff;
	if ((reg & 1) == 0) {
		val32 <<= 16;
		mask <<= 16;
	}
	reg &= ~1;
	return ksz_prmw32(dev, addr, 0x100 + (reg << 1), mask, val32);
}

void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
	ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
}

void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
	const u16 *regs = dev->info->regs;
	u8 data;

	regmap_update_bits(ksz_regmap_8(dev), REG_SW_LUE_CTRL_2,
			   SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S,
			   SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);

	if (port < dev->info->port_cnt) {
		/* flush individual port */
		ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
		if (!(data & PORT_LEARN_DISABLE))
			ksz_pwrite8(dev, port, regs[P_STP_CTRL],
				    data | PORT_LEARN_DISABLE);
		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
		ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
	} else {
		/* flush all */
		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
	}
}

int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
				bool flag, struct netlink_ext_ack *extack)
{
	if (flag) {
		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
			     PORT_VLAN_LOOKUP_VID_0, true);
		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true);
	} else {
		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false);
		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
			     PORT_VLAN_LOOKUP_VID_0, false);
	}

	return 0;
}

int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
			  const struct switchdev_obj_port_vlan *vlan,
			  struct netlink_ext_ack *extack)
{
	u32 vlan_table[3];
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	int err;

	err = ksz9477_get_vlan_table(dev, vlan->vid, vlan_table);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to get vlan table");
		return err;
	}

	vlan_table[0] = VLAN_VALID | (vlan->vid & VLAN_FID_M);
	if (untagged)
		vlan_table[1] |= BIT(port);
	else
		vlan_table[1] &= ~BIT(port);
	vlan_table[1] &= ~(BIT(dev->cpu_port));

	vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);

	err = ksz9477_set_vlan_table(dev, vlan->vid, vlan_table);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to set vlan table");
		return err;
	}

	/* change PVID */
	if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
		ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vlan->vid);

	return 0;
}

int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
			  const struct switchdev_obj_port_vlan *vlan)
{
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	u32 vlan_table[3];
	u16 pvid;

	ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
	pvid = pvid & 0xFFF;

	if (ksz9477_get_vlan_table(dev, vlan->vid, vlan_table)) {
		dev_dbg(dev->dev, "Failed to get vlan table\n");
		return -ETIMEDOUT;
	}

	vlan_table[2] &= ~BIT(port);

	if (pvid == vlan->vid)
		pvid = 1;

	if (untagged)
		vlan_table[1] &= ~BIT(port);

	if (ksz9477_set_vlan_table(dev, vlan->vid, vlan_table)) {
		dev_dbg(dev->dev, "Failed to set vlan table\n");
		return -ETIMEDOUT;
	}

	ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);

	return 0;
}

int ksz9477_fdb_add(struct ksz_device *dev, int port,
		    const unsigned char *addr, u16 vid, struct dsa_db db)
{
	u32 alu_table[4];
	u32 data;
	int ret = 0;

	mutex_lock(&dev->alu_mutex);

	/* find any entry with mac & vid */
	data = vid << ALU_FID_INDEX_S;
	data |= ((addr[0] << 8) | addr[1]);
	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);

	data = ((addr[2] << 24) | (addr[3] << 16));
	data |= ((addr[4] << 8) | addr[5]);
	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);

	/* start read operation */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read ALU\n");
		goto exit;
	}

	/* read ALU entry */
	ksz9477_read_table(dev, alu_table);

	/* update ALU entry */
	alu_table[0] = ALU_V_STATIC_VALID;
	alu_table[1] |= BIT(port);
	if (vid)
		alu_table[1] |= ALU_V_USE_FID;
	alu_table[2] = (vid << ALU_V_FID_S);
	alu_table[2] |= ((addr[0] << 8) | addr[1]);
	alu_table[3] = ((addr[2] << 24) | (addr[3] << 16));
	alu_table[3] |= ((addr[4] << 8) | addr[5]);

	ksz9477_write_table(dev, alu_table);

	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to write ALU\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}

int ksz9477_fdb_del(struct ksz_device *dev, int port,
		    const unsigned char *addr, u16 vid, struct dsa_db db)
{
	u32 alu_table[4];
	u32 data;
	int ret = 0;

	mutex_lock(&dev->alu_mutex);

	/* read any entry with mac & vid */
	data = vid << ALU_FID_INDEX_S;
	data |= ((addr[0] << 8) | addr[1]);
	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);

	data = ((addr[2] << 24) | (addr[3] << 16));
	data |= ((addr[4] << 8) | addr[5]);
	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);

	/* start read operation */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read ALU\n");
		goto exit;
	}

	ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
	if (alu_table[0] & ALU_V_STATIC_VALID) {
		ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
		ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
		ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);

		/* clear forwarding port */
		alu_table[1] &= ~BIT(port);

		/* if there is no port to forward, clear table */
		if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
			alu_table[0] = 0;
			alu_table[1] = 0;
			alu_table[2] = 0;
			alu_table[3] = 0;
		}
	} else {
		alu_table[0] = 0;
		alu_table[1] = 0;
		alu_table[2] = 0;
		alu_table[3] = 0;
	}

	ksz9477_write_table(dev, alu_table);

	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to write ALU\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}

static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
{
	alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID);
	alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER);
	alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER);
	alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) &
			ALU_V_PRIO_AGE_CNT_M;
	alu->mstp = alu_table[0] & ALU_V_MSTP_M;

	alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE);
	alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID);
	alu->port_forward = alu_table[1] & ALU_V_PORT_MAP;

	alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M;

	alu->mac[0] = (alu_table[2] >> 8) & 0xFF;
	alu->mac[1] = alu_table[2] & 0xFF;
	alu->mac[2] = (alu_table[3] >> 24) & 0xFF;
	alu->mac[3] = (alu_table[3] >> 16) & 0xFF;
	alu->mac[4] = (alu_table[3] >> 8) & 0xFF;
	alu->mac[5] = alu_table[3] & 0xFF;
}

int ksz9477_fdb_dump(struct ksz_device *dev, int port,
		     dsa_fdb_dump_cb_t *cb, void *data)
{
	int ret = 0;
	u32 ksz_data;
	u32 alu_table[4];
	struct alu_struct alu;
	int timeout;

	mutex_lock(&dev->alu_mutex);

	/* start ALU search */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);

	do {
		timeout = 1000;
		do {
			ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data);
			if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START))
				break;
			usleep_range(1, 10);
		} while (timeout-- > 0);

		if (!timeout) {
			dev_dbg(dev->dev, "Failed to search ALU\n");
			ret = -ETIMEDOUT;
			goto exit;
		}

		if (!(ksz_data & ALU_VALID))
			continue;

		/* read ALU table */
		ksz9477_read_table(dev, alu_table);

		ksz9477_convert_alu(&alu, alu_table);

		if (alu.port_forward & BIT(port)) {
			ret = cb(alu.mac, alu.fid, alu.is_static, data);
			if (ret)
				goto exit;
		}
	} while (ksz_data & ALU_START);

exit:

	/* stop ALU search */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);

	mutex_unlock(&dev->alu_mutex);

	return ret;
}

int ksz9477_mdb_add(struct ksz_device *dev, int port,
		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
	u32 static_table[4];
	const u8 *shifts;
	const u32 *masks;
	u32 data;
	int index;
	u32 mac_hi, mac_lo;
	int err = 0;

	shifts = dev->info->shifts;
	masks = dev->info->masks;

	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);

	mutex_lock(&dev->alu_mutex);

	for (index = 0; index < dev->info->num_statics; index++) {
		/* find empty slot first */
		data = (index << shifts[ALU_STAT_INDEX]) |
			masks[ALU_STAT_READ] | ALU_STAT_START;
		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

		/* wait to be finished */
		err = ksz9477_wait_alu_sta_ready(dev);
		if (err) {
			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
			goto exit;
		}

		/* read ALU static table */
		ksz9477_read_table(dev, static_table);

		if (static_table[0] & ALU_V_STATIC_VALID) {
			/* check this has same vid & mac address */
			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
			    static_table[3] == mac_lo) {
				/* found matching one */
				break;
			}
		} else {
			/* found empty one */
			break;
		}
	}

	/* no available entry */
	if (index == dev->info->num_statics) {
		err = -ENOSPC;
		goto exit;
	}

	/* add entry */
	static_table[0] = ALU_V_STATIC_VALID;
	static_table[1] |= BIT(port);
	if (mdb->vid)
		static_table[1] |= ALU_V_USE_FID;
	static_table[2] = (mdb->vid << ALU_V_FID_S);
	static_table[2] |= mac_hi;
	static_table[3] = mac_lo;

	ksz9477_write_table(dev, static_table);

	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

	/* wait to be finished */
	if (ksz9477_wait_alu_sta_ready(dev))
		dev_dbg(dev->dev, "Failed to read ALU STATIC\n");

exit:
	mutex_unlock(&dev->alu_mutex);
	return err;
}

int ksz9477_mdb_del(struct ksz_device *dev, int port,
		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
	u32 static_table[4];
	const u8 *shifts;
	const u32 *masks;
	u32 data;
	int index;
	int ret = 0;
	u32 mac_hi, mac_lo;

	shifts = dev->info->shifts;
	masks = dev->info->masks;

	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);

	mutex_lock(&dev->alu_mutex);

	for (index = 0; index < dev->info->num_statics; index++) {
		/* find empty slot first */
		data = (index << shifts[ALU_STAT_INDEX]) |
			masks[ALU_STAT_READ] | ALU_STAT_START;
		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

		/* wait to be finished */
		ret = ksz9477_wait_alu_sta_ready(dev);
		if (ret) {
			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
			goto exit;
		}

		/* read ALU static table */
		ksz9477_read_table(dev, static_table);

		if (static_table[0] & ALU_V_STATIC_VALID) {
			/* check this has same vid & mac address */

			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
			    static_table[3] == mac_lo) {
				/* found matching one */
				break;
			}
		}
	}

	/* no available entry */
	if (index == dev->info->num_statics)
		goto exit;

	/* clear port */
	static_table[1] &= ~BIT(port);

	if ((static_table[1] & ALU_V_PORT_MAP) == 0) {
		/* delete entry */
		static_table[0] = 0;
		static_table[1] = 0;
		static_table[2] = 0;
		static_table[3] = 0;
	}

	ksz9477_write_table(dev, static_table);

	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

	/* wait to be finished */
	ret = ksz9477_wait_alu_sta_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to read ALU STATIC\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}

int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
			    struct dsa_mall_mirror_tc_entry *mirror,
			    bool ingress, struct netlink_ext_ack *extack)
{
	u8 data;
	int p;

	/* Limit to one sniffer port
	 * Check if any of the port is already set for sniffing
	 * If yes, instruct the user to remove the previous entry & exit
	 */
	for (p = 0; p < dev->info->port_cnt; p++) {
		/* Skip the current sniffing port */
		if (p == mirror->to_local_port)
			continue;

		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);

		if (data & PORT_MIRROR_SNIFFER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Sniffer port is already configured, delete existing rules & retry");
			return -EBUSY;
		}
	}

	if (ingress)
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
	else
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);

	/* configure mirror port */
	ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
		     PORT_MIRROR_SNIFFER, true);

	ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);

	return 0;
}

void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
			     struct dsa_mall_mirror_tc_entry *mirror)
{
	bool in_use = false;
	u8 data;
	int p;

	if (mirror->ingress)
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
	else
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);

	/* Check if any of the port is still referring to sniffer port */
	for (p = 0; p < dev->info->port_cnt; p++) {
		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);

		if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) {
			in_use = true;
			break;
		}
	}

	/* delete sniffing if there are no other mirroring rules */
	if (!in_use)
		ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
			     PORT_MIRROR_SNIFFER, false);
}

static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
{
	phy_interface_t interface;
	bool gbit;

	if (dev->info->internal_phy[port])
		return PHY_INTERFACE_MODE_NA;

	gbit = ksz_get_gbit(dev, port);

	interface = ksz_get_xmii(dev, port, gbit);

	return interface;
}

void ksz9477_get_caps(struct ksz_device *dev, int port,
		      struct phylink_config *config)
{
	config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
				   MAC_SYM_PAUSE;

	if (dev->info->gbit_capable[port])
		config->mac_capabilities |= MAC_1000FD;
}

int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
	u32 secs = msecs / 1000;
	u8 value;
	u8 data;
	int ret;

	value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);

	ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
	if (ret < 0)
		return ret;

	data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);

	ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
	if (ret < 0)
		return ret;

	value &= ~SW_AGE_CNT_M;
	value |= FIELD_PREP(SW_AGE_CNT_M, data);

	return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
}

void ksz9477_port_queue_split(struct ksz_device *dev, int port)
{
	u8 data;

	if (dev->info->num_tx_queues == 8)
		data = PORT_EIGHT_QUEUE;
	else if (dev->info->num_tx_queues == 4)
		data = PORT_FOUR_QUEUE;
	else if (dev->info->num_tx_queues == 2)
		data = PORT_TWO_QUEUE;
	else
		data = PORT_SINGLE_QUEUE;

	ksz_prmw8(dev, port, REG_PORT_CTRL_0, PORT_QUEUE_SPLIT_MASK, data);
}

void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
	struct dsa_switch *ds = dev->ds;
	u16 data16;
	u8 member;

	/* enable tag tail for host port */
	if (cpu_port)
		ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
			     true);

	ksz9477_port_queue_split(dev, port);

	ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);

	/* set back pressure */
	ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);

	/* enable broadcast storm limit */
	ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);

	/* replace priority */
	ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
		     false);
	ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
			   MTI_PVID_REPLACE, false);

	/* force flow control for non-PHY ports only */
	ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
		     PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
		     !dev->info->internal_phy[port]);

	if (cpu_port)
		member = dsa_user_ports(ds);
	else
		member = BIT(dsa_upstream_port(ds, port));

	ksz9477_cfg_port_member(dev, port, member);

	/* clear pending interrupts */
	if (dev->info->internal_phy[port])
		ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);

	ksz9477_port_acl_init(dev, port);

	/* clear pending wake flags */
	ksz9477_handle_wake_reason(dev, port);

	/* Disable all WoL options by default. Otherwise
	 * ksz_switch_macaddr_get/put logic will not work properly.
	 */
	ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, 0);
}

void ksz9477_config_cpu_port(struct dsa_switch *ds)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port *p;
	int i;

	for (i = 0; i < dev->info->port_cnt; i++) {
		if (dsa_is_cpu_port(ds, i) &&
		    (dev->info->cpu_ports & (1 << i))) {
			phy_interface_t interface;
			const char *prev_msg;
			const char *prev_mode;

			dev->cpu_port = i;
			p = &dev->ports[i];

			/* Read from XMII register to determine host port
			 * interface. If set specifically in device tree
			 * note the difference to help debugging.
			 */
			interface = ksz9477_get_interface(dev, i);
			if (!p->interface) {
				if (dev->compat_interface) {
					dev_warn(dev->dev,
						 "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
						 "Please update your device tree.\n",
						 i);
					p->interface = dev->compat_interface;
				} else {
					p->interface = interface;
				}
			}
			if (interface && interface != p->interface) {
				prev_msg = " instead of ";
				prev_mode = phy_modes(interface);
			} else {
				prev_msg = "";
				prev_mode = "";
			}
			dev_info(dev->dev,
				 "Port%d: using phy mode %s%s%s\n",
				 i,
				 phy_modes(p->interface),
				 prev_msg,
				 prev_mode);

			/* enable cpu port */
			ksz9477_port_setup(dev, i, true);
		}
	}

	for (i = 0; i < dev->info->port_cnt; i++) {
		if (i == dev->cpu_port)
			continue;
		ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
	}
}

int ksz9477_enable_stp_addr(struct ksz_device *dev)
{
	const u32 *masks;
	u32 data;
	int ret;

	masks = dev->info->masks;

	/* Enable Reserved multicast table */
	ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);

	/* Set the Override bit for forwarding BPDU packet to CPU */
	ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
			  ALU_V_OVERRIDE | BIT(dev->cpu_port));
	if (ret < 0)
		return ret;

	data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];

	ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
	if (ret < 0)
		return ret;

	/* wait to be finished */
	ret = ksz9477_wait_alu_sta_ready(dev);
	if (ret < 0) {
		dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
		return ret;
	}

	return 0;
}

int ksz9477_setup(struct dsa_switch *ds)
{
	struct ksz_device *dev = ds->priv;
	int ret = 0;

	ds->mtu_enforcement_ingress = true;

	/* Required for port partitioning. */
	ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
		      true);

	/* Does not work correctly with tail tagging. */
	ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false);

	/* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);

	/* Use collision based back pressure mode. */
	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_BACK_PRESSURE,
		SW_BACK_PRESSURE_COLLISION);

	/* Now we can configure default MTU value */
	ret = regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK,
				 VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
	if (ret)
		return ret;

	/* queue based egress rate limit */
	ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);

	/* enable global MIB counter freeze function */
	ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);

	/* Make sure PME (WoL) is not enabled. If requested, it will be
	 * enabled by ksz9477_wol_pre_shutdown(). Otherwise, some PMICs do not
	 * like PME events changes before shutdown.
	 */
	ksz_write8(dev, REG_SW_PME_CTRL, 0);

	return 0;
}

u32 ksz9477_get_port_addr(int port, int offset)
{
	return PORT_CTRL_ADDR(port, offset);
}

int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val)
{
	val = val >> 8;

	return ksz_pwrite16(dev, port, REG_PORT_MTI_CREDIT_INCREMENT, val);
}

/* The KSZ9477 provides the following HW features to accelerate
 * HSR frames handling:
 *
 * 1. TX PACKET DUPLICATION FROM HOST TO SWITCH
 * 2. RX PACKET DUPLICATION DISCARDING
 * 3. PREVENTING PACKET LOOP IN THE RING BY SELF-ADDRESS FILTERING
 *
 * Only the feature in point 1 has a corresponding NETIF_F* flag available.
 *
 * The features in points 2 and 3 are "best effort" - i.e. they will
 * work correctly most of the time, but it may happen that some
 * frames will not be caught - to be more specific; there is a race
 * condition in hardware such that, when duplicate packets are received
 * on member ports very close in time to each other, the hardware fails
 * to detect that they are duplicates.
 *
 * Hence, the SW needs to handle those special cases. However, the speed
 * up gain is considerable when above features are used.
 *
 * Moreover, the NETIF_F_HW_HSR_FWD feature is also enabled, as HSR frames
 * can be forwarded in the switch fabric between HSR ports.
 */
#define KSZ9477_SUPPORTED_HSR_FEATURES (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_FWD)

void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr)
{
	struct ksz_device *dev = ds->priv;
	struct net_device *user;
	struct dsa_port *hsr_dp;
	u8 data, hsr_ports = 0;

	/* Program which port(s) shall support HSR */
	ksz_rmw32(dev, REG_HSR_PORT_MAP__4, BIT(port), BIT(port));

	/* Forward frames between HSR ports (i.e. bridge together HSR ports) */
	if (dev->hsr_ports) {
		dsa_hsr_foreach_port(hsr_dp, ds, hsr)
			hsr_ports |= BIT(hsr_dp->index);

		hsr_ports |= BIT(dsa_upstream_port(ds, port));
		dsa_hsr_foreach_port(hsr_dp, ds, hsr)
			ksz9477_cfg_port_member(dev, hsr_dp->index, hsr_ports);
	}

	if (!dev->hsr_ports) {
		/* Enable discarding of received HSR frames */
		ksz_read8(dev, REG_HSR_ALU_CTRL_0__1, &data);
		data |= HSR_DUPLICATE_DISCARD;
		data &= ~HSR_NODE_UNICAST;
		ksz_write8(dev, REG_HSR_ALU_CTRL_0__1, data);
	}

	/* Enable per port self-address filtering.
	 * The global self-address filtering has already been enabled in the
	 * ksz9477_reset_switch() function.
	 */
	ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, PORT_SRC_ADDR_FILTER, true);

	/* Setup HW supported features for lan HSR ports */
	user = dsa_to_port(ds, port)->user;
	user->features |= KSZ9477_SUPPORTED_HSR_FEATURES;
}

void ksz9477_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr)
{
	struct ksz_device *dev = ds->priv;

	/* Clear port HSR support */
	ksz_rmw32(dev, REG_HSR_PORT_MAP__4, BIT(port), 0);

	/* Disable forwarding frames between HSR ports */
	ksz9477_cfg_port_member(dev, port, BIT(dsa_upstream_port(ds, port)));

	/* Disable per port self-address filtering */
	ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, PORT_SRC_ADDR_FILTER, false);
}

int ksz9477_switch_init(struct ksz_device *dev)
{
	u8 data8;
	int ret;

	dev->port_mask = (1 << dev->info->port_cnt) - 1;

	/* turn off SPI DO Edge select */
	ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
	if (ret)
		return ret;

	data8 &= ~SPI_AUTO_EDGE_DETECTION;
	ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
	if (ret)
		return ret;

	return 0;
}

void ksz9477_switch_exit(struct ksz_device *dev)
{
	ksz9477_reset_switch(dev);
}

MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver");
MODULE_LICENSE("GPL");