// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip KSZ9477 switch driver main logic
 *
 * Copyright (C) 2017-2019 Microchip Technology Inc.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include <net/switchdev.h>

#include "ksz9477_reg.h"
#include "ksz_common.h"
#include "ksz9477.h"

static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
	regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
}

static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
			 bool set)
{
	regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset),
			   bits, set ? bits : 0);
}

static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
{
	regmap_update_bits(dev->regmap[2], addr, bits, set ? bits : 0);
}

static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
			       u32 bits, bool set)
{
	regmap_update_bits(dev->regmap[2], PORT_CTRL_ADDR(port, offset),
			   bits, set ? bits : 0);
}

int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
{
	u16 frame_size, max_frame = 0;
	int i;

	frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;

	/* Cache the per-port MTU setting */
	dev->ports[port].max_frame = frame_size;

	for (i = 0; i < dev->info->port_cnt; i++)
		max_frame = max(max_frame, dev->ports[i].max_frame);

	return regmap_update_bits(dev->regmap[1], REG_SW_MTU__2,
				  REG_SW_MTU_MASK, max_frame);
}

int ksz9477_max_mtu(struct ksz_device *dev, int port)
{
	return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
}

static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
{
	unsigned int val;

	return regmap_read_poll_timeout(dev->regmap[0], REG_SW_VLAN_CTRL,
					val, !(val & VLAN_START), 10, 1000);
}

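/* The VLAN table is accessed indirectly: the helpers below stage the entry
 * index in REG_SW_VLAN_ENTRY_INDEX__2, trigger a read or write through
 * REG_SW_VLAN_CTRL and poll until the hardware clears VLAN_START before
 * touching the three entry registers.
 */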
static int ksz9477_get_vlan_table(struct ksz_device *dev, u16 vid,
				  u32 *vlan_table)
{
	int ret;

	mutex_lock(&dev->vlan_mutex);

	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);

	/* wait to be cleared */
	ret = ksz9477_wait_vlan_ctrl_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read vlan table\n");
		goto exit;
	}

	ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
	ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
	ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);

	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);

exit:
	mutex_unlock(&dev->vlan_mutex);

	return ret;
}

static int ksz9477_set_vlan_table(struct ksz_device *dev, u16 vid,
				  u32 *vlan_table)
{
	int ret;

	mutex_lock(&dev->vlan_mutex);

	ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
	ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
	ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);

	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);

	/* wait to be cleared */
	ret = ksz9477_wait_vlan_ctrl_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to write vlan table\n");
		goto exit;
	}

	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);

	/* update vlan cache table */
	dev->vlan_cache[vid].table[0] = vlan_table[0];
	dev->vlan_cache[vid].table[1] = vlan_table[1];
	dev->vlan_cache[vid].table[2] = vlan_table[2];

exit:
	mutex_unlock(&dev->vlan_mutex);

	return ret;
}

static void ksz9477_read_table(struct ksz_device *dev, u32 *table)
{
	ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
	ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
	ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
	ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
}

static void ksz9477_write_table(struct ksz_device *dev, u32 *table)
{
	ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
	ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
	ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
	ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
}

static int ksz9477_wait_alu_ready(struct ksz_device *dev)
{
	unsigned int val;

	return regmap_read_poll_timeout(dev->regmap[2], REG_SW_ALU_CTRL__4,
					val, !(val & ALU_START), 10, 1000);
}

static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
{
	unsigned int val;

	return regmap_read_poll_timeout(dev->regmap[2],
					REG_SW_ALU_STAT_CTRL__4,
					val, !(val & ALU_STAT_START),
					10, 1000);
}

int ksz9477_reset_switch(struct ksz_device *dev)
{
	u8 data8;
	u32 data32;

	/* reset switch */
	ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);

	/* turn off SPI DO Edge select */
	regmap_update_bits(dev->regmap[0], REG_SW_GLOBAL_SERIAL_CTRL_0,
			   SPI_AUTO_EDGE_DETECTION, 0);

	/* default configuration */
	ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
	data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
		SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
	ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);

	/* disable interrupts */
	ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
	ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
	ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);

	/* KSZ9893 compatible chips do not support refclk configuration */
	if (dev->chip_id == KSZ9893_CHIP_ID ||
	    dev->chip_id == KSZ8563_CHIP_ID ||
	    dev->chip_id == KSZ9563_CHIP_ID)
		return 0;

	data8 = SW_ENABLE_REFCLKO;
	if (dev->synclko_disable)
		data8 = 0;
	else if (dev->synclko_125)
		data8 = SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ;
	ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1, data8);

	return 0;
}

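/* Read a single port MIB counter through the indirect MIB control/status
 * register.  The hardware clears the counter on read, so the value is
 * accumulated into *cnt, and the flush/freeze bit is carried over so an
 * active freeze is not disturbed.
 */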
void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
	struct ksz_port *p = &dev->ports[port];
	unsigned int val;
	u32 data;
	int ret;

	/* retain the flush/freeze bit */
	data = p->freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
	data |= MIB_COUNTER_READ;
	data |= (addr << MIB_COUNTER_INDEX_S);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);

	ret = regmap_read_poll_timeout(dev->regmap[2],
			PORT_CTRL_ADDR(port, REG_PORT_MIB_CTRL_STAT__4),
			val, !(val & MIB_COUNTER_READ), 10, 1000);
	/* failed to read MIB. get out of loop */
	if (ret) {
		dev_dbg(dev->dev, "Failed to get MIB\n");
		return;
	}

	/* count resets upon read */
	ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
	*cnt += data;
}

void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
		       u64 *dropped, u64 *cnt)
{
	addr = dev->info->mib_names[addr].index;
	ksz9477_r_mib_cnt(dev, port, addr, cnt);
}

void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
	u32 val = freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
	struct ksz_port *p = &dev->ports[port];

	/* enable/disable the port for flush/freeze function */
	mutex_lock(&p->mib.cnt_mutex);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, val);

	/* used by MIB counter reading code to know freeze is enabled */
	p->freeze = freeze;
	mutex_unlock(&p->mib.cnt_mutex);
}

void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
{
	struct ksz_port_mib *mib = &dev->ports[port].mib;

	/* flush all enabled port MIB counters */
	mutex_lock(&mib->cnt_mutex);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
		     MIB_COUNTER_FLUSH_FREEZE);
	ksz_write8(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FLUSH);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, 0);
	mutex_unlock(&mib->cnt_mutex);
}

static void ksz9477_r_phy_quirks(struct ksz_device *dev, u16 addr, u16 reg,
				 u16 *data)
{
	/* KSZ8563R do not have extended registers but BMSR_ESTATEN and
	 * BMSR_ERCAP bits are set.
	 */
	if (dev->chip_id == KSZ8563_CHIP_ID && reg == MII_BMSR)
		*data &= ~(BMSR_ESTATEN | BMSR_ERCAP);
}

int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
	u16 val = 0xffff;
	int ret;

	/* No real PHY after this. Simulate the PHY.
	 * A fixed PHY can be setup in the device tree, but this function is
	 * still called for that port during initialization.
	 * For RGMII PHY there is no way to access it so the fixed PHY should
	 * be used. For SGMII PHY the supporting code will be added later.
	 */
	if (!dev->info->internal_phy[addr]) {
		struct ksz_port *p = &dev->ports[addr];

		switch (reg) {
		case MII_BMCR:
			val = 0x1140;
			break;
		case MII_BMSR:
			val = 0x796d;
			break;
		case MII_PHYSID1:
			val = 0x0022;
			break;
		case MII_PHYSID2:
			val = 0x1631;
			break;
		case MII_ADVERTISE:
			val = 0x05e1;
			break;
		case MII_LPA:
			val = 0xc5e1;
			break;
		case MII_CTRL1000:
			val = 0x0700;
			break;
		case MII_STAT1000:
			if (p->phydev.speed == SPEED_1000)
				val = 0x3800;
			else
				val = 0;
			break;
		}
	} else {
		ret = ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
		if (ret)
			return ret;

		ksz9477_r_phy_quirks(dev, addr, reg, &val);
	}

	*data = val;

	return 0;
}

int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
	/* No real PHY after this. */
	if (!dev->info->internal_phy[addr])
		return 0;

	return ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
}

void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
	ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
}

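/* Flush dynamic MAC table entries, either for a single port (learning is
 * temporarily disabled on that port while the flush runs) or for the whole
 * switch when the port number is out of range.
 */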
void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
	const u16 *regs = dev->info->regs;
	u8 data;

	regmap_update_bits(dev->regmap[0], REG_SW_LUE_CTRL_2,
			   SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S,
			   SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);

	if (port < dev->info->port_cnt) {
		/* flush individual port */
		ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
		if (!(data & PORT_LEARN_DISABLE))
			ksz_pwrite8(dev, port, regs[P_STP_CTRL],
				    data | PORT_LEARN_DISABLE);
		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
		ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
	} else {
		/* flush all */
		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
	}
}

int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
				bool flag, struct netlink_ext_ack *extack)
{
	if (flag) {
		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
			     PORT_VLAN_LOOKUP_VID_0, true);
		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true);
	} else {
		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false);
		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
			     PORT_VLAN_LOOKUP_VID_0, false);
	}

	return 0;
}

int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
			  const struct switchdev_obj_port_vlan *vlan,
			  struct netlink_ext_ack *extack)
{
	u32 vlan_table[3];
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	int err;

	err = ksz9477_get_vlan_table(dev, vlan->vid, vlan_table);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to get vlan table");
		return err;
	}

	vlan_table[0] = VLAN_VALID | (vlan->vid & VLAN_FID_M);
	if (untagged)
		vlan_table[1] |= BIT(port);
	else
		vlan_table[1] &= ~BIT(port);
	vlan_table[1] &= ~(BIT(dev->cpu_port));

	vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);

	err = ksz9477_set_vlan_table(dev, vlan->vid, vlan_table);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to set vlan table");
		return err;
	}

	/* change PVID */
	if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
		ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vlan->vid);

	return 0;
}

int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
			  const struct switchdev_obj_port_vlan *vlan)
{
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	u32 vlan_table[3];
	u16 pvid;

	ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
	pvid = pvid & 0xFFF;

	if (ksz9477_get_vlan_table(dev, vlan->vid, vlan_table)) {
		dev_dbg(dev->dev, "Failed to get vlan table\n");
		return -ETIMEDOUT;
	}

	vlan_table[2] &= ~BIT(port);

	if (pvid == vlan->vid)
		pvid = 1;

	if (untagged)
		vlan_table[1] &= ~BIT(port);

	if (ksz9477_set_vlan_table(dev, vlan->vid, vlan_table)) {
		dev_dbg(dev->dev, "Failed to set vlan table\n");
		return -ETIMEDOUT;
	}

	ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);

	return 0;
}

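/* FDB entries are manipulated through the ALU index registers: the MAC/FID
 * key is written to REG_SW_ALU_INDEX_0/1, the existing entry is read back
 * with ALU_READ | ALU_START, modified in the VAL_A..VAL_D staging registers
 * and written out again with ALU_WRITE | ALU_START.
 */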
int ksz9477_fdb_add(struct ksz_device *dev, int port,
		    const unsigned char *addr, u16 vid, struct dsa_db db)
{
	u32 alu_table[4];
	u32 data;
	int ret = 0;

	mutex_lock(&dev->alu_mutex);

	/* find any entry with mac & vid */
	data = vid << ALU_FID_INDEX_S;
	data |= ((addr[0] << 8) | addr[1]);
	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);

	data = ((addr[2] << 24) | (addr[3] << 16));
	data |= ((addr[4] << 8) | addr[5]);
	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);

	/* start read operation */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read ALU\n");
		goto exit;
	}

	/* read ALU entry */
	ksz9477_read_table(dev, alu_table);

	/* update ALU entry */
	alu_table[0] = ALU_V_STATIC_VALID;
	alu_table[1] |= BIT(port);
	if (vid)
		alu_table[1] |= ALU_V_USE_FID;
	alu_table[2] = (vid << ALU_V_FID_S);
	alu_table[2] |= ((addr[0] << 8) | addr[1]);
	alu_table[3] = ((addr[2] << 24) | (addr[3] << 16));
	alu_table[3] |= ((addr[4] << 8) | addr[5]);

	ksz9477_write_table(dev, alu_table);

	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to write ALU\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}

int ksz9477_fdb_del(struct ksz_device *dev, int port,
		    const unsigned char *addr, u16 vid, struct dsa_db db)
{
	u32 alu_table[4];
	u32 data;
	int ret = 0;

	mutex_lock(&dev->alu_mutex);

	/* read any entry with mac & vid */
	data = vid << ALU_FID_INDEX_S;
	data |= ((addr[0] << 8) | addr[1]);
	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);

	data = ((addr[2] << 24) | (addr[3] << 16));
	data |= ((addr[4] << 8) | addr[5]);
	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);

	/* start read operation */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read ALU\n");
		goto exit;
	}

	ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
	if (alu_table[0] & ALU_V_STATIC_VALID) {
		ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
		ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
		ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);

		/* clear forwarding port */
		alu_table[2] &= ~BIT(port);

		/* if there is no port to forward, clear table */
		if ((alu_table[2] & ALU_V_PORT_MAP) == 0) {
			alu_table[0] = 0;
			alu_table[1] = 0;
			alu_table[2] = 0;
			alu_table[3] = 0;
		}
	} else {
		alu_table[0] = 0;
		alu_table[1] = 0;
		alu_table[2] = 0;
		alu_table[3] = 0;
	}

	ksz9477_write_table(dev, alu_table);

	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to write ALU\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}

static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
{
	alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID);
	alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER);
	alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER);
	alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) &
			ALU_V_PRIO_AGE_CNT_M;
	alu->mstp = alu_table[0] & ALU_V_MSTP_M;

	alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE);
	alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID);
	alu->port_forward = alu_table[1] & ALU_V_PORT_MAP;

	alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M;

	alu->mac[0] = (alu_table[2] >> 8) & 0xFF;
	alu->mac[1] = alu_table[2] & 0xFF;
	alu->mac[2] = (alu_table[3] >> 24) & 0xFF;
	alu->mac[3] = (alu_table[3] >> 16) & 0xFF;
	alu->mac[4] = (alu_table[3] >> 8) & 0xFF;
	alu->mac[5] = alu_table[3] & 0xFF;
}

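/* Walk the ALU with the hardware search function: ALU_SEARCH | ALU_START
 * starts the walk, each iteration polls until either a valid entry is
 * presented (ALU_VALID) or the search completes (ALU_START self-clears),
 * and entries forwarding to the requested port are reported through the
 * DSA dump callback.
 */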
int ksz9477_fdb_dump(struct ksz_device *dev, int port,
		     dsa_fdb_dump_cb_t *cb, void *data)
{
	int ret = 0;
	u32 ksz_data;
	u32 alu_table[4];
	struct alu_struct alu;
	int timeout;

	mutex_lock(&dev->alu_mutex);

	/* start ALU search */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);

	do {
		timeout = 1000;
		do {
			ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data);
			if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START))
				break;
			usleep_range(1, 10);
		} while (timeout-- > 0);

		if (!timeout) {
			dev_dbg(dev->dev, "Failed to search ALU\n");
			ret = -ETIMEDOUT;
			goto exit;
		}

		if (!(ksz_data & ALU_VALID))
			continue;

		/* read ALU table */
		ksz9477_read_table(dev, alu_table);

		ksz9477_convert_alu(&alu, alu_table);

		if (alu.port_forward & BIT(port)) {
			ret = cb(alu.mac, alu.fid, alu.is_static, data);
			if (ret)
				goto exit;
		}
	} while (ksz_data & ALU_START);

exit:

	/* stop ALU search */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);

	mutex_unlock(&dev->alu_mutex);

	return ret;
}

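/* The static ALU table (used here for multicast entries) holds
 * dev->info->num_statics slots that are accessed by index through
 * REG_SW_ALU_STAT_CTRL__4: mdb_add scans for a matching or free slot before
 * writing, mdb_del clears the port bit and removes the entry once no ports
 * are left in the map.
 */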
int ksz9477_mdb_add(struct ksz_device *dev, int port,
		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
	u32 static_table[4];
	const u8 *shifts;
	const u32 *masks;
	u32 data;
	int index;
	u32 mac_hi, mac_lo;
	int err = 0;

	shifts = dev->info->shifts;
	masks = dev->info->masks;

	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);

	mutex_lock(&dev->alu_mutex);

	for (index = 0; index < dev->info->num_statics; index++) {
		/* find empty slot first */
		data = (index << shifts[ALU_STAT_INDEX]) |
			masks[ALU_STAT_READ] | ALU_STAT_START;
		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

		/* wait to be finished */
		err = ksz9477_wait_alu_sta_ready(dev);
		if (err) {
			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
			goto exit;
		}

		/* read ALU static table */
		ksz9477_read_table(dev, static_table);

		if (static_table[0] & ALU_V_STATIC_VALID) {
			/* check this has same vid & mac address */
			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
			    static_table[3] == mac_lo) {
				/* found matching one */
				break;
			}
		} else {
			/* found empty one */
			break;
		}
	}

	/* no available entry */
	if (index == dev->info->num_statics) {
		err = -ENOSPC;
		goto exit;
	}

	/* add entry */
	static_table[0] = ALU_V_STATIC_VALID;
	static_table[1] |= BIT(port);
	if (mdb->vid)
		static_table[1] |= ALU_V_USE_FID;
	static_table[2] = (mdb->vid << ALU_V_FID_S);
	static_table[2] |= mac_hi;
	static_table[3] = mac_lo;

	ksz9477_write_table(dev, static_table);

	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

	/* wait to be finished */
	if (ksz9477_wait_alu_sta_ready(dev))
		dev_dbg(dev->dev, "Failed to read ALU STATIC\n");

exit:
	mutex_unlock(&dev->alu_mutex);
	return err;
}

int ksz9477_mdb_del(struct ksz_device *dev, int port,
		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
	u32 static_table[4];
	const u8 *shifts;
	const u32 *masks;
	u32 data;
	int index;
	int ret = 0;
	u32 mac_hi, mac_lo;

	shifts = dev->info->shifts;
	masks = dev->info->masks;

	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);

	mutex_lock(&dev->alu_mutex);

	for (index = 0; index < dev->info->num_statics; index++) {
		/* find empty slot first */
		data = (index << shifts[ALU_STAT_INDEX]) |
			masks[ALU_STAT_READ] | ALU_STAT_START;
		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

		/* wait to be finished */
		ret = ksz9477_wait_alu_sta_ready(dev);
		if (ret) {
			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
			goto exit;
		}

		/* read ALU static table */
		ksz9477_read_table(dev, static_table);

		if (static_table[0] & ALU_V_STATIC_VALID) {
			/* check this has same vid & mac address */

			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
			    static_table[3] == mac_lo) {
				/* found matching one */
				break;
			}
		}
	}

	/* no available entry */
	if (index == dev->info->num_statics)
		goto exit;

	/* clear port */
	static_table[1] &= ~BIT(port);

	if ((static_table[1] & ALU_V_PORT_MAP) == 0) {
		/* delete entry */
		static_table[0] = 0;
		static_table[1] = 0;
		static_table[2] = 0;
		static_table[3] = 0;
	}

	ksz9477_write_table(dev, static_table);

	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

	/* wait to be finished */
	ret = ksz9477_wait_alu_sta_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to read ALU STATIC\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}

int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
			    struct dsa_mall_mirror_tc_entry *mirror,
			    bool ingress, struct netlink_ext_ack *extack)
{
	u8 data;
	int p;

	/* Limit to one sniffer port
	 * Check if any of the port is already set for sniffing
	 * If yes, instruct the user to remove the previous entry & exit
	 */
	for (p = 0; p < dev->info->port_cnt; p++) {
		/* Skip the current sniffing port */
		if (p == mirror->to_local_port)
			continue;

		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);

		if (data & PORT_MIRROR_SNIFFER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Sniffer port is already configured, delete existing rules & retry");
			return -EBUSY;
		}
	}

	if (ingress)
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
	else
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);

	/* configure mirror port */
	ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
		     PORT_MIRROR_SNIFFER, true);

	ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);

	return 0;
}

void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
			     struct dsa_mall_mirror_tc_entry *mirror)
{
	bool in_use = false;
	u8 data;
	int p;

	if (mirror->ingress)
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
	else
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);

	/* Check if any of the port is still referring to sniffer port */
	for (p = 0; p < dev->info->port_cnt; p++) {
		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);

		if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) {
			in_use = true;
			break;
		}
	}

	/* delete sniffing if there are no other mirroring rules */
	if (!in_use)
		ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
			     PORT_MIRROR_SNIFFER, false);
}

static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
{
	phy_interface_t interface;
	bool gbit;

	if (dev->info->internal_phy[port])
		return PHY_INTERFACE_MODE_NA;

	gbit = ksz_get_gbit(dev, port);

	interface = ksz_get_xmii(dev, port, gbit);

	return interface;
}

static void ksz9477_port_mmd_write(struct ksz_device *dev, int port,
				   u8 dev_addr, u16 reg_addr, u16 val)
{
	ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_SETUP,
		     MMD_SETUP(PORT_MMD_OP_INDEX, dev_addr));
	ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_INDEX_DATA, reg_addr);
	ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_SETUP,
		     MMD_SETUP(PORT_MMD_OP_DATA_NO_INCR, dev_addr));
	ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_INDEX_DATA, val);
}

static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
{
	/* Apply PHY settings to address errata listed in
	 * KSZ9477, KSZ9897, KSZ9896, KSZ9567, KSZ8565
	 * Silicon Errata and Data Sheet Clarification documents:
	 *
	 * Register settings are needed to improve PHY receive performance
	 */
	ksz9477_port_mmd_write(dev, port, 0x01, 0x6f, 0xdd0b);
	ksz9477_port_mmd_write(dev, port, 0x01, 0x8f, 0x6032);
	ksz9477_port_mmd_write(dev, port, 0x01, 0x9d, 0x248c);
	ksz9477_port_mmd_write(dev, port, 0x01, 0x75, 0x0060);
	ksz9477_port_mmd_write(dev, port, 0x01, 0xd3, 0x7777);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x06, 0x3008);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x08, 0x2001);

	/* Transmit waveform amplitude can be improved
	 * (1000BASE-T, 100BASE-TX, 10BASE-Te)
	 */
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x04, 0x00d0);

	/* Energy Efficient Ethernet (EEE) feature select must
	 * be manually disabled (except on KSZ8565 which is 100Mbit)
	 */
	if (dev->info->gbit_capable[port])
		ksz9477_port_mmd_write(dev, port, 0x07, 0x3c, 0x0000);

	/* Register settings are required to meet data sheet
	 * supply current specifications
	 */
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x13, 0x6eff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x14, 0xe6ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x15, 0x6eff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x16, 0xe6ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x17, 0x00ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x18, 0x43ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x19, 0xc3ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1a, 0x6fff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1b, 0x07ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1c, 0x0fff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1d, 0xe7ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1e, 0xefff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x20, 0xeeee);
}

void ksz9477_get_caps(struct ksz_device *dev, int port,
		      struct phylink_config *config)
{
	config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
				   MAC_SYM_PAUSE;

	if (dev->info->gbit_capable[port])
		config->mac_capabilities |= MAC_1000FD;
}

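/* The ageing period is programmed in seconds and is split across two
 * registers: bits 7:0 go to REG_SW_LUE_CTRL_3 and bits 10:8 to the
 * SW_AGE_CNT field of REG_SW_LUE_CTRL_0.
 */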
int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
	u32 secs = msecs / 1000;
	u8 value;
	u8 data;
	int ret;

	value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);

	ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
	if (ret < 0)
		return ret;

	data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);

	ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
	if (ret < 0)
		return ret;

	value &= ~SW_AGE_CNT_M;
	value |= FIELD_PREP(SW_AGE_CNT_M, data);

	return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
}

void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
	struct dsa_switch *ds = dev->ds;
	u16 data16;
	u8 member;

	/* enable tag tail for host port */
	if (cpu_port)
		ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
			     true);

	ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);

	/* set back pressure */
	ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);

	/* enable broadcast storm limit */
	ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);

	/* disable DiffServ priority */
	ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);

	/* replace priority */
	ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
		     false);
	ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
			   MTI_PVID_REPLACE, false);

	/* enable 802.1p priority */
	ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);

	if (dev->info->internal_phy[port]) {
		/* do not force flow control */
		ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
			     PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
			     false);

		if (dev->info->phy_errata_9477)
			ksz9477_phy_errata_setup(dev, port);
	} else {
		/* force flow control */
		ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
			     PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
			     true);
	}

	if (cpu_port)
		member = dsa_user_ports(ds);
	else
		member = BIT(dsa_upstream_port(ds, port));

	ksz9477_cfg_port_member(dev, port, member);

	/* clear pending interrupts */
	if (dev->info->internal_phy[port])
		ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
}

void ksz9477_config_cpu_port(struct dsa_switch *ds)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port *p;
	int i;

	for (i = 0; i < dev->info->port_cnt; i++) {
		if (dsa_is_cpu_port(ds, i) &&
		    (dev->info->cpu_ports & (1 << i))) {
			phy_interface_t interface;
			const char *prev_msg;
			const char *prev_mode;

			dev->cpu_port = i;
			p = &dev->ports[i];

			/* Read from XMII register to determine host port
			 * interface. If set specifically in device tree
			 * note the difference to help debugging.
			 */
			interface = ksz9477_get_interface(dev, i);
			if (!p->interface) {
				if (dev->compat_interface) {
					dev_warn(dev->dev,
						 "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
						 "Please update your device tree.\n",
						 i);
					p->interface = dev->compat_interface;
				} else {
					p->interface = interface;
				}
			}
			if (interface && interface != p->interface) {
				prev_msg = " instead of ";
				prev_mode = phy_modes(interface);
			} else {
				prev_msg = "";
				prev_mode = "";
			}
			dev_info(dev->dev,
				 "Port%d: using phy mode %s%s%s\n",
				 i,
				 phy_modes(p->interface),
				 prev_msg,
				 prev_mode);

			/* enable cpu port */
			ksz9477_port_setup(dev, i, true);
		}
	}

	for (i = 0; i < dev->info->port_cnt; i++) {
		if (i == dev->cpu_port)
			continue;
		ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
	}
}

" 1075 "Please update your device tree.\n", 1076 i); 1077 p->interface = dev->compat_interface; 1078 } else { 1079 p->interface = interface; 1080 } 1081 } 1082 if (interface && interface != p->interface) { 1083 prev_msg = " instead of "; 1084 prev_mode = phy_modes(interface); 1085 } else { 1086 prev_msg = ""; 1087 prev_mode = ""; 1088 } 1089 dev_info(dev->dev, 1090 "Port%d: using phy mode %s%s%s\n", 1091 i, 1092 phy_modes(p->interface), 1093 prev_msg, 1094 prev_mode); 1095 1096 /* enable cpu port */ 1097 ksz9477_port_setup(dev, i, true); 1098 } 1099 } 1100 1101 for (i = 0; i < dev->info->port_cnt; i++) { 1102 if (i == dev->cpu_port) 1103 continue; 1104 ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED); 1105 } 1106 } 1107 1108 int ksz9477_enable_stp_addr(struct ksz_device *dev) 1109 { 1110 const u32 *masks; 1111 u32 data; 1112 int ret; 1113 1114 masks = dev->info->masks; 1115 1116 /* Enable Reserved multicast table */ 1117 ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true); 1118 1119 /* Set the Override bit for forwarding BPDU packet to CPU */ 1120 ret = ksz_write32(dev, REG_SW_ALU_VAL_B, 1121 ALU_V_OVERRIDE | BIT(dev->cpu_port)); 1122 if (ret < 0) 1123 return ret; 1124 1125 data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE]; 1126 1127 ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data); 1128 if (ret < 0) 1129 return ret; 1130 1131 /* wait to be finished */ 1132 ret = ksz9477_wait_alu_sta_ready(dev); 1133 if (ret < 0) { 1134 dev_err(dev->dev, "Failed to update Reserved Multicast table\n"); 1135 return ret; 1136 } 1137 1138 return 0; 1139 } 1140 1141 int ksz9477_setup(struct dsa_switch *ds) 1142 { 1143 struct ksz_device *dev = ds->priv; 1144 int ret = 0; 1145 1146 /* Required for port partitioning. */ 1147 ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY, 1148 true); 1149 1150 /* Do not work correctly with tail tagging. */ 1151 ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false); 1152 1153 /* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */ 1154 ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true); 1155 1156 /* Now we can configure default MTU value */ 1157 ret = regmap_update_bits(dev->regmap[1], REG_SW_MTU__2, REG_SW_MTU_MASK, 1158 VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); 1159 if (ret) 1160 return ret; 1161 1162 /* queue based egress rate limit */ 1163 ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true); 1164 1165 /* enable global MIB counter freeze function */ 1166 ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true); 1167 1168 return 0; 1169 } 1170 1171 u32 ksz9477_get_port_addr(int port, int offset) 1172 { 1173 return PORT_CTRL_ADDR(port, offset); 1174 } 1175 1176 int ksz9477_switch_init(struct ksz_device *dev) 1177 { 1178 u8 data8; 1179 int ret; 1180 1181 dev->port_mask = (1 << dev->info->port_cnt) - 1; 1182 1183 /* turn off SPI DO Edge select */ 1184 ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8); 1185 if (ret) 1186 return ret; 1187 1188 data8 &= ~SPI_AUTO_EDGE_DETECTION; 1189 ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8); 1190 if (ret) 1191 return ret; 1192 1193 return 0; 1194 } 1195 1196 void ksz9477_switch_exit(struct ksz_device *dev) 1197 { 1198 ksz9477_reset_switch(dev); 1199 } 1200 1201 MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>"); 1202 MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver"); 1203 MODULE_LICENSE("GPL"); 1204