// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2017 Broadcom
 */

#include "bcm-phy-lib.h"
#include <linux/bitfield.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
#include <linux/export.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>

#define MII_BCM_CHANNEL_WIDTH	0x2000
#define BCM_CL45VEN_EEE_ADV	0x3c

int __bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val)
{
	int rc;

	rc = __phy_write(phydev, MII_BCM54XX_EXP_SEL, reg);
	if (rc < 0)
		return rc;

	return __phy_write(phydev, MII_BCM54XX_EXP_DATA, val);
}
EXPORT_SYMBOL_GPL(__bcm_phy_write_exp);

int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val)
{
	int rc;

	phy_lock_mdio_bus(phydev);
	rc = __bcm_phy_write_exp(phydev, reg, val);
	phy_unlock_mdio_bus(phydev);

	return rc;
}
EXPORT_SYMBOL_GPL(bcm_phy_write_exp);

int __bcm_phy_read_exp(struct phy_device *phydev, u16 reg)
{
	int val;

	val = __phy_write(phydev, MII_BCM54XX_EXP_SEL, reg);
	if (val < 0)
		return val;

	val = __phy_read(phydev, MII_BCM54XX_EXP_DATA);

	/* Restore default value. It's O.K. if this write fails. */
	__phy_write(phydev, MII_BCM54XX_EXP_SEL, 0);

	return val;
}
EXPORT_SYMBOL_GPL(__bcm_phy_read_exp);

int bcm_phy_read_exp(struct phy_device *phydev, u16 reg)
{
	int rc;

	phy_lock_mdio_bus(phydev);
	rc = __bcm_phy_read_exp(phydev, reg);
	phy_unlock_mdio_bus(phydev);

	return rc;
}
EXPORT_SYMBOL_GPL(bcm_phy_read_exp);

int __bcm_phy_modify_exp(struct phy_device *phydev, u16 reg, u16 mask, u16 set)
{
	int new, ret;

	ret = __phy_write(phydev, MII_BCM54XX_EXP_SEL, reg);
	if (ret < 0)
		return ret;

	ret = __phy_read(phydev, MII_BCM54XX_EXP_DATA);
	if (ret < 0)
		return ret;

	new = (ret & ~mask) | set;
	if (new == ret)
		return 0;

	return __phy_write(phydev, MII_BCM54XX_EXP_DATA, new);
}
EXPORT_SYMBOL_GPL(__bcm_phy_modify_exp);

int bcm_phy_modify_exp(struct phy_device *phydev, u16 reg, u16 mask, u16 set)
{
	int ret;

	phy_lock_mdio_bus(phydev);
	ret = __bcm_phy_modify_exp(phydev, reg, mask, set);
	phy_unlock_mdio_bus(phydev);

	return ret;
}
EXPORT_SYMBOL_GPL(bcm_phy_modify_exp);

int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum)
{
	/* The register must be written to both the Shadow Register Select and
	 * the Shadow Read Register Selector
	 */
	phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MASK |
		  regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT);
	return phy_read(phydev, MII_BCM54XX_AUX_CTL);
}
EXPORT_SYMBOL_GPL(bcm54xx_auxctl_read);

int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val)
{
	return phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | val);
}
EXPORT_SYMBOL(bcm54xx_auxctl_write);

int bcm_phy_write_misc(struct phy_device *phydev,
		       u16 reg, u16 chl, u16 val)
{
	int rc;
	int tmp;

	rc = phy_write(phydev, MII_BCM54XX_AUX_CTL,
		       MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
	if (rc < 0)
		return rc;

	tmp = phy_read(phydev, MII_BCM54XX_AUX_CTL);
	tmp |= MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA;
	rc = phy_write(phydev, MII_BCM54XX_AUX_CTL, tmp);
	if (rc < 0)
		return rc;

	tmp = (chl * MII_BCM_CHANNEL_WIDTH) | reg;
	rc = bcm_phy_write_exp(phydev, tmp, val);

	return rc;
}
EXPORT_SYMBOL_GPL(bcm_phy_write_misc);
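
/* Counterpart of bcm_phy_write_misc(): select the MISC shadow bank, enable
 * SMDSP clock access and read back the per-channel expansion register.
 * Returns the register value or a negative errno.
 */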
int bcm_phy_read_misc(struct phy_device *phydev,
		      u16 reg, u16 chl)
{
	int rc;
	int tmp;

	rc = phy_write(phydev, MII_BCM54XX_AUX_CTL,
		       MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
	if (rc < 0)
		return rc;

	tmp = phy_read(phydev, MII_BCM54XX_AUX_CTL);
	tmp |= MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA;
	rc = phy_write(phydev, MII_BCM54XX_AUX_CTL, tmp);
	if (rc < 0)
		return rc;

	tmp = (chl * MII_BCM_CHANNEL_WIDTH) | reg;
	rc = bcm_phy_read_exp(phydev, tmp);

	return rc;
}
EXPORT_SYMBOL_GPL(bcm_phy_read_misc);

int bcm_phy_ack_intr(struct phy_device *phydev)
{
	int reg;

	/* Clear pending interrupts. */
	reg = phy_read(phydev, MII_BCM54XX_ISR);
	if (reg < 0)
		return reg;

	return 0;
}
EXPORT_SYMBOL_GPL(bcm_phy_ack_intr);

int bcm_phy_config_intr(struct phy_device *phydev)
{
	int reg, err;

	reg = phy_read(phydev, MII_BCM54XX_ECR);
	if (reg < 0)
		return reg;

	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
		err = bcm_phy_ack_intr(phydev);
		if (err)
			return err;

		reg &= ~MII_BCM54XX_ECR_IM;
		err = phy_write(phydev, MII_BCM54XX_ECR, reg);
	} else {
		reg |= MII_BCM54XX_ECR_IM;
		err = phy_write(phydev, MII_BCM54XX_ECR, reg);
		if (err)
			return err;

		err = bcm_phy_ack_intr(phydev);
	}
	return err;
}
EXPORT_SYMBOL_GPL(bcm_phy_config_intr);

irqreturn_t bcm_phy_handle_interrupt(struct phy_device *phydev)
{
	int irq_status, irq_mask;

	irq_status = phy_read(phydev, MII_BCM54XX_ISR);
	if (irq_status < 0) {
		phy_error(phydev);
		return IRQ_NONE;
	}

	/* If a bit from the Interrupt Mask register is set, the corresponding
	 * bit from the Interrupt Status register is masked. So read the IMR
	 * and then flip the bits to get the list of possible interrupt
	 * sources.
	 */
	irq_mask = phy_read(phydev, MII_BCM54XX_IMR);
	if (irq_mask < 0) {
		phy_error(phydev);
		return IRQ_NONE;
	}
	irq_mask = ~irq_mask;

	if (!(irq_status & irq_mask))
		return IRQ_NONE;

	phy_trigger_machine(phydev);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(bcm_phy_handle_interrupt);
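
/* Accessors for the MII_BCM54XX_SHD shadow register bank.  The shadow
 * selector (and, for writes, the data) are packed into a single
 * MII_BCM54XX_SHD access.
 */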
int bcm_phy_read_shadow(struct phy_device *phydev, u16 shadow)
{
	phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
	return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD));
}
EXPORT_SYMBOL_GPL(bcm_phy_read_shadow);

int bcm_phy_write_shadow(struct phy_device *phydev, u16 shadow,
			 u16 val)
{
	return phy_write(phydev, MII_BCM54XX_SHD,
			 MII_BCM54XX_SHD_WRITE |
			 MII_BCM54XX_SHD_VAL(shadow) |
			 MII_BCM54XX_SHD_DATA(val));
}
EXPORT_SYMBOL_GPL(bcm_phy_write_shadow);

int __bcm_phy_read_rdb(struct phy_device *phydev, u16 rdb)
{
	int val;

	val = __phy_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
	if (val < 0)
		return val;

	return __phy_read(phydev, MII_BCM54XX_RDB_DATA);
}
EXPORT_SYMBOL_GPL(__bcm_phy_read_rdb);

int bcm_phy_read_rdb(struct phy_device *phydev, u16 rdb)
{
	int ret;

	phy_lock_mdio_bus(phydev);
	ret = __bcm_phy_read_rdb(phydev, rdb);
	phy_unlock_mdio_bus(phydev);

	return ret;
}
EXPORT_SYMBOL_GPL(bcm_phy_read_rdb);

int __bcm_phy_write_rdb(struct phy_device *phydev, u16 rdb, u16 val)
{
	int ret;

	ret = __phy_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
	if (ret < 0)
		return ret;

	return __phy_write(phydev, MII_BCM54XX_RDB_DATA, val);
}
EXPORT_SYMBOL_GPL(__bcm_phy_write_rdb);

int bcm_phy_write_rdb(struct phy_device *phydev, u16 rdb, u16 val)
{
	int ret;

	phy_lock_mdio_bus(phydev);
	ret = __bcm_phy_write_rdb(phydev, rdb, val);
	phy_unlock_mdio_bus(phydev);

	return ret;
}
EXPORT_SYMBOL_GPL(bcm_phy_write_rdb);

int __bcm_phy_modify_rdb(struct phy_device *phydev, u16 rdb, u16 mask, u16 set)
{
	int new, ret;

	ret = __phy_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
	if (ret < 0)
		return ret;

	ret = __phy_read(phydev, MII_BCM54XX_RDB_DATA);
	if (ret < 0)
		return ret;

	new = (ret & ~mask) | set;
	if (new == ret)
		return 0;

	return __phy_write(phydev, MII_BCM54XX_RDB_DATA, new);
}
EXPORT_SYMBOL_GPL(__bcm_phy_modify_rdb);

int bcm_phy_modify_rdb(struct phy_device *phydev, u16 rdb, u16 mask, u16 set)
{
	int ret;

	phy_lock_mdio_bus(phydev);
	ret = __bcm_phy_modify_rdb(phydev, rdb, mask, set);
	phy_unlock_mdio_bus(phydev);

	return ret;
}
EXPORT_SYMBOL_GPL(bcm_phy_modify_rdb);

int bcm_phy_enable_apd(struct phy_device *phydev, bool dll_pwr_down)
{
	int val;

	if (dll_pwr_down) {
		val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR3);
		if (val < 0)
			return val;

		val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
		bcm_phy_write_shadow(phydev, BCM54XX_SHD_SCR3, val);
	}

	val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_APD);
	if (val < 0)
		return val;

	/* Clear APD bits */
	val &= BCM_APD_CLR_MASK;

	if (phydev->autoneg == AUTONEG_ENABLE)
		val |= BCM54XX_SHD_APD_EN;
	else
		val |= BCM_NO_ANEG_APD_EN;

	/* Enable energy detect single link pulse for easy wakeup */
	val |= BCM_APD_SINGLELP_EN;

	/* Enable Auto Power-Down (APD) for the PHY */
	return bcm_phy_write_shadow(phydev, BCM54XX_SHD_APD, val);
}
EXPORT_SYMBOL_GPL(bcm_phy_enable_apd);
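
/* Enable or disable EEE in the Clause 45 vendor EEE control register and
 * adjust the advertised 100TX/1000T EEE link modes accordingly.
 */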
int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
{
	int val, mask = 0;

	/* Enable EEE at PHY level */
	val = phy_read_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL);
	if (val < 0)
		return val;

	if (enable)
		val |= LPI_FEATURE_EN | LPI_FEATURE_EN_DIG1000X;
	else
		val &= ~(LPI_FEATURE_EN | LPI_FEATURE_EN_DIG1000X);

	phy_write_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL, (u32)val);

	/* Advertise EEE */
	val = phy_read_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV);
	if (val < 0)
		return val;

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
			      phydev->supported))
		mask |= MDIO_EEE_1000T;
	if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
			      phydev->supported))
		mask |= MDIO_EEE_100TX;

	if (enable)
		val |= mask;
	else
		val &= ~mask;

	phy_write_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV, (u32)val);

	return 0;
}
EXPORT_SYMBOL_GPL(bcm_phy_set_eee);

int bcm_phy_downshift_get(struct phy_device *phydev, u8 *count)
{
	int val;

	val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
	if (val < 0)
		return val;

	/* Check if wirespeed is enabled or not */
	if (!(val & MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN)) {
		*count = DOWNSHIFT_DEV_DISABLE;
		return 0;
	}

	val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR2);
	if (val < 0)
		return val;

	/* Downgrade after one link attempt */
	if (val & BCM54XX_SHD_SCR2_WSPD_RTRY_DIS) {
		*count = 1;
	} else {
		/* Downgrade after configured retry count */
		val >>= BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT;
		val &= BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_MASK;
		*count = val + BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_OFFSET;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(bcm_phy_downshift_get);

int bcm_phy_downshift_set(struct phy_device *phydev, u8 count)
{
	int val = 0, ret = 0;

	/* Range check the number given */
	if (count - BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_OFFSET >
	    BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_MASK &&
	    count != DOWNSHIFT_DEV_DEFAULT_COUNT) {
		return -ERANGE;
	}

	val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
	if (val < 0)
		return val;

	/* Set the write enable bit */
	val |= MII_BCM54XX_AUXCTL_MISC_WREN;

	if (count == DOWNSHIFT_DEV_DISABLE) {
		val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN;
		return bcm54xx_auxctl_write(phydev,
					    MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
					    val);
	} else {
		val |= MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN;
		ret = bcm54xx_auxctl_write(phydev,
					   MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
					   val);
		if (ret < 0)
			return ret;
	}

	val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR2);
	val &= ~(BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_MASK <<
		 BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT |
		 BCM54XX_SHD_SCR2_WSPD_RTRY_DIS);

	switch (count) {
	case 1:
		val |= BCM54XX_SHD_SCR2_WSPD_RTRY_DIS;
		break;
	case DOWNSHIFT_DEV_DEFAULT_COUNT:
		val |= 1 << BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT;
		break;
	default:
		val |= (count - BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_OFFSET) <<
			BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT;
		break;
	}

	return bcm_phy_write_shadow(phydev, BCM54XX_SHD_SCR2, val);
}
EXPORT_SYMBOL_GPL(bcm_phy_downshift_set);
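
/* Describes one hardware statistics counter: @reg is the PHY register that
 * holds it, @shift/@bits give the counter's position and width within that
 * register.
 */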
struct bcm_phy_hw_stat {
	const char *string;
	u8 reg;
	u8 shift;
	u8 bits;
};

/* Counters freeze at either 0xffff or 0xff, better than nothing */
static const struct bcm_phy_hw_stat bcm_phy_hw_stats[] = {
	{ "phy_receive_errors", MII_BRCM_CORE_BASE12, 0, 16 },
	{ "phy_serdes_ber_errors", MII_BRCM_CORE_BASE13, 8, 8 },
	{ "phy_false_carrier_sense_errors", MII_BRCM_CORE_BASE13, 0, 8 },
	{ "phy_local_rcvr_nok", MII_BRCM_CORE_BASE14, 8, 8 },
	{ "phy_remote_rcv_nok", MII_BRCM_CORE_BASE14, 0, 8 },
};

int bcm_phy_get_sset_count(struct phy_device *phydev)
{
	return ARRAY_SIZE(bcm_phy_hw_stats);
}
EXPORT_SYMBOL_GPL(bcm_phy_get_sset_count);

void bcm_phy_get_strings(struct phy_device *phydev, u8 *data)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++)
		strscpy(data + i * ETH_GSTRING_LEN,
			bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN);
}
EXPORT_SYMBOL_GPL(bcm_phy_get_strings);

/* Caller is supposed to provide appropriate storage for the library code to
 * access the shadow copy
 */
static u64 bcm_phy_get_stat(struct phy_device *phydev, u64 *shadow,
			    unsigned int i)
{
	struct bcm_phy_hw_stat stat = bcm_phy_hw_stats[i];
	int val;
	u64 ret;

	val = phy_read(phydev, stat.reg);
	if (val < 0) {
		ret = U64_MAX;
	} else {
		val >>= stat.shift;
		val = val & ((1 << stat.bits) - 1);
		shadow[i] += val;
		ret = shadow[i];
	}

	return ret;
}

void bcm_phy_get_stats(struct phy_device *phydev, u64 *shadow,
		       struct ethtool_stats *stats, u64 *data)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++)
		data[i] = bcm_phy_get_stat(phydev, shadow, i);
}
EXPORT_SYMBOL_GPL(bcm_phy_get_stats);

void bcm_phy_r_rc_cal_reset(struct phy_device *phydev)
{
	/* Reset R_CAL/RC_CAL Engine */
	bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);

	/* Disable Reset R_CAL/RC_CAL Engine */
	bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
}
EXPORT_SYMBOL_GPL(bcm_phy_r_rc_cal_reset);

int bcm_phy_28nm_a0b0_afe_config_init(struct phy_device *phydev)
{
	/* Increase VCO range to prevent unlocking problem of PLL at low
	 * temp
	 */
	bcm_phy_write_misc(phydev, PLL_PLLCTRL_1, 0x0048);

	/* Change Ki to 011 */
	bcm_phy_write_misc(phydev, PLL_PLLCTRL_2, 0x021b);

	/* Disable loading of TVCO buffer to bandgap, set bandgap trim
	 * to 111
	 */
	bcm_phy_write_misc(phydev, PLL_PLLCTRL_4, 0x0e20);

	/* Adjust bias current trim by -3 */
	bcm_phy_write_misc(phydev, DSP_TAP10, 0x690b);

	/* Switch to CORE_BASE1E */
	phy_write(phydev, MII_BRCM_CORE_BASE1E, 0xd);

	bcm_phy_r_rc_cal_reset(phydev);

	/* write AFE_RXCONFIG_0 */
	bcm_phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19);

	/* write AFE_RXCONFIG_1 */
	bcm_phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9a3f);

	/* write AFE_RX_LP_COUNTER */
	bcm_phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);

	/* write AFE_HPF_TRIM_OTHERS */
	bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x000b);

	/* write AFE_TX_CONFIG */
	bcm_phy_write_misc(phydev, AFE_TX_CONFIG, 0x0800);

	return 0;
}
EXPORT_SYMBOL_GPL(bcm_phy_28nm_a0b0_afe_config_init);
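
/* Allow reception of extended length (jumbo) packets: set the EXT_PKT_LEN
 * bit in the auxiliary control register and enable the elastic FIFO via the
 * extended control register.
 */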
int bcm_phy_enable_jumbo(struct phy_device *phydev)
{
	int ret;

	ret = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL);
	if (ret < 0)
		return ret;

	/* Enable extended length packet reception */
	ret = bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
				   ret | MII_BCM54XX_AUXCTL_ACTL_EXT_PKT_LEN);
	if (ret < 0)
		return ret;

	/* Enable the elastic FIFO for raising the transmission limit from
	 * 4.5KB to 10KB, at the expense of an additional 16 ns in propagation
	 * latency.
	 */
	return phy_set_bits(phydev, MII_BCM54XX_ECR, MII_BCM54XX_ECR_FIFOE);
}
EXPORT_SYMBOL_GPL(bcm_phy_enable_jumbo);

static int __bcm_phy_enable_rdb_access(struct phy_device *phydev)
{
	return __bcm_phy_write_exp(phydev, BCM54XX_EXP_REG7E, 0);
}

static int __bcm_phy_enable_legacy_access(struct phy_device *phydev)
{
	return __bcm_phy_write_rdb(phydev, BCM54XX_RDB_REG0087,
				   BCM54XX_ACCESS_MODE_LEGACY_EN);
}

static int _bcm_phy_cable_test_start(struct phy_device *phydev, bool is_rdb)
{
	u16 mask, set;
	int ret;

	/* Auto-negotiation must be enabled for cable diagnostics to work, but
	 * don't advertise any capabilities.
	 */
	phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
	phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
	phy_write(phydev, MII_CTRL1000, 0);

	phy_lock_mdio_bus(phydev);
	if (is_rdb) {
		ret = __bcm_phy_enable_legacy_access(phydev);
		if (ret)
			goto out;
	}

	mask = BCM54XX_ECD_CTRL_CROSS_SHORT_DIS | BCM54XX_ECD_CTRL_UNIT_MASK;
	set = BCM54XX_ECD_CTRL_RUN | BCM54XX_ECD_CTRL_BREAK_LINK |
	      FIELD_PREP(BCM54XX_ECD_CTRL_UNIT_MASK,
			 BCM54XX_ECD_CTRL_UNIT_CM);

	ret = __bcm_phy_modify_exp(phydev, BCM54XX_EXP_ECD_CTRL, mask, set);

out:
	/* re-enable the RDB access even if there was an error */
	if (is_rdb)
		ret = __bcm_phy_enable_rdb_access(phydev) ? : ret;

	phy_unlock_mdio_bus(phydev);

	return ret;
}

static int bcm_phy_cable_test_report_trans(int result)
{
	switch (result) {
	case BCM54XX_ECD_FAULT_TYPE_OK:
		return ETHTOOL_A_CABLE_RESULT_CODE_OK;
	case BCM54XX_ECD_FAULT_TYPE_OPEN:
		return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
	case BCM54XX_ECD_FAULT_TYPE_SAME_SHORT:
		return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
	case BCM54XX_ECD_FAULT_TYPE_CROSS_SHORT:
		return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
	case BCM54XX_ECD_FAULT_TYPE_INVALID:
	case BCM54XX_ECD_FAULT_TYPE_BUSY:
	default:
		return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
	}
}

static bool bcm_phy_distance_valid(int result)
{
	switch (result) {
	case BCM54XX_ECD_FAULT_TYPE_OPEN:
	case BCM54XX_ECD_FAULT_TYPE_SAME_SHORT:
	case BCM54XX_ECD_FAULT_TYPE_CROSS_SHORT:
		return true;
	}
	return false;
}

static int bcm_phy_report_length(struct phy_device *phydev, int pair)
{
	int val;

	val = __bcm_phy_read_exp(phydev,
				 BCM54XX_EXP_ECD_PAIR_A_LENGTH_RESULTS + pair);
	if (val < 0)
		return val;

	if (val == BCM54XX_ECD_LENGTH_RESULTS_INVALID)
		return 0;

	ethnl_cable_test_fault_length(phydev, pair, val);

	return 0;
}
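
/* Check whether the ECD measurement has completed; if so, translate the
 * per-pair fault codes into ethtool result codes and report a fault length
 * for every pair where a distance is meaningful.
 */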
static int _bcm_phy_cable_test_get_status(struct phy_device *phydev,
					  bool *finished, bool is_rdb)
{
	int pair_a, pair_b, pair_c, pair_d, ret;

	*finished = false;

	phy_lock_mdio_bus(phydev);

	if (is_rdb) {
		ret = __bcm_phy_enable_legacy_access(phydev);
		if (ret)
			goto out;
	}

	ret = __bcm_phy_read_exp(phydev, BCM54XX_EXP_ECD_CTRL);
	if (ret < 0)
		goto out;

	if (ret & BCM54XX_ECD_CTRL_IN_PROGRESS) {
		ret = 0;
		goto out;
	}

	ret = __bcm_phy_read_exp(phydev, BCM54XX_EXP_ECD_FAULT_TYPE);
	if (ret < 0)
		goto out;

	pair_a = FIELD_GET(BCM54XX_ECD_FAULT_TYPE_PAIR_A_MASK, ret);
	pair_b = FIELD_GET(BCM54XX_ECD_FAULT_TYPE_PAIR_B_MASK, ret);
	pair_c = FIELD_GET(BCM54XX_ECD_FAULT_TYPE_PAIR_C_MASK, ret);
	pair_d = FIELD_GET(BCM54XX_ECD_FAULT_TYPE_PAIR_D_MASK, ret);

	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
				bcm_phy_cable_test_report_trans(pair_a));
	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_B,
				bcm_phy_cable_test_report_trans(pair_b));
	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_C,
				bcm_phy_cable_test_report_trans(pair_c));
	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_D,
				bcm_phy_cable_test_report_trans(pair_d));

	if (bcm_phy_distance_valid(pair_a))
		bcm_phy_report_length(phydev, 0);
	if (bcm_phy_distance_valid(pair_b))
		bcm_phy_report_length(phydev, 1);
	if (bcm_phy_distance_valid(pair_c))
		bcm_phy_report_length(phydev, 2);
	if (bcm_phy_distance_valid(pair_d))
		bcm_phy_report_length(phydev, 3);

	ret = 0;
	*finished = true;
out:
	/* re-enable the RDB access even if there was an error */
	if (is_rdb)
		ret = __bcm_phy_enable_rdb_access(phydev) ? : ret;

	phy_unlock_mdio_bus(phydev);

	return ret;
}

int bcm_phy_cable_test_start(struct phy_device *phydev)
{
	return _bcm_phy_cable_test_start(phydev, false);
}
EXPORT_SYMBOL_GPL(bcm_phy_cable_test_start);

int bcm_phy_cable_test_get_status(struct phy_device *phydev, bool *finished)
{
	return _bcm_phy_cable_test_get_status(phydev, finished, false);
}
EXPORT_SYMBOL_GPL(bcm_phy_cable_test_get_status);

/* We assume that all PHYs which support RDB access can be switched to legacy
 * mode. If, in the future, this is not true anymore, we have to re-implement
 * this with RDB access.
 */
int bcm_phy_cable_test_start_rdb(struct phy_device *phydev)
{
	return _bcm_phy_cable_test_start(phydev, true);
}
EXPORT_SYMBOL_GPL(bcm_phy_cable_test_start_rdb);

int bcm_phy_cable_test_get_status_rdb(struct phy_device *phydev,
				      bool *finished)
{
	return _bcm_phy_cable_test_get_status(phydev, finished, true);
}
EXPORT_SYMBOL_GPL(bcm_phy_cable_test_get_status_rdb);

#define BCM54XX_WOL_SUPPORTED_MASK	(WAKE_UCAST | \
					 WAKE_MCAST | \
					 WAKE_BCAST | \
					 WAKE_MAGIC | \
					 WAKE_MAGICSECURE)
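
/* Program the PHY's Wake-on-LAN block: build the destination address and
 * mask matching the requested wake events, enable the WoL interrupt output
 * on LED4 and arm the PHY IRQ as a wake-up source.
 */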
int bcm_phy_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	struct net_device *ndev = phydev->attached_dev;
	u8 da[ETH_ALEN], mask[ETH_ALEN];
	unsigned int i;
	u16 ctl;
	int ret;

	/* Allow a MAC driver to play through its own Wake-on-LAN
	 * implementation
	 */
	if (wol->wolopts & ~BCM54XX_WOL_SUPPORTED_MASK)
		return -EOPNOTSUPP;

	/* The PHY supports passwords of 4, 6 and 8 bytes in size, but Linux's
	 * ethtool only supports 6, for now.
	 */
	BUILD_BUG_ON(sizeof(wol->sopass) != ETH_ALEN);

	/* Clear previous interrupts */
	ret = bcm_phy_read_exp(phydev, BCM54XX_WOL_INT_STATUS);
	if (ret < 0)
		return ret;

	ret = bcm_phy_read_exp(phydev, BCM54XX_WOL_MAIN_CTL);
	if (ret < 0)
		return ret;

	ctl = ret;

	if (!wol->wolopts) {
		if (phy_interrupt_is_valid(phydev))
			disable_irq_wake(phydev->irq);

		/* Leave all interrupts disabled */
		ret = bcm_phy_write_exp(phydev, BCM54XX_WOL_INT_MASK,
					BCM54XX_WOL_ALL_INTRS);
		if (ret < 0)
			return ret;

		/* Disable the global Wake-on-LAN enable bit */
		ctl &= ~BCM54XX_WOL_EN;

		return bcm_phy_write_exp(phydev, BCM54XX_WOL_MAIN_CTL, ctl);
	}

	/* Clear the previously configured mode and mask mode for Wake-on-LAN */
	ctl &= ~(BCM54XX_WOL_MODE_MASK << BCM54XX_WOL_MODE_SHIFT);
	ctl &= ~(BCM54XX_WOL_MASK_MODE_MASK << BCM54XX_WOL_MASK_MODE_SHIFT);
	ctl &= ~BCM54XX_WOL_DIR_PKT_EN;
	ctl &= ~(BCM54XX_WOL_SECKEY_OPT_MASK << BCM54XX_WOL_SECKEY_OPT_SHIFT);

	/* When using WAKE_MAGIC, we program the magic pattern filter to match
	 * the device's MAC address and we accept any MAC DA in the Ethernet
	 * frame.
	 *
	 * When using WAKE_UCAST, WAKE_BCAST or WAKE_MCAST, we program the
	 * following:
	 * - WAKE_UCAST -> MAC DA is the device's MAC with a perfect match
	 * - WAKE_MCAST -> MAC DA is X1:XX:XX:XX:XX:XX where XX is don't care
	 * - WAKE_BCAST -> MAC DA is FF:FF:FF:FF:FF:FF with a perfect match
	 *
	 * Note that the Broadcast MAC DA is inherently going to match the
	 * multicast pattern being matched.
	 */
	memset(mask, 0, sizeof(mask));

	if (wol->wolopts & WAKE_MCAST) {
		memset(da, 0, sizeof(da));
		memset(mask, 0xff, sizeof(mask));
		da[0] = 0x01;
		mask[0] = ~da[0];
	} else {
		if (wol->wolopts & WAKE_UCAST) {
			ether_addr_copy(da, ndev->dev_addr);
		} else if (wol->wolopts & WAKE_BCAST) {
			eth_broadcast_addr(da);
		} else if (wol->wolopts & WAKE_MAGICSECURE) {
			ether_addr_copy(da, wol->sopass);
		} else if (wol->wolopts & WAKE_MAGIC) {
			memset(da, 0, sizeof(da));
			memset(mask, 0xff, sizeof(mask));
		}
	}

	for (i = 0; i < ETH_ALEN / 2; i++) {
		if (wol->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
			ret = bcm_phy_write_exp(phydev,
						BCM54XX_WOL_MPD_DATA1(2 - i),
						ndev->dev_addr[i * 2] << 8 |
						ndev->dev_addr[i * 2 + 1]);
			if (ret < 0)
				return ret;
		}

		ret = bcm_phy_write_exp(phydev, BCM54XX_WOL_MPD_DATA2(2 - i),
					da[i * 2] << 8 | da[i * 2 + 1]);
		if (ret < 0)
			return ret;

		ret = bcm_phy_write_exp(phydev, BCM54XX_WOL_MASK(2 - i),
					mask[i * 2] << 8 | mask[i * 2 + 1]);
		if (ret)
			return ret;
	}

	if (wol->wolopts & WAKE_MAGICSECURE) {
		ctl |= BCM54XX_WOL_SECKEY_OPT_6B <<
		       BCM54XX_WOL_SECKEY_OPT_SHIFT;
		ctl |= BCM54XX_WOL_MODE_SINGLE_MPDSEC << BCM54XX_WOL_MODE_SHIFT;
		ctl |= BCM54XX_WOL_MASK_MODE_DA_FF <<
		       BCM54XX_WOL_MASK_MODE_SHIFT;
	} else {
		if (wol->wolopts & WAKE_MAGIC)
			ctl |= BCM54XX_WOL_MODE_SINGLE_MPD;
		else
			ctl |= BCM54XX_WOL_DIR_PKT_EN;
		ctl |= BCM54XX_WOL_MASK_MODE_DA_ONLY <<
		       BCM54XX_WOL_MASK_MODE_SHIFT;
	}

	/* Globally enable Wake-on-LAN */
	ctl |= BCM54XX_WOL_EN | BCM54XX_WOL_CRC_CHK;

	ret = bcm_phy_write_exp(phydev, BCM54XX_WOL_MAIN_CTL, ctl);
	if (ret < 0)
		return ret;

	/* Enable WOL interrupt on LED4 */
	ret = bcm_phy_read_exp(phydev, BCM54XX_TOP_MISC_LED_CTL);
	if (ret < 0)
		return ret;

	ret |= BCM54XX_LED4_SEL_INTR;
	ret = bcm_phy_write_exp(phydev, BCM54XX_TOP_MISC_LED_CTL, ret);
	if (ret < 0)
		return ret;

	/* Enable all Wake-on-LAN interrupt sources */
	ret = bcm_phy_write_exp(phydev, BCM54XX_WOL_INT_MASK, 0);
	if (ret < 0)
		return ret;

	if (phy_interrupt_is_valid(phydev))
		enable_irq_wake(phydev->irq);

	return 0;
}
EXPORT_SYMBOL_GPL(bcm_phy_set_wol);
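
/* Report the currently programmed Wake-on-LAN configuration by decoding
 * BCM54XX_WOL_MAIN_CTL and the stored match address back into wolopts.
 */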
void bcm_phy_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	struct net_device *ndev = phydev->attached_dev;
	u8 da[ETH_ALEN];
	unsigned int i;
	int ret;
	u16 ctl;

	wol->supported = BCM54XX_WOL_SUPPORTED_MASK;
	wol->wolopts = 0;

	ret = bcm_phy_read_exp(phydev, BCM54XX_WOL_MAIN_CTL);
	if (ret < 0)
		return;

	ctl = ret;

	if (!(ctl & BCM54XX_WOL_EN))
		return;

	for (i = 0; i < sizeof(da) / 2; i++) {
		ret = bcm_phy_read_exp(phydev,
				       BCM54XX_WOL_MPD_DATA2(2 - i));
		if (ret < 0)
			return;

		da[i * 2] = ret >> 8;
		da[i * 2 + 1] = ret & 0xff;
	}

	if (ctl & BCM54XX_WOL_DIR_PKT_EN) {
		if (is_broadcast_ether_addr(da))
			wol->wolopts |= WAKE_BCAST;
		else if (is_multicast_ether_addr(da))
			wol->wolopts |= WAKE_MCAST;
		else if (ether_addr_equal(da, ndev->dev_addr))
			wol->wolopts |= WAKE_UCAST;
	} else {
		ctl = (ctl >> BCM54XX_WOL_MODE_SHIFT) & BCM54XX_WOL_MODE_MASK;
		switch (ctl) {
		case BCM54XX_WOL_MODE_SINGLE_MPD:
			wol->wolopts |= WAKE_MAGIC;
			break;
		case BCM54XX_WOL_MODE_SINGLE_MPDSEC:
			wol->wolopts |= WAKE_MAGICSECURE;
			memcpy(wol->sopass, da, sizeof(da));
			break;
		default:
			break;
		}
	}
}
EXPORT_SYMBOL_GPL(bcm_phy_get_wol);

irqreturn_t bcm_phy_wol_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(bcm_phy_wol_isr);

MODULE_DESCRIPTION("Broadcom PHY Library");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Broadcom Corporation");