/* Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 * Copyright (c) 2006, 2007 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/phy_led_triggers.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <asm/irq.h>

static const char *phy_speed_to_str(int speed)
{
	switch (speed) {
	case SPEED_10:
		return "10Mbps";
	case SPEED_100:
		return "100Mbps";
	case SPEED_1000:
		return "1Gbps";
	case SPEED_2500:
		return "2.5Gbps";
	case SPEED_5000:
		return "5Gbps";
	case SPEED_10000:
		return "10Gbps";
	case SPEED_14000:
		return "14Gbps";
	case SPEED_20000:
		return "20Gbps";
	case SPEED_25000:
		return "25Gbps";
	case SPEED_40000:
		return "40Gbps";
	case SPEED_50000:
		return "50Gbps";
	case SPEED_56000:
		return "56Gbps";
	case SPEED_100000:
		return "100Gbps";
	case SPEED_UNKNOWN:
		return "Unknown";
	default:
		return "Unsupported (update phy.c)";
	}
}

#define PHY_STATE_STR(_state)			\
	case PHY_##_state:			\
		return __stringify(_state);	\

static const char *phy_state_to_str(enum phy_state st)
{
	switch (st) {
	PHY_STATE_STR(DOWN)
	PHY_STATE_STR(STARTING)
	PHY_STATE_STR(READY)
	PHY_STATE_STR(PENDING)
	PHY_STATE_STR(UP)
	PHY_STATE_STR(AN)
	PHY_STATE_STR(RUNNING)
	PHY_STATE_STR(NOLINK)
	PHY_STATE_STR(FORCING)
	PHY_STATE_STR(CHANGELINK)
	PHY_STATE_STR(HALTED)
	PHY_STATE_STR(RESUMING)
	}

	return NULL;
}


/**
 * phy_print_status - Convenience function to print out the current phy status
 * @phydev: the phy_device struct
 */
void phy_print_status(struct phy_device *phydev)
{
	if (phydev->link) {
		netdev_info(phydev->attached_dev,
			    "Link is Up - %s/%s - flow control %s\n",
			    phy_speed_to_str(phydev->speed),
			    DUPLEX_FULL == phydev->duplex ? "Full" : "Half",
			    phydev->pause ? "rx/tx" : "off");
	} else {
		netdev_info(phydev->attached_dev, "Link is Down\n");
	}
}
EXPORT_SYMBOL(phy_print_status);
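
/* Example (illustrative sketch; foo_adjust_link is a hypothetical driver
 * callback): a MAC driver's adjust_link handler commonly finishes by
 * logging the new link state with phy_print_status().
 *
 *	static void foo_adjust_link(struct net_device *ndev)
 *	{
 *		struct phy_device *phydev = ndev->phydev;
 *
 *		... reprogram the MAC for phydev->speed / phydev->duplex ...
 *
 *		phy_print_status(phydev);
 *	}
 */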

/**
 * phy_clear_interrupt - Ack the phy device's interrupt
 * @phydev: the phy_device struct
 *
 * If the @phydev driver has an ack_interrupt function, call it to
 * ack and clear the phy device's interrupt.
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_clear_interrupt(struct phy_device *phydev)
{
	if (phydev->drv->ack_interrupt)
		return phydev->drv->ack_interrupt(phydev);

	return 0;
}

/**
 * phy_config_interrupt - configure the PHY device for the requested interrupts
 * @phydev: the phy_device struct
 * @interrupts: interrupt flags to configure for this @phydev
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
	phydev->interrupts = interrupts;
	if (phydev->drv->config_intr)
		return phydev->drv->config_intr(phydev);

	return 0;
}

/**
 * phy_restart_aneg - restart auto-negotiation
 * @phydev: target phy_device struct
 *
 * Restart the autonegotiation on @phydev. Returns >= 0 on success or
 * negative errno on error.
 */
int phy_restart_aneg(struct phy_device *phydev)
{
	int ret;

	if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
		ret = genphy_c45_restart_aneg(phydev);
	else
		ret = genphy_restart_aneg(phydev);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_restart_aneg);

/**
 * phy_aneg_done - return auto-negotiation status
 * @phydev: target phy_device struct
 *
 * Description: Return the auto-negotiation status from this @phydev.
 * Returns > 0 on success or < 0 on error. 0 means that auto-negotiation
 * is still pending.
 */
int phy_aneg_done(struct phy_device *phydev)
{
	if (phydev->drv && phydev->drv->aneg_done)
		return phydev->drv->aneg_done(phydev);

	/* Avoid genphy_aneg_done() if the Clause 45 PHY does not
	 * implement Clause 22 registers
	 */
	if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
		return -EINVAL;

	return genphy_aneg_done(phydev);
}
EXPORT_SYMBOL(phy_aneg_done);

/* A structure for mapping a particular speed and duplex
 * combination to a particular SUPPORTED and ADVERTISED value
 */
struct phy_setting {
	int speed;
	int duplex;
	u32 setting;
};

/* A mapping of all SUPPORTED settings to speed/duplex. This table
 * must be grouped by speed and sorted in descending match priority
 * - iow, descending speed.
 */
static const struct phy_setting settings[] = {
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseKR_Full,
	},
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseKX4_Full,
	},
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseT_Full,
	},
	{
		.speed = SPEED_2500,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_2500baseX_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseKX_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_1000baseT_Half,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_100baseT_Full,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_100baseT_Half,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10baseT_Full,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_10baseT_Half,
	},
};

/**
 * phy_lookup_setting - lookup a PHY setting
 * @speed: speed to match
 * @duplex: duplex to match
 * @features: allowed link modes
 * @exact: an exact match is required
 *
 * Search the settings array for a setting that matches the speed and
 * duplex, and which is supported.
 *
 * If @exact is set, either an exact match or %NULL for no match will
 * be returned.
 *
 * If @exact is unset, an exact match, the fastest supported setting at
 * or below the specified speed, the slowest supported setting, or if
 * they all fail, %NULL will be returned.
 */
static const struct phy_setting *
phy_lookup_setting(int speed, int duplex, u32 features, bool exact)
{
	const struct phy_setting *p, *match = NULL, *last = NULL;
	int i;

	for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) {
		if (p->setting & features) {
			last = p;
			if (p->speed == speed && p->duplex == duplex) {
				/* Exact match for speed and duplex */
				match = p;
				break;
			} else if (!exact) {
				if (!match && p->speed <= speed)
					/* Candidate */
					match = p;

				if (p->speed < speed)
					break;
			}
		}
	}

	if (!match && !exact)
		match = last;

	return match;
}

/**
 * phy_find_valid - find a PHY setting that matches the requested parameters
 * @speed: desired speed
 * @duplex: desired duplex
 * @supported: mask of supported link modes
 *
 * Locate a supported phy setting that is, in priority order:
 * - an exact match for the specified speed and duplex mode
 * - a match for the specified speed, or slower speed
 * - the slowest supported speed
 * Returns the matched phy_setting entry, or %NULL if no supported phy
 * settings were found.
 */
static const struct phy_setting *
phy_find_valid(int speed, int duplex, u32 supported)
{
	return phy_lookup_setting(speed, duplex, supported, false);
}
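
/* Example (illustrative): with a non-exact lookup, phy_find_valid() falls
 * back to the fastest supported entry at or below the requested speed.
 * Asking for 1000/FULL when only the 100baseT modes are supported
 * therefore resolves to the 100/FULL entry:
 *
 *	const struct phy_setting *s;
 *
 *	s = phy_find_valid(SPEED_1000, DUPLEX_FULL,
 *			   SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half);
 *	s->speed is SPEED_100, s->duplex is DUPLEX_FULL
 */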

/**
 * phy_supported_speeds - return all speeds currently supported by a phy device
 * @phy: The phy device to return supported speeds of.
 * @speeds: buffer to store supported speeds in.
 * @size: size of speeds buffer.
 *
 * Description: Returns the number of supported speeds, and fills the speeds
 * buffer with the supported speeds. If speeds buffer is too small to contain
 * all currently supported speeds, will return as many speeds as can fit.
 */
unsigned int phy_supported_speeds(struct phy_device *phy,
				  unsigned int *speeds,
				  unsigned int size)
{
	unsigned int count = 0;
	unsigned int idx = 0;

	for (idx = 0; idx < ARRAY_SIZE(settings) && count < size; idx++)
		/* Assumes settings are grouped by speed */
		if ((settings[idx].setting & phy->supported) &&
		    (count == 0 || speeds[count - 1] != settings[idx].speed))
			speeds[count++] = settings[idx].speed;

	return count;
}

/**
 * phy_check_valid - check if there is a valid PHY setting which matches
 *		     speed, duplex, and feature mask
 * @speed: speed to match
 * @duplex: duplex to match
 * @features: A mask of the valid settings
 *
 * Description: Returns true if there is a valid setting, false otherwise.
 */
static inline bool phy_check_valid(int speed, int duplex, u32 features)
{
	return !!phy_lookup_setting(speed, duplex, features, true);
}

/**
 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
 * @phydev: the target phy_device struct
 *
 * Description: Make sure the PHY is set to supported speeds and
 * duplexes. Drop down by one in this order: 1000/FULL,
 * 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 */
static void phy_sanitize_settings(struct phy_device *phydev)
{
	const struct phy_setting *setting;
	u32 features = phydev->supported;

	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		phydev->autoneg = AUTONEG_DISABLE;

	setting = phy_find_valid(phydev->speed, phydev->duplex, features);
	if (setting) {
		phydev->speed = setting->speed;
		phydev->duplex = setting->duplex;
	} else {
		/* We failed to find anything (no supported speeds?) */
		phydev->speed = SPEED_UNKNOWN;
		phydev->duplex = DUPLEX_UNKNOWN;
	}
}
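
/* Example (illustrative): querying the speeds a PHY currently supports
 * into a fixed-size buffer; the size of the "speeds" array here is an
 * arbitrary choice.  Entries come back fastest first, following the
 * ordering of the settings table above.
 *
 *	unsigned int speeds[10];
 *	unsigned int num;
 *
 *	num = phy_supported_speeds(phydev, speeds, ARRAY_SIZE(speeds));
 *	speeds[0]..speeds[num - 1] now hold the supported speeds
 */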

/**
 * phy_ethtool_sset - generic ethtool sset function, handles all the details
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd
 *
 * A few notes about parameter checking:
 * - We don't set port or transceiver, so we don't care what they
 *   were set to.
 * - phy_start_aneg() will make sure forced settings are sane, and
 *   choose the next best ones from the ones selected, so we don't
 *   care if ethtool tries to give us bad values.
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->phy_address != phydev->mdio.addr)
		return -EINVAL;

	/* We make sure that we don't pass unsupported values in to the PHY */
	cmd->advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = cmd->autoneg;

	phydev->speed = speed;

	phydev->advertising = cmd->advertising;

	if (AUTONEG_ENABLE == cmd->autoneg)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = cmd->duplex;

	phydev->mdix_ctrl = cmd->eth_tp_mdix_ctrl;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_sset);

int phy_ethtool_ksettings_set(struct phy_device *phydev,
			      const struct ethtool_link_ksettings *cmd)
{
	u8 autoneg = cmd->base.autoneg;
	u8 duplex = cmd->base.duplex;
	u32 speed = cmd->base.speed;
	u32 advertising;

	if (cmd->base.phy_address != phydev->mdio.addr)
		return -EINVAL;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	/* We make sure that we don't pass unsupported values in to the PHY */
	advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (autoneg == AUTONEG_ENABLE && advertising == 0)
		return -EINVAL;

	if (autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (duplex != DUPLEX_HALF &&
	      duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = autoneg;

	phydev->speed = speed;

	phydev->advertising = advertising;

	if (autoneg == AUTONEG_ENABLE)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = duplex;

	phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_ksettings_set);

void phy_ethtool_ksettings_get(struct phy_device *phydev,
			       struct ethtool_link_ksettings *cmd)
{
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						phydev->supported);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						phydev->advertising);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
						phydev->lp_advertising);

	cmd->base.speed = phydev->speed;
	cmd->base.duplex = phydev->duplex;
	if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
		cmd->base.port = PORT_BNC;
	else
		cmd->base.port = PORT_MII;

	cmd->base.phy_address = phydev->mdio.addr;
	cmd->base.autoneg = phydev->autoneg;
	cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
	cmd->base.eth_tp_mdix = phydev->mdix;
}
EXPORT_SYMBOL(phy_ethtool_ksettings_get);
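
/* Example (illustrative): forcing 100 Mb/s full duplex through the
 * ksettings interface.  When autoneg is disabled, only 10/100/1000 with an
 * explicit half/full duplex are accepted; anything else is rejected with
 * -EINVAL.
 *
 *	struct ethtool_link_ksettings ks = {};
 *	int err;
 *
 *	ks.base.phy_address = phydev->mdio.addr;
 *	ks.base.autoneg = AUTONEG_DISABLE;
 *	ks.base.speed = SPEED_100;
 *	ks.base.duplex = DUPLEX_FULL;
 *	err = phy_ethtool_ksettings_set(phydev, &ks);
 */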

/**
 * phy_mii_ioctl - generic PHY MII ioctl interface
 * @phydev: the phy_device struct
 * @ifr: &struct ifreq for socket ioctl's
 * @cmd: ioctl cmd to execute
 *
 * Note that this function is currently incompatible with the
 * PHYCONTROL layer. It changes registers without regard to
 * current state. Use at own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *mii_data = if_mii(ifr);
	u16 val = mii_data->val_in;
	bool change_autoneg = false;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data->phy_id = phydev->mdio.addr;
		/* fall through */

	case SIOCGMIIREG:
		mii_data->val_out = mdiobus_read(phydev->mdio.bus,
						 mii_data->phy_id,
						 mii_data->reg_num);
		return 0;

	case SIOCSMIIREG:
		if (mii_data->phy_id == phydev->mdio.addr) {
			switch (mii_data->reg_num) {
			case MII_BMCR:
				if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
					if (phydev->autoneg == AUTONEG_ENABLE)
						change_autoneg = true;
					phydev->autoneg = AUTONEG_DISABLE;
					if (val & BMCR_FULLDPLX)
						phydev->duplex = DUPLEX_FULL;
					else
						phydev->duplex = DUPLEX_HALF;
					if (val & BMCR_SPEED1000)
						phydev->speed = SPEED_1000;
					else if (val & BMCR_SPEED100)
						phydev->speed = SPEED_100;
					else
						phydev->speed = SPEED_10;
				} else {
					if (phydev->autoneg == AUTONEG_DISABLE)
						change_autoneg = true;
					phydev->autoneg = AUTONEG_ENABLE;
				}
				break;
			case MII_ADVERTISE:
				phydev->advertising = mii_adv_to_ethtool_adv_t(val);
				change_autoneg = true;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		mdiobus_write(phydev->mdio.bus, mii_data->phy_id,
			      mii_data->reg_num, val);

		if (mii_data->phy_id == phydev->mdio.addr &&
		    mii_data->reg_num == MII_BMCR &&
		    val & BMCR_RESET)
			return phy_init_hw(phydev);

		if (change_autoneg)
			return phy_start_aneg(phydev);

		return 0;

	case SIOCSHWTSTAMP:
		if (phydev->drv && phydev->drv->hwtstamp)
			return phydev->drv->hwtstamp(phydev, ifr);
		/* fall through */

	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(phy_mii_ioctl);
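
/* Example (illustrative sketch; foo_ioctl is a hypothetical ndo_do_ioctl
 * handler): drivers with no special MII ioctl handling of their own
 * usually just delegate to phy_mii_ioctl().
 *
 *	static int foo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
 *	{
 *		if (!netif_running(ndev) || !ndev->phydev)
 *			return -EINVAL;
 *
 *		return phy_mii_ioctl(ndev->phydev, ifr, cmd);
 *	}
 */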

/**
 * phy_start_aneg_priv - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 * @sync: indicate whether we should wait for the workqueue cancelation
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 * them), and then calls the driver's config_aneg function.
 * If the PHYCONTROL Layer is operating, we change the state to
 * reflect the beginning of Auto-negotiation or forcing.
 */
static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
{
	bool trigger = 0;
	int err;

	if (!phydev->drv)
		return -EIO;

	mutex_lock(&phydev->lock);

	if (AUTONEG_DISABLE == phydev->autoneg)
		phy_sanitize_settings(phydev);

	/* Invalidate LP advertising flags */
	phydev->lp_advertising = 0;

	err = phydev->drv->config_aneg(phydev);
	if (err < 0)
		goto out_unlock;

	if (phydev->state != PHY_HALTED) {
		if (AUTONEG_ENABLE == phydev->autoneg) {
			phydev->state = PHY_AN;
			phydev->link_timeout = PHY_AN_TIMEOUT;
		} else {
			phydev->state = PHY_FORCING;
			phydev->link_timeout = PHY_FORCE_TIMEOUT;
		}
	}

	/* Re-schedule a PHY state machine to check PHY status because
	 * negotiation may already be done and aneg interrupt may not be
	 * generated.
	 */
	if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
		err = phy_aneg_done(phydev);
		if (err > 0) {
			trigger = true;
			err = 0;
		}
	}

out_unlock:
	mutex_unlock(&phydev->lock);

	if (trigger)
		phy_trigger_machine(phydev, sync);

	return err;
}

/**
 * phy_start_aneg - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 * them), and then calls the driver's config_aneg function.
 * If the PHYCONTROL Layer is operating, we change the state to
 * reflect the beginning of Auto-negotiation or forcing.
 */
int phy_start_aneg(struct phy_device *phydev)
{
	return phy_start_aneg_priv(phydev, true);
}
EXPORT_SYMBOL(phy_start_aneg);

/**
 * phy_start_machine - start PHY state machine tracking
 * @phydev: the phy_device struct
 *
 * Description: The PHY infrastructure can run a state machine
 * which tracks whether the PHY is starting up, negotiating,
 * etc. This function starts the timer which tracks the state
 * of the PHY. If you want to maintain your own state machine,
 * do not call this function.
 */
void phy_start_machine(struct phy_device *phydev)
{
	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
}

/**
 * phy_trigger_machine - trigger the state machine to run
 *
 * @phydev: the phy_device struct
 * @sync: indicate whether we should wait for the workqueue cancelation
 *
 * Description: There has been a change in state which requires that the
 * state machine runs.
 */

void phy_trigger_machine(struct phy_device *phydev, bool sync)
{
	if (sync)
		cancel_delayed_work_sync(&phydev->state_queue);
	else
		cancel_delayed_work(&phydev->state_queue);
	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
}

/**
 * phy_stop_machine - stop the PHY state machine tracking
 * @phydev: target phy_device struct
 *
 * Description: Stops the state machine timer, sets the state to UP
 * (unless it wasn't up yet). This function must be called BEFORE
 * phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
	cancel_delayed_work_sync(&phydev->state_queue);

	mutex_lock(&phydev->lock);
	if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
		phydev->state = PHY_UP;
	mutex_unlock(&phydev->lock);
}

/**
 * phy_error - enter HALTED state for this PHY device
 * @phydev: target phy_device struct
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
static void phy_error(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);
	phydev->state = PHY_HALTED;
	mutex_unlock(&phydev->lock);

	phy_trigger_machine(phydev, false);
}

/**
 * phy_interrupt - PHY interrupt handler
 * @irq: interrupt line
 * @phy_dat: phy_device pointer
 *
 * Description: When a PHY interrupt occurs, the handler disables
 * interrupts, and uses phy_change to handle the interrupt.
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
	struct phy_device *phydev = phy_dat;

	if (PHY_HALTED == phydev->state)
		return IRQ_NONE;	/* It can't be ours. */

	disable_irq_nosync(irq);
	atomic_inc(&phydev->irq_disable);

	phy_change(phydev);

	return IRQ_HANDLED;
}

/**
 * phy_enable_interrupts - Enable the interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_enable_interrupts(struct phy_device *phydev)
{
	int err = phy_clear_interrupt(phydev);

	if (err < 0)
		return err;

	return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
}

/**
 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_disable_interrupts(struct phy_device *phydev)
{
	int err;

	/* Disable PHY interrupts */
	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
	if (err)
		goto phy_err;

	/* Clear the interrupt */
	err = phy_clear_interrupt(phydev);
	if (err)
		goto phy_err;

	return 0;

phy_err:
	phy_error(phydev);

	return err;
}

/**
 * phy_start_interrupts - request and enable interrupts for a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Request the interrupt for the given PHY.
 * If this fails, then we set irq to PHY_POLL.
 * Otherwise, we enable the interrupts in the PHY.
 * This should only be called with a valid IRQ number.
 * Returns 0 on success or < 0 on error.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
	atomic_set(&phydev->irq_disable, 0);
	if (request_threaded_irq(phydev->irq, NULL, phy_interrupt,
				 IRQF_ONESHOT | IRQF_SHARED,
				 phydev_name(phydev), phydev) < 0) {
		pr_warn("%s: Can't get IRQ %d (PHY)\n",
			phydev->mdio.bus->name, phydev->irq);
		phydev->irq = PHY_POLL;
		return 0;
	}

	return phy_enable_interrupts(phydev);
}
EXPORT_SYMBOL(phy_start_interrupts);

/**
 * phy_stop_interrupts - disable interrupts from a PHY device
 * @phydev: target phy_device struct
 */
int phy_stop_interrupts(struct phy_device *phydev)
{
	int err = phy_disable_interrupts(phydev);

	if (err)
		phy_error(phydev);

	free_irq(phydev->irq, phydev);

	/* If work indeed has been cancelled, disable_irq() will have
	 * been left unbalanced from phy_interrupt() and enable_irq()
	 * has to be called so that other devices on the line work.
	 */
	while (atomic_dec_return(&phydev->irq_disable) >= 0)
		enable_irq(phydev->irq);

	return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);

/**
 * phy_change - Called by the phy_interrupt to handle PHY changes
 * @phydev: phy_device struct that interrupted
 */
void phy_change(struct phy_device *phydev)
{
	if (phy_interrupt_is_valid(phydev)) {
		if (phydev->drv->did_interrupt &&
		    !phydev->drv->did_interrupt(phydev))
			goto ignore;

		if (phy_disable_interrupts(phydev))
			goto phy_err;
	}

	mutex_lock(&phydev->lock);
	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
		phydev->state = PHY_CHANGELINK;
	mutex_unlock(&phydev->lock);

	if (phy_interrupt_is_valid(phydev)) {
		atomic_dec(&phydev->irq_disable);
		enable_irq(phydev->irq);

		/* Reenable interrupts */
		if (PHY_HALTED != phydev->state &&
		    phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
			goto irq_enable_err;
	}

	/* reschedule state queue work to run as soon as possible */
	phy_trigger_machine(phydev, true);
	return;

ignore:
	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);
	return;

irq_enable_err:
	disable_irq(phydev->irq);
	atomic_inc(&phydev->irq_disable);
phy_err:
	phy_error(phydev);
}

/**
 * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
 * @work: work_struct that describes the work to be done
 */
void phy_change_work(struct work_struct *work)
{
	struct phy_device *phydev =
		container_of(work, struct phy_device, phy_queue);

	phy_change(phydev);
}

/**
 * phy_stop - Bring down the PHY link, and stop checking the status
 * @phydev: target phy_device struct
 */
void phy_stop(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	if (PHY_HALTED == phydev->state)
		goto out_unlock;

	if (phy_interrupt_is_valid(phydev)) {
		/* Disable PHY Interrupts */
		phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

		/* Clear any pending interrupts */
		phy_clear_interrupt(phydev);
	}

	phydev->state = PHY_HALTED;

out_unlock:
	mutex_unlock(&phydev->lock);

	/* Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
	 * will not reenable interrupts.
	 */
}
EXPORT_SYMBOL(phy_stop);
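
/* Example (illustrative sketch; the foo_* callbacks are hypothetical): a
 * typical MAC driver brings the PHY up from its ndo_open handler and takes
 * it down again from ndo_stop, pairing phy_start() with phy_stop().
 *
 *	static int foo_open(struct net_device *ndev)
 *	{
 *		... enable MAC resources ...
 *		phy_start(ndev->phydev);
 *		return 0;
 *	}
 *
 *	static int foo_stop(struct net_device *ndev)
 *	{
 *		phy_stop(ndev->phydev);
 *		... disable MAC resources ...
 *		return 0;
 *	}
 */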

/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to
 * handle PHY-related work. Used during startup to start the
 * PHY, and after a call to phy_stop() to resume operation.
 * Also used to indicate the MDIO bus has cleared an error
 * condition.
 */
void phy_start(struct phy_device *phydev)
{
	bool do_resume = false;
	int err = 0;

	mutex_lock(&phydev->lock);

	switch (phydev->state) {
	case PHY_STARTING:
		phydev->state = PHY_PENDING;
		break;
	case PHY_READY:
		phydev->state = PHY_UP;
		break;
	case PHY_HALTED:
		/* make sure interrupts are re-enabled for the PHY */
		if (phydev->irq != PHY_POLL) {
			err = phy_enable_interrupts(phydev);
			if (err < 0)
				break;
		}

		phydev->state = PHY_RESUMING;
		do_resume = true;
		break;
	default:
		break;
	}
	mutex_unlock(&phydev->lock);

	/* if phy was suspended, bring the physical link up again */
	if (do_resume)
		phy_resume(phydev);

	phy_trigger_machine(phydev, true);
}
EXPORT_SYMBOL(phy_start);

static void phy_adjust_link(struct phy_device *phydev)
{
	phydev->adjust_link(phydev->attached_dev);
	phy_led_trigger_change_speed(phydev);
}

/**
 * phy_state_machine - Handle the state machine
 * @work: work_struct that describes the work to be done
 */
void phy_state_machine(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct phy_device *phydev =
			container_of(dwork, struct phy_device, state_queue);
	bool needs_aneg = false, do_suspend = false;
	enum phy_state old_state;
	int err = 0;
	int old_link;

	mutex_lock(&phydev->lock);

	old_state = phydev->state;

	if (phydev->drv && phydev->drv->link_change_notify)
		phydev->drv->link_change_notify(phydev);

	switch (phydev->state) {
	case PHY_DOWN:
	case PHY_STARTING:
	case PHY_READY:
	case PHY_PENDING:
		break;
	case PHY_UP:
		needs_aneg = true;

		phydev->link_timeout = PHY_AN_TIMEOUT;

		break;
	case PHY_AN:
		err = phy_read_status(phydev);
		if (err < 0)
			break;

		/* If the link is down, give up on negotiation for now */
		if (!phydev->link) {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
			phy_adjust_link(phydev);
			break;
		}

		/* Check if negotiation is done. Break if there's an error */
		err = phy_aneg_done(phydev);
		if (err < 0)
			break;

		/* If AN is done, we're running */
		if (err > 0) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phy_adjust_link(phydev);

		} else if (0 == phydev->link_timeout--)
			needs_aneg = true;
		break;
	case PHY_NOLINK:
		if (phy_interrupt_is_valid(phydev))
			break;

		err = phy_read_status(phydev);
		if (err)
			break;

		if (phydev->link) {
			if (AUTONEG_ENABLE == phydev->autoneg) {
				err = phy_aneg_done(phydev);
				if (err < 0)
					break;

				if (!err) {
					phydev->state = PHY_AN;
					phydev->link_timeout = PHY_AN_TIMEOUT;
					break;
				}
			}
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phy_adjust_link(phydev);
		}
		break;
	case PHY_FORCING:
		err = genphy_update_link(phydev);
		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			if (0 == phydev->link_timeout--)
				needs_aneg = true;
		}

		phy_adjust_link(phydev);
		break;
	case PHY_RUNNING:
		/* Only register a CHANGE if we are polling and link changed
		 * since latest checking.
		 */
		if (phydev->irq == PHY_POLL) {
			old_link = phydev->link;
			err = phy_read_status(phydev);
			if (err)
				break;

			if (old_link != phydev->link)
				phydev->state = PHY_CHANGELINK;
		}
		/*
		 * Failsafe: check that nobody set phydev->link=0 between two
		 * poll cycles, otherwise we won't leave RUNNING state as long
		 * as link remains down.
		 */
		if (!phydev->link && phydev->state == PHY_RUNNING) {
			phydev->state = PHY_CHANGELINK;
			phydev_err(phydev, "no link in PHY_RUNNING\n");
		}
		break;
	case PHY_CHANGELINK:
		err = phy_read_status(phydev);
		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
		}

		phy_adjust_link(phydev);

		if (phy_interrupt_is_valid(phydev))
			err = phy_config_interrupt(phydev,
						   PHY_INTERRUPT_ENABLED);
		break;
	case PHY_HALTED:
		if (phydev->link) {
			phydev->link = 0;
			netif_carrier_off(phydev->attached_dev);
			phy_adjust_link(phydev);
			do_suspend = true;
		}
		break;
	case PHY_RESUMING:
		if (AUTONEG_ENABLE == phydev->autoneg) {
			err = phy_aneg_done(phydev);
			if (err < 0)
				break;

			/* err > 0 if AN is done.
			 * Otherwise, it's 0, and we're still waiting for AN
			 */
			if (err > 0) {
				err = phy_read_status(phydev);
				if (err)
					break;

				if (phydev->link) {
					phydev->state = PHY_RUNNING;
					netif_carrier_on(phydev->attached_dev);
				} else {
					phydev->state = PHY_NOLINK;
				}
				phy_adjust_link(phydev);
			} else {
				phydev->state = PHY_AN;
				phydev->link_timeout = PHY_AN_TIMEOUT;
			}
		} else {
			err = phy_read_status(phydev);
			if (err)
				break;

			if (phydev->link) {
				phydev->state = PHY_RUNNING;
				netif_carrier_on(phydev->attached_dev);
			} else {
				phydev->state = PHY_NOLINK;
			}
			phy_adjust_link(phydev);
		}
		break;
	}

	mutex_unlock(&phydev->lock);

	if (needs_aneg)
		err = phy_start_aneg_priv(phydev, false);
	else if (do_suspend)
		phy_suspend(phydev);

	if (err < 0)
		phy_error(phydev);

	phydev_dbg(phydev, "PHY state change %s -> %s\n",
		   phy_state_to_str(old_state),
		   phy_state_to_str(phydev->state));

	/* Only re-schedule a PHY state machine change if we are polling the
	 * PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving
	 * between states from phy_mac_interrupt()
	 */
	if (phydev->irq == PHY_POLL)
		queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
				   PHY_STATE_TIME * HZ);
}

/**
 * phy_mac_interrupt - MAC says the link has changed
 * @phydev: phy_device struct with changed link
 * @new_link: Link is Up/Down.
 *
 * Description: The MAC layer is able to indicate there has been a change
 * in the PHY link status. Set the new link status, and trigger the
 * state machine via a work queue.
 */
void phy_mac_interrupt(struct phy_device *phydev, int new_link)
{
	phydev->link = new_link;

	/* Trigger a state machine change */
	queue_work(system_power_efficient_wq, &phydev->phy_queue);
}
EXPORT_SYMBOL(phy_mac_interrupt);
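
/* Example (illustrative): a MAC that learns the link state in-band, e.g.
 * from its own status interrupt, can feed it to the PHY state machine
 * instead of polling; link_up here is a hypothetical local variable.
 *
 *	phy_mac_interrupt(ndev->phydev, link_up ? 1 : 0);
 */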

/**
 * phy_init_eee - init and check the EEE feature
 * @phydev: target phy_device struct
 * @clk_stop_enable: PHY may stop the clock during LPI
 *
 * Description: it checks if the Energy-Efficient Ethernet (EEE)
 * is supported by looking at the MMD registers 3.20 and 7.60/61
 * and it programs the MMD register 3.0 setting the "Clock stop enable"
 * bit if required.
 */
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
	if (!phydev->drv)
		return -EIO;

	/* According to 802.3az, EEE is supported only in full-duplex mode. */
	if (phydev->duplex == DUPLEX_FULL) {
		int eee_lp, eee_cap, eee_adv;
		u32 lp, cap, adv;
		int status;

		/* Read phy status to properly get the right settings */
		status = phy_read_status(phydev);
		if (status)
			return status;

		/* First check if the EEE ability is supported */
		eee_cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
		if (eee_cap <= 0)
			goto eee_exit_err;

		cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
		if (!cap)
			goto eee_exit_err;

		/* Check which link settings negotiated and verify it in
		 * the EEE advertising registers.
		 */
		eee_lp = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
		if (eee_lp <= 0)
			goto eee_exit_err;

		eee_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
		if (eee_adv <= 0)
			goto eee_exit_err;

		adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
		if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
			goto eee_exit_err;

		if (clk_stop_enable) {
			/* Configure the PHY to stop receiving xMII
			 * clock while it is signaling LPI.
			 */
			int val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
			if (val < 0)
				return val;

			val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
			phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, val);
		}

		return 0; /* EEE supported */
	}
eee_exit_err:
	return -EPROTONOSUPPORT;
}
EXPORT_SYMBOL(phy_init_eee);

/**
 * phy_get_eee_err - report the EEE wake error count
 * @phydev: target phy_device struct
 *
 * Description: it reports the number of times the PHY
 * failed to complete its normal wake sequence.
 */
int phy_get_eee_err(struct phy_device *phydev)
{
	if (!phydev->drv)
		return -EIO;

	return phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_WK_ERR);
}
EXPORT_SYMBOL(phy_get_eee_err);

/**
 * phy_ethtool_get_eee - get EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: it reports the Supported/Advertisement/LP Advertisement
 * capabilities.
 */
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int val;

	if (!phydev->drv)
		return -EIO;

	/* Get Supported EEE */
	val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
	if (val < 0)
		return val;
	data->supported = mmd_eee_cap_to_ethtool_sup_t(val);

	/* Get advertisement EEE */
	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
	if (val < 0)
		return val;
	data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Get LP advertisement EEE */
	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
	if (val < 0)
		return val;
	data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_get_eee);

/**
 * phy_ethtool_set_eee - set EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: it programs the EEE Advertisement register.
 */
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int cap, old_adv, adv, ret;

	if (!phydev->drv)
		return -EIO;

	/* Get Supported EEE */
	cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
	if (cap < 0)
		return cap;

	old_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
	if (old_adv < 0)
		return old_adv;

	adv = ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap;

	/* Mask prohibited EEE modes */
	adv &= ~phydev->eee_broken_modes;

	if (old_adv != adv) {
		ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
		if (ret < 0)
			return ret;

		/* Restart autonegotiation so the new modes get sent to the
		 * link partner.
		 */
		ret = phy_restart_aneg(phydev);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);

int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv && phydev->drv->set_wol)
		return phydev->drv->set_wol(phydev, wol);

	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(phy_ethtool_set_wol);

void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv && phydev->drv->get_wol)
		phydev->drv->get_wol(phydev, wol);
}
EXPORT_SYMBOL(phy_ethtool_get_wol);

int phy_ethtool_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	phy_ethtool_ksettings_get(phydev, cmd);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_get_link_ksettings);

int phy_ethtool_set_link_ksettings(struct net_device *ndev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_set(phydev, cmd);
}
EXPORT_SYMBOL(phy_ethtool_set_link_ksettings);

int phy_ethtool_nway_reset(struct net_device *ndev)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	if (!phydev->drv)
		return -EIO;

	return phy_restart_aneg(phydev);
}
EXPORT_SYMBOL(phy_ethtool_nway_reset);
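
/* Example (illustrative sketch; foo_ethtool_ops is a hypothetical driver
 * structure): the three net_device-based helpers above take the attached
 * phydev from the net_device, so they can be plugged straight into a
 * driver's ethtool_ops.
 *
 *	static const struct ethtool_ops foo_ethtool_ops = {
 *		.get_link_ksettings	= phy_ethtool_get_link_ksettings,
 *		.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 *		.nway_reset		= phy_ethtool_nway_reset,
 *	};
 */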