/* Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 * Copyright (c) 2006, 2007 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/phy_led_triggers.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <asm/irq.h>

static const char *phy_speed_to_str(int speed)
{
	switch (speed) {
	case SPEED_10:
		return "10Mbps";
	case SPEED_100:
		return "100Mbps";
	case SPEED_1000:
		return "1Gbps";
	case SPEED_2500:
		return "2.5Gbps";
	case SPEED_10000:
		return "10Gbps";
	case SPEED_UNKNOWN:
		return "Unknown";
	default:
		return "Unsupported (update phy.c)";
	}
}

#define PHY_STATE_STR(_state)			\
	case PHY_##_state:			\
		return __stringify(_state);	\

static const char *phy_state_to_str(enum phy_state st)
{
	switch (st) {
	PHY_STATE_STR(DOWN)
	PHY_STATE_STR(STARTING)
	PHY_STATE_STR(READY)
	PHY_STATE_STR(PENDING)
	PHY_STATE_STR(UP)
	PHY_STATE_STR(AN)
	PHY_STATE_STR(RUNNING)
	PHY_STATE_STR(NOLINK)
	PHY_STATE_STR(FORCING)
	PHY_STATE_STR(CHANGELINK)
	PHY_STATE_STR(HALTED)
	PHY_STATE_STR(RESUMING)
	}

	return NULL;
}


/**
 * phy_print_status - Convenience function to print out the current phy status
 * @phydev: the phy_device struct
 */
void phy_print_status(struct phy_device *phydev)
{
	if (phydev->link) {
		netdev_info(phydev->attached_dev,
			    "Link is Up - %s/%s - flow control %s\n",
			    phy_speed_to_str(phydev->speed),
			    DUPLEX_FULL == phydev->duplex ? "Full" : "Half",
			    phydev->pause ? "rx/tx" : "off");
	} else {
		netdev_info(phydev->attached_dev, "Link is Down\n");
	}
}
EXPORT_SYMBOL(phy_print_status);

/**
 * phy_clear_interrupt - Ack the phy device's interrupt
 * @phydev: the phy_device struct
 *
 * If the @phydev driver has an ack_interrupt function, call it to
 * ack and clear the phy device's interrupt.
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_clear_interrupt(struct phy_device *phydev)
{
	if (phydev->drv->ack_interrupt)
		return phydev->drv->ack_interrupt(phydev);

	return 0;
}

/**
 * phy_config_interrupt - configure the PHY device for the requested interrupts
 * @phydev: the phy_device struct
 * @interrupts: interrupt flags to configure for this @phydev
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
	phydev->interrupts = interrupts;
	if (phydev->drv->config_intr)
		return phydev->drv->config_intr(phydev);

	return 0;
}


/**
 * phy_aneg_done - return auto-negotiation status
 * @phydev: target phy_device struct
 *
 * Description: Return the auto-negotiation status from this @phydev.
 * Returns > 0 on success or < 0 on error. 0 means that auto-negotiation
 * is still pending.
 */
int phy_aneg_done(struct phy_device *phydev)
{
	if (phydev->drv->aneg_done)
		return phydev->drv->aneg_done(phydev);

	return genphy_aneg_done(phydev);
}
EXPORT_SYMBOL(phy_aneg_done);

/* A structure for mapping a particular speed and duplex
 * combination to a particular SUPPORTED and ADVERTISED value
 */
struct phy_setting {
	int speed;
	int duplex;
	u32 setting;
};

/* A mapping of all SUPPORTED settings to speed/duplex */
static const struct phy_setting settings[] = {
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseKR_Full,
	},
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseKX4_Full,
	},
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseT_Full,
	},
	{
		.speed = SPEED_2500,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_2500baseX_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseKX_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_1000baseT_Half,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_100baseT_Full,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_100baseT_Half,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10baseT_Full,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_10baseT_Half,
	},
};

#define MAX_NUM_SETTINGS ARRAY_SIZE(settings)

/**
 * phy_find_setting - find a PHY settings array entry that matches speed & duplex
 * @speed: speed to match
 * @duplex: duplex to match
 *
 * Description: Searches the settings array for the setting which
 * matches the desired speed and duplex, and returns the index
 * of that setting. Returns the index of the last setting if
 * none of the others match.
 */
static inline unsigned int phy_find_setting(int speed, int duplex)
{
	unsigned int idx = 0;

	while (idx < ARRAY_SIZE(settings) &&
	       (settings[idx].speed != speed || settings[idx].duplex != duplex))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_find_valid - find a PHY setting that matches the requested features mask
 * @idx: The first index in settings[] to search
 * @features: A mask of the valid settings
 *
 * Description: Returns the index of the first valid setting at or after
 * the one pointed to by @idx, as determined by the mask in @features.
 * Returns the index of the last setting if nothing else matches.
 */
static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
{
	while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_supported_speeds - return all speeds currently supported by a phy device
 * @phy: The phy device to return supported speeds of.
 * @speeds: buffer to store supported speeds in.
 * @size: size of speeds buffer.
 *
 * Description: Returns the number of supported speeds, and fills the speeds
 * buffer with the supported speeds. If speeds buffer is too small to contain
 * all currently supported speeds, will return as many speeds as can fit.
 */
unsigned int phy_supported_speeds(struct phy_device *phy,
				  unsigned int *speeds,
				  unsigned int size)
{
	unsigned int count = 0;
	unsigned int idx = 0;

	while (idx < MAX_NUM_SETTINGS && count < size) {
		idx = phy_find_valid(idx, phy->supported);

		if (!(settings[idx].setting & phy->supported))
			break;

		/* Assumes settings are grouped by speed */
		if ((count == 0) ||
		    (speeds[count - 1] != settings[idx].speed)) {
			speeds[count] = settings[idx].speed;
			count++;
		}
		idx++;
	}

	return count;
}

/**
 * phy_check_valid - check if there is a valid PHY setting which matches
 *		     speed, duplex, and feature mask
 * @speed: speed to match
 * @duplex: duplex to match
 * @features: A mask of the valid settings
 *
 * Description: Returns true if there is a valid setting, false otherwise.
 */
static inline bool phy_check_valid(int speed, int duplex, u32 features)
{
	unsigned int idx;

	idx = phy_find_valid(phy_find_setting(speed, duplex), features);

	return settings[idx].speed == speed && settings[idx].duplex == duplex &&
		(settings[idx].setting & features);
}

/**
 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
 * @phydev: the target phy_device struct
 *
 * Description: Make sure the PHY is set to supported speeds and
 * duplexes. Drop down by one in this order: 1000/FULL,
 * 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 */
static void phy_sanitize_settings(struct phy_device *phydev)
{
	u32 features = phydev->supported;
	unsigned int idx;

	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		phydev->autoneg = AUTONEG_DISABLE;

	idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
			     features);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;
}

/**
 * phy_ethtool_sset - generic ethtool sset function, handles all the details
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd
 *
 * A few notes about parameter checking:
 * - We don't set port or transceiver, so we don't care what they
 *   were set to.
 * - phy_start_aneg() will make sure forced settings are sane, and
 *   choose the next best ones from the ones selected, so we don't
 *   care if ethtool tries to give us bad values.
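 *
 * Returns 0 on success or -EINVAL if @cmd is rejected.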
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->phy_address != phydev->mdio.addr)
		return -EINVAL;

	/* We make sure that we don't pass unsupported values in to the PHY */
	cmd->advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = cmd->autoneg;

	phydev->speed = speed;

	phydev->advertising = cmd->advertising;

	if (AUTONEG_ENABLE == cmd->autoneg)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = cmd->duplex;

	phydev->mdix_ctrl = cmd->eth_tp_mdix_ctrl;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_sset);

int phy_ethtool_ksettings_set(struct phy_device *phydev,
			      const struct ethtool_link_ksettings *cmd)
{
	u8 autoneg = cmd->base.autoneg;
	u8 duplex = cmd->base.duplex;
	u32 speed = cmd->base.speed;
	u32 advertising;

	if (cmd->base.phy_address != phydev->mdio.addr)
		return -EINVAL;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	/* We make sure that we don't pass unsupported values in to the PHY */
	advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (autoneg == AUTONEG_ENABLE && advertising == 0)
		return -EINVAL;

	if (autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (duplex != DUPLEX_HALF &&
	      duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = autoneg;

	phydev->speed = speed;

	phydev->advertising = advertising;

	if (autoneg == AUTONEG_ENABLE)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = duplex;

	phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_ksettings_set);

int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	cmd->supported = phydev->supported;

	cmd->advertising = phydev->advertising;
	cmd->lp_advertising = phydev->lp_advertising;

	ethtool_cmd_speed_set(cmd, phydev->speed);
	cmd->duplex = phydev->duplex;
	if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
		cmd->port = PORT_BNC;
	else
		cmd->port = PORT_MII;
	cmd->phy_address = phydev->mdio.addr;
	cmd->transceiver = phy_is_internal(phydev) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = phydev->autoneg;
	cmd->eth_tp_mdix_ctrl = phydev->mdix_ctrl;
	cmd->eth_tp_mdix = phydev->mdix;

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_gset);

int phy_ethtool_ksettings_get(struct phy_device *phydev,
			      struct ethtool_link_ksettings *cmd)
{
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						phydev->supported);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						phydev->advertising);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
						phydev->lp_advertising);

	cmd->base.speed = phydev->speed;
	cmd->base.duplex = phydev->duplex;
	if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
		cmd->base.port = PORT_BNC;
	else
		cmd->base.port = PORT_MII;

	cmd->base.phy_address = phydev->mdio.addr;
	cmd->base.autoneg = phydev->autoneg;
	cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
	cmd->base.eth_tp_mdix = phydev->mdix;

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_ksettings_get);

/**
 * phy_mii_ioctl - generic PHY MII ioctl interface
 * @phydev: the phy_device struct
 * @ifr: &struct ifreq for socket ioctl's
 * @cmd: ioctl cmd to execute
 *
 * Note that this function is currently incompatible with the
 * PHYCONTROL layer. It changes registers without regard to
 * current state. Use at own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *mii_data = if_mii(ifr);
	u16 val = mii_data->val_in;
	bool change_autoneg = false;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data->phy_id = phydev->mdio.addr;
		/* fall through */

	case SIOCGMIIREG:
		mii_data->val_out = mdiobus_read(phydev->mdio.bus,
						 mii_data->phy_id,
						 mii_data->reg_num);
		return 0;

	case SIOCSMIIREG:
		if (mii_data->phy_id == phydev->mdio.addr) {
			switch (mii_data->reg_num) {
			case MII_BMCR:
				if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
					if (phydev->autoneg == AUTONEG_ENABLE)
						change_autoneg = true;
					phydev->autoneg = AUTONEG_DISABLE;
					if (val & BMCR_FULLDPLX)
						phydev->duplex = DUPLEX_FULL;
					else
						phydev->duplex = DUPLEX_HALF;
					if (val & BMCR_SPEED1000)
						phydev->speed = SPEED_1000;
					else if (val & BMCR_SPEED100)
						phydev->speed = SPEED_100;
					else
						phydev->speed = SPEED_10;
				} else {
					if (phydev->autoneg == AUTONEG_DISABLE)
						change_autoneg = true;
					phydev->autoneg = AUTONEG_ENABLE;
				}
				break;
			case MII_ADVERTISE:
				phydev->advertising = mii_adv_to_ethtool_adv_t(val);
				change_autoneg = true;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		mdiobus_write(phydev->mdio.bus, mii_data->phy_id,
			      mii_data->reg_num, val);

		if (mii_data->phy_id == phydev->mdio.addr &&
		    mii_data->reg_num == MII_BMCR &&
		    val & BMCR_RESET)
			return phy_init_hw(phydev);

		if (change_autoneg)
			return phy_start_aneg(phydev);

		return 0;

	case SIOCSHWTSTAMP:
		if (phydev->drv->hwtstamp)
			return phydev->drv->hwtstamp(phydev, ifr);
		/* fall through */

	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(phy_mii_ioctl);

/**
 * phy_start_aneg - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 *   them), and then calls the driver's config_aneg function.
 *   If the PHYCONTROL Layer is operating, we change the state to
 *   reflect the beginning of Auto-negotiation or forcing.
 */
int phy_start_aneg(struct phy_device *phydev)
{
	int err;

	mutex_lock(&phydev->lock);

	if (AUTONEG_DISABLE == phydev->autoneg)
		phy_sanitize_settings(phydev);

	/* Invalidate LP advertising flags */
	phydev->lp_advertising = 0;

	err = phydev->drv->config_aneg(phydev);
	if (err < 0)
		goto out_unlock;

	if (phydev->state != PHY_HALTED) {
		if (AUTONEG_ENABLE == phydev->autoneg) {
			phydev->state = PHY_AN;
			phydev->link_timeout = PHY_AN_TIMEOUT;
		} else {
			phydev->state = PHY_FORCING;
			phydev->link_timeout = PHY_FORCE_TIMEOUT;
		}
	}

out_unlock:
	mutex_unlock(&phydev->lock);
	return err;
}
EXPORT_SYMBOL(phy_start_aneg);

/**
 * phy_start_machine - start PHY state machine tracking
 * @phydev: the phy_device struct
 *
 * Description: The PHY infrastructure can run a state machine
 * which tracks whether the PHY is starting up, negotiating,
 * etc. This function starts the timer which tracks the state
 * of the PHY. If you want to maintain your own state machine,
 * do not call this function.
 */
void phy_start_machine(struct phy_device *phydev)
{
	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
}

/**
 * phy_trigger_machine - trigger the state machine to run
 *
 * @phydev: the phy_device struct
 * @sync: indicate whether we should wait for the workqueue cancelation
 *
 * Description: There has been a change in state which requires that the
 * state machine runs.
 */

static void phy_trigger_machine(struct phy_device *phydev, bool sync)
{
	if (sync)
		cancel_delayed_work_sync(&phydev->state_queue);
	else
		cancel_delayed_work(&phydev->state_queue);
	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
}

/**
 * phy_stop_machine - stop the PHY state machine tracking
 * @phydev: target phy_device struct
 *
 * Description: Stops the state machine timer, sets the state to UP
 * (unless it wasn't up yet). This function must be called BEFORE
 * phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
	cancel_delayed_work_sync(&phydev->state_queue);

	mutex_lock(&phydev->lock);
	if (phydev->state > PHY_UP)
		phydev->state = PHY_UP;
	mutex_unlock(&phydev->lock);
}

/**
 * phy_error - enter HALTED state for this PHY device
 * @phydev: target phy_device struct
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
static void phy_error(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);
	phydev->state = PHY_HALTED;
	mutex_unlock(&phydev->lock);

	phy_trigger_machine(phydev, false);
}

/**
 * phy_interrupt - PHY interrupt handler
 * @irq: interrupt line
 * @phy_dat: phy_device pointer
 *
 * Description: When a PHY interrupt occurs, the handler disables
 * interrupts, and uses phy_change to handle the interrupt.
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
	struct phy_device *phydev = phy_dat;

	if (PHY_HALTED == phydev->state)
		return IRQ_NONE;	/* It can't be ours. */
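
	/* Disable the interrupt line and record that in irq_disable; the
	 * matching enable_irq() is done from phy_change() (or from
	 * phy_stop_interrupts()), which keeps the disable depth balanced.
	 */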
	disable_irq_nosync(irq);
	atomic_inc(&phydev->irq_disable);

	phy_change(phydev);

	return IRQ_HANDLED;
}

/**
 * phy_enable_interrupts - Enable the interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_enable_interrupts(struct phy_device *phydev)
{
	int err = phy_clear_interrupt(phydev);

	if (err < 0)
		return err;

	return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
}

/**
 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_disable_interrupts(struct phy_device *phydev)
{
	int err;

	/* Disable PHY interrupts */
	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
	if (err)
		goto phy_err;

	/* Clear the interrupt */
	err = phy_clear_interrupt(phydev);
	if (err)
		goto phy_err;

	return 0;

phy_err:
	phy_error(phydev);

	return err;
}

/**
 * phy_start_interrupts - request and enable interrupts for a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Request the interrupt for the given PHY.
 * If this fails, then we set irq to PHY_POLL.
 * Otherwise, we enable the interrupts in the PHY.
 * This should only be called with a valid IRQ number.
 * Returns 0 on success or < 0 on error.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
	atomic_set(&phydev->irq_disable, 0);
	if (request_threaded_irq(phydev->irq, NULL, phy_interrupt,
				 IRQF_ONESHOT | IRQF_SHARED,
				 phydev_name(phydev), phydev) < 0) {
		pr_warn("%s: Can't get IRQ %d (PHY)\n",
			phydev->mdio.bus->name, phydev->irq);
		phydev->irq = PHY_POLL;
		return 0;
	}

	return phy_enable_interrupts(phydev);
}
EXPORT_SYMBOL(phy_start_interrupts);

/**
 * phy_stop_interrupts - disable interrupts from a PHY device
 * @phydev: target phy_device struct
 */
int phy_stop_interrupts(struct phy_device *phydev)
{
	int err = phy_disable_interrupts(phydev);

	if (err)
		phy_error(phydev);

	free_irq(phydev->irq, phydev);

	/* If work indeed has been cancelled, disable_irq() will have
	 * been left unbalanced from phy_interrupt() and enable_irq()
	 * has to be called so that other devices on the line work.
	 */
	while (atomic_dec_return(&phydev->irq_disable) >= 0)
		enable_irq(phydev->irq);

	return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);

/**
 * phy_change - Called by the phy_interrupt to handle PHY changes
 * @phydev: phy_device struct that interrupted
 */
void phy_change(struct phy_device *phydev)
{
	if (phy_interrupt_is_valid(phydev)) {
		if (phydev->drv->did_interrupt &&
		    !phydev->drv->did_interrupt(phydev))
			goto ignore;

		if (phy_disable_interrupts(phydev))
			goto phy_err;
	}

	mutex_lock(&phydev->lock);
	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
		phydev->state = PHY_CHANGELINK;
	mutex_unlock(&phydev->lock);

	if (phy_interrupt_is_valid(phydev)) {
		atomic_dec(&phydev->irq_disable);
		enable_irq(phydev->irq);

		/* Reenable interrupts */
		if (PHY_HALTED != phydev->state &&
		    phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
			goto irq_enable_err;
	}

	/* reschedule state queue work to run as soon as possible */
	phy_trigger_machine(phydev, true);
	return;

ignore:
	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);
	return;

irq_enable_err:
	disable_irq(phydev->irq);
	atomic_inc(&phydev->irq_disable);
phy_err:
	phy_error(phydev);
}

/**
 * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
 * @work: work_struct that describes the work to be done
 */
void phy_change_work(struct work_struct *work)
{
	struct phy_device *phydev =
		container_of(work, struct phy_device, phy_queue);

	phy_change(phydev);
}

/**
 * phy_stop - Bring down the PHY link, and stop checking the status
 * @phydev: target phy_device struct
 */
void phy_stop(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	if (PHY_HALTED == phydev->state)
		goto out_unlock;

	if (phy_interrupt_is_valid(phydev)) {
		/* Disable PHY Interrupts */
		phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

		/* Clear any pending interrupts */
		phy_clear_interrupt(phydev);
	}

	phydev->state = PHY_HALTED;

out_unlock:
	mutex_unlock(&phydev->lock);

	/* Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
	 * will not reenable interrupts.
	 */
}
EXPORT_SYMBOL(phy_stop);

/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to
 * handle PHY-related work. Used during startup to start the
 * PHY, and after a call to phy_stop() to resume operation.
 * Also used to indicate the MDIO bus has cleared an error
 * condition.
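 *
 * A MAC driver typically pairs this with phy_stop(); a minimal sketch
 * (assuming the PHY was attached with phy_connect() and ndev->phydev is
 * set, with a hypothetical foo_open() as the driver's ndo_open) would be:
 *
 *	static int foo_open(struct net_device *ndev)
 *	{
 *		phy_start(ndev->phydev);
 *		return 0;
 *	}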
 */
void phy_start(struct phy_device *phydev)
{
	bool do_resume = false;
	int err = 0;

	mutex_lock(&phydev->lock);

	switch (phydev->state) {
	case PHY_STARTING:
		phydev->state = PHY_PENDING;
		break;
	case PHY_READY:
		phydev->state = PHY_UP;
		break;
	case PHY_HALTED:
		/* make sure interrupts are re-enabled for the PHY */
		if (phydev->irq != PHY_POLL) {
			err = phy_enable_interrupts(phydev);
			if (err < 0)
				break;
		}

		phydev->state = PHY_RESUMING;
		do_resume = true;
		break;
	default:
		break;
	}
	mutex_unlock(&phydev->lock);

	/* if phy was suspended, bring the physical link up again */
	if (do_resume)
		phy_resume(phydev);

	phy_trigger_machine(phydev, true);
}
EXPORT_SYMBOL(phy_start);

static void phy_adjust_link(struct phy_device *phydev)
{
	phydev->adjust_link(phydev->attached_dev);
	phy_led_trigger_change_speed(phydev);
}

/**
 * phy_state_machine - Handle the state machine
 * @work: work_struct that describes the work to be done
 */
void phy_state_machine(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct phy_device *phydev =
			container_of(dwork, struct phy_device, state_queue);
	bool needs_aneg = false, do_suspend = false;
	enum phy_state old_state;
	int err = 0;
	int old_link;

	mutex_lock(&phydev->lock);

	old_state = phydev->state;

	if (phydev->drv->link_change_notify)
		phydev->drv->link_change_notify(phydev);

	switch (phydev->state) {
	case PHY_DOWN:
	case PHY_STARTING:
	case PHY_READY:
	case PHY_PENDING:
		break;
	case PHY_UP:
		needs_aneg = true;

		phydev->link_timeout = PHY_AN_TIMEOUT;

		break;
	case PHY_AN:
		err = phy_read_status(phydev);
		if (err < 0)
			break;

		/* If the link is down, give up on negotiation for now */
		if (!phydev->link) {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
			phy_adjust_link(phydev);
			break;
		}

		/* Check if negotiation is done. Break if there's an error */
		err = phy_aneg_done(phydev);
		if (err < 0)
			break;

		/* If AN is done, we're running */
		if (err > 0) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phy_adjust_link(phydev);

		} else if (0 == phydev->link_timeout--)
			needs_aneg = true;
		break;
	case PHY_NOLINK:
		if (phy_interrupt_is_valid(phydev))
			break;

		err = phy_read_status(phydev);
		if (err)
			break;

		if (phydev->link) {
			if (AUTONEG_ENABLE == phydev->autoneg) {
				err = phy_aneg_done(phydev);
				if (err < 0)
					break;

				if (!err) {
					phydev->state = PHY_AN;
					phydev->link_timeout = PHY_AN_TIMEOUT;
					break;
				}
			}
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phy_adjust_link(phydev);
		}
		break;
	case PHY_FORCING:
		err = genphy_update_link(phydev);
		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			if (0 == phydev->link_timeout--)
				needs_aneg = true;
		}

		phy_adjust_link(phydev);
		break;
	case PHY_RUNNING:
		/* Only register a CHANGE if we are polling and link changed
		 * since latest checking.
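		 *
		 * When interrupts are in use, the RUNNING -> CHANGELINK
		 * transition is driven from phy_change() instead.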
		 */
		if (phydev->irq == PHY_POLL) {
			old_link = phydev->link;
			err = phy_read_status(phydev);
			if (err)
				break;

			if (old_link != phydev->link)
				phydev->state = PHY_CHANGELINK;
		}
		/*
		 * Failsafe: check that nobody set phydev->link=0 between two
		 * poll cycles, otherwise we won't leave RUNNING state as long
		 * as link remains down.
		 */
		if (!phydev->link && phydev->state == PHY_RUNNING) {
			phydev->state = PHY_CHANGELINK;
			phydev_err(phydev, "no link in PHY_RUNNING\n");
		}
		break;
	case PHY_CHANGELINK:
		err = phy_read_status(phydev);
		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
		}

		phy_adjust_link(phydev);

		if (phy_interrupt_is_valid(phydev))
			err = phy_config_interrupt(phydev,
						   PHY_INTERRUPT_ENABLED);
		break;
	case PHY_HALTED:
		if (phydev->link) {
			phydev->link = 0;
			netif_carrier_off(phydev->attached_dev);
			phy_adjust_link(phydev);
			do_suspend = true;
		}
		break;
	case PHY_RESUMING:
		if (AUTONEG_ENABLE == phydev->autoneg) {
			err = phy_aneg_done(phydev);
			if (err < 0)
				break;

			/* err > 0 if AN is done.
			 * Otherwise, it's 0, and we're still waiting for AN
			 */
			if (err > 0) {
				err = phy_read_status(phydev);
				if (err)
					break;

				if (phydev->link) {
					phydev->state = PHY_RUNNING;
					netif_carrier_on(phydev->attached_dev);
				} else {
					phydev->state = PHY_NOLINK;
				}
				phy_adjust_link(phydev);
			} else {
				phydev->state = PHY_AN;
				phydev->link_timeout = PHY_AN_TIMEOUT;
			}
		} else {
			err = phy_read_status(phydev);
			if (err)
				break;

			if (phydev->link) {
				phydev->state = PHY_RUNNING;
				netif_carrier_on(phydev->attached_dev);
			} else {
				phydev->state = PHY_NOLINK;
			}
			phy_adjust_link(phydev);
		}
		break;
	}

	mutex_unlock(&phydev->lock);

	if (needs_aneg)
		err = phy_start_aneg(phydev);
	else if (do_suspend)
		phy_suspend(phydev);

	if (err < 0)
		phy_error(phydev);

	phydev_dbg(phydev, "PHY state change %s -> %s\n",
		   phy_state_to_str(old_state),
		   phy_state_to_str(phydev->state));

	/* Only re-schedule a PHY state machine change if we are polling the
	 * PHY; if PHY_IGNORE_INTERRUPT is set, we will be moving
	 * between states from phy_mac_interrupt().
	 */
	if (phydev->irq == PHY_POLL)
		queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
				   PHY_STATE_TIME * HZ);
}

/**
 * phy_mac_interrupt - MAC says the link has changed
 * @phydev: phy_device struct with changed link
 * @new_link: Link is Up/Down.
 *
 * Description: The MAC layer is able to indicate there has been a change
 * in the PHY link status. Set the new link status, and trigger the
 * state machine to run via a work queue.
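 *
 * This is the path used when the PHY's irq is set to
 * PHY_IGNORE_INTERRUPT: no MDIO access is done here, the new link state
 * is simply recorded and phy_change_work() is scheduled to process it.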
 */
void phy_mac_interrupt(struct phy_device *phydev, int new_link)
{
	phydev->link = new_link;

	/* Trigger a state machine change */
	queue_work(system_power_efficient_wq, &phydev->phy_queue);
}
EXPORT_SYMBOL(phy_mac_interrupt);

static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
				    int addr)
{
	/* Write the desired MMD Devad */
	bus->write(bus, addr, MII_MMD_CTRL, devad);

	/* Write the desired MMD register address */
	bus->write(bus, addr, MII_MMD_DATA, prtad);

	/* Select the Function : DATA with no post increment */
	bus->write(bus, addr, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
}

/**
 * phy_read_mmd_indirect - reads data from the MMD registers
 * @phydev: The PHY device
 * @prtad: MMD Address
 * @devad: MMD DEVAD
 *
 * Description: it reads data from the MMD registers (clause 22 access to
 * clause 45 registers) of the specified PHY address.
 * To read these registers we have:
 * 1) Write reg 13 // DEVAD
 * 2) Write reg 14 // MMD Address
 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Read reg 14 // Read MMD data
 */
int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, int devad)
{
	struct phy_driver *phydrv = phydev->drv;
	int addr = phydev->mdio.addr;
	int value = -1;

	if (!phydrv->read_mmd_indirect) {
		struct mii_bus *bus = phydev->mdio.bus;

		mutex_lock(&bus->mdio_lock);
		mmd_phy_indirect(bus, prtad, devad, addr);

		/* Read the content of the MMD's selected register */
		value = bus->read(bus, addr, MII_MMD_DATA);
		mutex_unlock(&bus->mdio_lock);
	} else {
		value = phydrv->read_mmd_indirect(phydev, prtad, devad, addr);
	}
	return value;
}
EXPORT_SYMBOL(phy_read_mmd_indirect);

/**
 * phy_write_mmd_indirect - writes data to the MMD registers
 * @phydev: The PHY device
 * @prtad: MMD Address
 * @devad: MMD DEVAD
 * @data: data to write in the MMD register
 *
 * Description: Write data to the MMD registers of the specified
 * PHY address.
 * To write these registers we have:
 * 1) Write reg 13 // DEVAD
 * 2) Write reg 14 // MMD Address
 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Write reg 14 // Write MMD data
 */
void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
			    int devad, u32 data)
{
	struct phy_driver *phydrv = phydev->drv;
	int addr = phydev->mdio.addr;

	if (!phydrv->write_mmd_indirect) {
		struct mii_bus *bus = phydev->mdio.bus;

		mutex_lock(&bus->mdio_lock);
		mmd_phy_indirect(bus, prtad, devad, addr);

		/* Write the data into MMD's selected register */
		bus->write(bus, addr, MII_MMD_DATA, data);
		mutex_unlock(&bus->mdio_lock);
	} else {
		phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data);
	}
}
EXPORT_SYMBOL(phy_write_mmd_indirect);

/**
 * phy_init_eee - init and check the EEE feature
 * @phydev: target phy_device struct
 * @clk_stop_enable: PHY may stop the clock during LPI
 *
 * Description: it checks if the Energy-Efficient Ethernet (EEE)
 * is supported by looking at the MMD registers 3.20 and 7.60/61
 * and it programs the MMD register 3.0 setting the "Clock stop enable"
 * bit if required.
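 *
 * Returns 0 if EEE can be used, -EPROTONOSUPPORT if EEE is not supported
 * or was not negotiated, or another negative error code if reading the
 * PHY fails.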
 */
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
	/* According to 802.3az, EEE is supported only in full-duplex mode.
	 * Also EEE feature is active when core is operating with MII, GMII
	 * or RGMII (all kinds). Internal PHYs are also allowed to proceed and
	 * should return an error if they do not support EEE.
	 */
	if ((phydev->duplex == DUPLEX_FULL) &&
	    ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
	     (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
	     phy_interface_is_rgmii(phydev) ||
	     phy_is_internal(phydev))) {
		int eee_lp, eee_cap, eee_adv;
		u32 lp, cap, adv;
		int status;

		/* Read phy status to properly get the right settings */
		status = phy_read_status(phydev);
		if (status)
			return status;

		/* First check if the EEE ability is supported */
		eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
						MDIO_MMD_PCS);
		if (eee_cap <= 0)
			goto eee_exit_err;

		cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
		if (!cap)
			goto eee_exit_err;

		/* Check which link settings negotiated and verify it in
		 * the EEE advertising registers.
		 */
		eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
					       MDIO_MMD_AN);
		if (eee_lp <= 0)
			goto eee_exit_err;

		eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
						MDIO_MMD_AN);
		if (eee_adv <= 0)
			goto eee_exit_err;

		adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
		if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
			goto eee_exit_err;

		if (clk_stop_enable) {
			/* Configure the PHY to stop receiving xMII
			 * clock while it is signaling LPI.
			 */
			int val = phy_read_mmd_indirect(phydev, MDIO_CTRL1,
							MDIO_MMD_PCS);
			if (val < 0)
				return val;

			val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
			phy_write_mmd_indirect(phydev, MDIO_CTRL1,
					       MDIO_MMD_PCS, val);
		}

		return 0; /* EEE supported */
	}
eee_exit_err:
	return -EPROTONOSUPPORT;
}
EXPORT_SYMBOL(phy_init_eee);

/**
 * phy_get_eee_err - report the EEE wake error count
 * @phydev: target phy_device struct
 *
 * Description: it reports the number of times the PHY
 * failed to complete its normal wake sequence.
 */
int phy_get_eee_err(struct phy_device *phydev)
{
	return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR, MDIO_MMD_PCS);
}
EXPORT_SYMBOL(phy_get_eee_err);

/**
 * phy_ethtool_get_eee - get EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: it reports the Supported/Advertisement/LP Advertisement
 * capabilities.
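 *
 * Returns 0 on success or a negative error code if reading any of the
 * EEE MMD registers fails.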
 */
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int val;

	/* Get Supported EEE */
	val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, MDIO_MMD_PCS);
	if (val < 0)
		return val;
	data->supported = mmd_eee_cap_to_ethtool_sup_t(val);

	/* Get advertisement EEE */
	val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN);
	if (val < 0)
		return val;
	data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Get LP advertisement EEE */
	val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE, MDIO_MMD_AN);
	if (val < 0)
		return val;
	data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_get_eee);

/**
 * phy_ethtool_set_eee - set EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: it programs the EEE advertisement register.
 */
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);

	/* Mask prohibited EEE modes */
	val &= ~phydev->eee_broken_modes;

	phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, val);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);

int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv->set_wol)
		return phydev->drv->set_wol(phydev, wol);

	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(phy_ethtool_set_wol);

void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv->get_wol)
		phydev->drv->get_wol(phydev, wol);
}
EXPORT_SYMBOL(phy_ethtool_get_wol);

int phy_ethtool_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_get(phydev, cmd);
}
EXPORT_SYMBOL(phy_ethtool_get_link_ksettings);

int phy_ethtool_set_link_ksettings(struct net_device *ndev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_set(phydev, cmd);
}
EXPORT_SYMBOL(phy_ethtool_set_link_ksettings);

int phy_ethtool_nway_reset(struct net_device *ndev)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	return genphy_restart_aneg(phydev);
}
EXPORT_SYMBOL(phy_ethtool_nway_reset);