/*
 * drivers/net/phy/phy.c
 *
 * Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 * Copyright (c) 2006, 2007 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>

#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/**
 * phy_print_status - Convenience function to print out the current phy status
 * @phydev: the phy_device struct
 */
void phy_print_status(struct phy_device *phydev)
{
	if (phydev->link)
		pr_info("%s - Link is Up - %d/%s\n",
			dev_name(&phydev->dev),
			phydev->speed,
			DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
	else
		pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
}
EXPORT_SYMBOL(phy_print_status);
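/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a MAC driver's adjust_link callback typically calls phy_print_status()
 * whenever the link state it has cached differs from phydev->link.  The
 * foo_priv structure and foo_adjust_link() below are made-up names:
 *
 *	static void foo_adjust_link(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *		struct phy_device *phydev = priv->phydev;
 *
 *		if (phydev->link != priv->old_link ||
 *		    phydev->speed != priv->old_speed ||
 *		    phydev->duplex != priv->old_duplex) {
 *			priv->old_link = phydev->link;
 *			priv->old_speed = phydev->speed;
 *			priv->old_duplex = phydev->duplex;
 *			phy_print_status(phydev);
 *		}
 *	}
 */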
/**
 * phy_clear_interrupt - Ack the phy device's interrupt
 * @phydev: the phy_device struct
 *
 * If the @phydev driver has an ack_interrupt function, call it to
 * ack and clear the phy device's interrupt.
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_clear_interrupt(struct phy_device *phydev)
{
	int err = 0;

	if (phydev->drv->ack_interrupt)
		err = phydev->drv->ack_interrupt(phydev);

	return err;
}

/**
 * phy_config_interrupt - configure the PHY device for the requested interrupts
 * @phydev: the phy_device struct
 * @interrupts: interrupt flags to configure for this @phydev
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
	int err = 0;

	phydev->interrupts = interrupts;
	if (phydev->drv->config_intr)
		err = phydev->drv->config_intr(phydev);

	return err;
}


/**
 * phy_aneg_done - return auto-negotiation status
 * @phydev: target phy_device struct
 *
 * Description: Reads the status register and returns 0 if
 *   auto-negotiation is incomplete, a negative error code if the
 *   register read fails, or BMSR_ANEGCOMPLETE if auto-negotiation
 *   is done.
 */
static inline int phy_aneg_done(struct phy_device *phydev)
{
	int retval;

	retval = phy_read(phydev, MII_BMSR);

	return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
}

/* A structure for mapping a particular speed and duplex
 * combination to a particular SUPPORTED and ADVERTISED value */
struct phy_setting {
	int speed;
	int duplex;
	u32 setting;
};

/* A mapping of all SUPPORTED settings to speed/duplex */
static const struct phy_setting settings[] = {
	{
		.speed = 10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_1000baseT_Half,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_100baseT_Full,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_100baseT_Half,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10baseT_Full,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_10baseT_Half,
	},
};

#define MAX_NUM_SETTINGS ARRAY_SIZE(settings)

/**
 * phy_find_setting - find a PHY settings array entry that matches speed & duplex
 * @speed: speed to match
 * @duplex: duplex to match
 *
 * Description: Searches the settings array for the setting which
 *   matches the desired speed and duplex, and returns the index
 *   of that setting.  Returns the index of the last setting if
 *   none of the others match.
 */
static inline int phy_find_setting(int speed, int duplex)
{
	int idx = 0;

	while (idx < ARRAY_SIZE(settings) &&
	       (settings[idx].speed != speed ||
		settings[idx].duplex != duplex))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_find_valid - find a PHY setting that matches the requested features mask
 * @idx: The first index in settings[] to search
 * @features: A mask of the valid settings
 *
 * Description: Returns the index of the first valid setting less
 *   than or equal to the one pointed to by idx, as determined by
 *   the mask in features.  Returns the index of the last setting
 *   if nothing else matches.
 */
static inline int phy_find_valid(int idx, u32 features)
{
	while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}
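/*
 * Worked example (illustrative only): phy_find_setting(SPEED_100,
 * DUPLEX_FULL) returns the index of the 100/FULL entry above.  If that
 * mode is not in the supported mask, phy_find_valid() keeps walking down
 * the table until something matches, falling back to the last entry
 * (10/HALF) otherwise:
 *
 *	idx = phy_find_valid(phy_find_setting(SPEED_100, DUPLEX_FULL),
 *			     SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half);
 *
 * settings[idx] is then the 10/FULL entry.
 */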
/**
 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
 * @phydev: the target phy_device struct
 *
 * Description: Make sure the PHY is set to supported speeds and
 *   duplexes.  Drop down by one in this order:  1000/FULL,
 *   1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 */
static void phy_sanitize_settings(struct phy_device *phydev)
{
	u32 features = phydev->supported;
	int idx;

	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		phydev->autoneg = AUTONEG_DISABLE;

	idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
			features);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;
}

/**
 * phy_ethtool_sset - generic ethtool sset function, handles all the details
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd
 *
 * A few notes about parameter checking:
 * - We don't set port or transceiver, so we don't care what they
 *   were set to.
 * - phy_start_aneg() will make sure forced settings are sane, and
 *   choose the next best ones from the ones selected, so we don't
 *   care if ethtool tries to give us bad values.
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->phy_address != phydev->addr)
		return -EINVAL;

	/* We make sure that we don't pass unsupported
	 * values in to the PHY */
	cmd->advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = cmd->autoneg;

	phydev->speed = speed;

	phydev->advertising = cmd->advertising;

	if (AUTONEG_ENABLE == cmd->autoneg)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = cmd->duplex;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_sset);

int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	cmd->supported = phydev->supported;

	cmd->advertising = phydev->advertising;
	cmd->lp_advertising = phydev->lp_advertising;

	ethtool_cmd_speed_set(cmd, phydev->speed);
	cmd->duplex = phydev->duplex;
	cmd->port = PORT_MII;
	cmd->phy_address = phydev->addr;
	cmd->transceiver = phy_is_internal(phydev) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = phydev->autoneg;

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_gset);
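/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * MAC drivers usually forward their ethtool get/set_settings operations
 * straight to the two helpers above:
 *
 *	static int foo_get_settings(struct net_device *dev,
 *				    struct ethtool_cmd *cmd)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		if (!priv->phydev)
 *			return -ENODEV;
 *		return phy_ethtool_gset(priv->phydev, cmd);
 *	}
 *
 *	static int foo_set_settings(struct net_device *dev,
 *				    struct ethtool_cmd *cmd)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		if (!priv->phydev)
 *			return -ENODEV;
 *		return phy_ethtool_sset(priv->phydev, cmd);
 *	}
 */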
/**
 * phy_mii_ioctl - generic PHY MII ioctl interface
 * @phydev: the phy_device struct
 * @ifr: &struct ifreq for socket ioctl's
 * @cmd: ioctl cmd to execute
 *
 * Note that this function is currently incompatible with the
 * PHYCONTROL layer.  It changes registers without regard to
 * current state.  Use at own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev,
		struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *mii_data = if_mii(ifr);
	u16 val = mii_data->val_in;
	int ret = 0;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data->phy_id = phydev->addr;
		/* fall through */

	case SIOCGMIIREG:
		mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id,
						 mii_data->reg_num);
		break;

	case SIOCSMIIREG:
		if (mii_data->phy_id == phydev->addr) {
			switch (mii_data->reg_num) {
			case MII_BMCR:
				if ((val & (BMCR_RESET|BMCR_ANENABLE)) == 0)
					phydev->autoneg = AUTONEG_DISABLE;
				else
					phydev->autoneg = AUTONEG_ENABLE;
				if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
					phydev->duplex = DUPLEX_FULL;
				else
					phydev->duplex = DUPLEX_HALF;
				if ((!phydev->autoneg) &&
				    (val & BMCR_SPEED1000))
					phydev->speed = SPEED_1000;
				else if ((!phydev->autoneg) &&
					 (val & BMCR_SPEED100))
					phydev->speed = SPEED_100;
				break;
			case MII_ADVERTISE:
				phydev->advertising = val;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		mdiobus_write(phydev->bus, mii_data->phy_id,
			      mii_data->reg_num, val);

		if (mii_data->reg_num == MII_BMCR &&
		    val & BMCR_RESET)
			ret = phy_init_hw(phydev);
		break;

	case SIOCSHWTSTAMP:
		if (phydev->drv->hwtstamp)
			return phydev->drv->hwtstamp(phydev, ifr);
		/* fall through */

	default:
		return -EOPNOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL(phy_mii_ioctl);

/**
 * phy_start_aneg - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 *   them), and then calls the driver's config_aneg function.
 *   If the PHYCONTROL Layer is operating, we change the state to
 *   reflect the beginning of Auto-negotiation or forcing.
 */
int phy_start_aneg(struct phy_device *phydev)
{
	int err;

	mutex_lock(&phydev->lock);

	if (AUTONEG_DISABLE == phydev->autoneg)
		phy_sanitize_settings(phydev);

	err = phydev->drv->config_aneg(phydev);

	if (err < 0)
		goto out_unlock;

	if (phydev->state != PHY_HALTED) {
		if (AUTONEG_ENABLE == phydev->autoneg) {
			phydev->state = PHY_AN;
			phydev->link_timeout = PHY_AN_TIMEOUT;
		} else {
			phydev->state = PHY_FORCING;
			phydev->link_timeout = PHY_FORCE_TIMEOUT;
		}
	}

out_unlock:
	mutex_unlock(&phydev->lock);
	return err;
}
EXPORT_SYMBOL(phy_start_aneg);

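/*
 * Illustrative sketch (hypothetical, not part of this file): after a driver
 * trims phydev->advertising, for instance to drop gigabit modes on a board
 * with a marginal RGMII layout, it restarts negotiation so the change takes
 * effect on the wire:
 *
 *	phydev->advertising &= ~(ADVERTISED_1000baseT_Full |
 *				 ADVERTISED_1000baseT_Half);
 *	err = phy_start_aneg(phydev);
 *	if (err < 0)
 *		netdev_err(dev, "failed to restart autonegotiation\n");
 */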
/**
 * phy_start_machine - start PHY state machine tracking
 * @phydev: the phy_device struct
 * @handler: callback function for state change notifications
 *
 * Description: The PHY infrastructure can run a state machine
 *   which tracks whether the PHY is starting up, negotiating,
 *   etc.  This function starts the timer which tracks the state
 *   of the PHY.  If you want to be notified when the state changes,
 *   pass in the callback @handler, otherwise, pass NULL.  If you
 *   want to maintain your own state machine, do not call this
 *   function.
 */
void phy_start_machine(struct phy_device *phydev,
		void (*handler)(struct net_device *))
{
	phydev->adjust_state = handler;

	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
}

/**
 * phy_stop_machine - stop the PHY state machine tracking
 * @phydev: target phy_device struct
 *
 * Description: Stops the state machine timer, sets the state to UP
 *   (unless it wasn't up yet). This function must be called BEFORE
 *   phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
	cancel_delayed_work_sync(&phydev->state_queue);

	mutex_lock(&phydev->lock);
	if (phydev->state > PHY_UP)
		phydev->state = PHY_UP;
	mutex_unlock(&phydev->lock);

	phydev->adjust_state = NULL;
}

/**
 * phy_error - enter HALTED state for this PHY device
 * @phydev: target phy_device struct
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
static void phy_error(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);
	phydev->state = PHY_HALTED;
	mutex_unlock(&phydev->lock);
}

/**
 * phy_interrupt - PHY interrupt handler
 * @irq: interrupt line
 * @phy_dat: phy_device pointer
 *
 * Description: When a PHY interrupt occurs, the handler disables
 *   interrupts, and schedules a work task to clear the interrupt.
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
	struct phy_device *phydev = phy_dat;

	if (PHY_HALTED == phydev->state)
		return IRQ_NONE;		/* It can't be ours. */

	/* The MDIO bus is not allowed to be written in interrupt
	 * context, so we need to disable the irq here.  A work
	 * queue will write the PHY to disable and clear the
	 * interrupt, and then reenable the irq line. */
	disable_irq_nosync(irq);
	atomic_inc(&phydev->irq_disable);

	queue_work(system_power_efficient_wq, &phydev->phy_queue);

	return IRQ_HANDLED;
}

/**
 * phy_enable_interrupts - Enable the interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_enable_interrupts(struct phy_device *phydev)
{
	int err;

	err = phy_clear_interrupt(phydev);

	if (err < 0)
		return err;

	err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

	return err;
}

/**
 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_disable_interrupts(struct phy_device *phydev)
{
	int err;

	/* Disable PHY interrupts */
	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

	if (err)
		goto phy_err;

	/* Clear the interrupt */
	err = phy_clear_interrupt(phydev);

	if (err)
		goto phy_err;

	return 0;

phy_err:
	phy_error(phydev);

	return err;
}
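/*
 * Lifecycle note with an illustrative sketch (hypothetical driver code):
 * the state machine above is normally started once the PHY has been
 * attached and stopped before it is detached, roughly:
 *
 *	phy_start_machine(phydev, NULL);	(after attaching the PHY)
 *	...
 *	phy_stop_machine(phydev);		(before phy_detach())
 */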
/**
 * phy_start_interrupts - request and enable interrupts for a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Request the interrupt for the given PHY.
 *   If this fails, then we set irq to PHY_POLL.
 *   Otherwise, we enable the interrupts in the PHY.
 *   This should only be called with a valid IRQ number.
 *   Returns 0 on success or < 0 on error.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
	int err = 0;

	atomic_set(&phydev->irq_disable, 0);
	if (request_irq(phydev->irq, phy_interrupt,
				IRQF_SHARED,
				"phy_interrupt",
				phydev) < 0) {
		pr_warn("%s: Can't get IRQ %d (PHY)\n",
			phydev->bus->name, phydev->irq);
		phydev->irq = PHY_POLL;
		return 0;
	}

	err = phy_enable_interrupts(phydev);

	return err;
}
EXPORT_SYMBOL(phy_start_interrupts);

/**
 * phy_stop_interrupts - disable interrupts from a PHY device
 * @phydev: target phy_device struct
 */
int phy_stop_interrupts(struct phy_device *phydev)
{
	int err;

	err = phy_disable_interrupts(phydev);

	if (err)
		phy_error(phydev);

	free_irq(phydev->irq, phydev);

	/*
	 * Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but we do not really care about what would
	 * be done, except from enable_irq(), so cancel any work
	 * possibly pending and take care of the matter below.
	 */
	cancel_work_sync(&phydev->phy_queue);
	/*
	 * If work indeed has been cancelled, disable_irq() will have
	 * been left unbalanced from phy_interrupt() and enable_irq()
	 * has to be called so that other devices on the line work.
	 */
	while (atomic_dec_return(&phydev->irq_disable) >= 0)
		enable_irq(phydev->irq);

	return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);


/**
 * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
 * @work: work_struct that describes the work to be done
 */
void phy_change(struct work_struct *work)
{
	int err;
	struct phy_device *phydev =
		container_of(work, struct phy_device, phy_queue);

	if (phydev->drv->did_interrupt &&
	    !phydev->drv->did_interrupt(phydev))
		goto ignore;

	err = phy_disable_interrupts(phydev);

	if (err)
		goto phy_err;

	mutex_lock(&phydev->lock);
	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
		phydev->state = PHY_CHANGELINK;
	mutex_unlock(&phydev->lock);

	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);

	/* Reenable interrupts */
	if (PHY_HALTED != phydev->state)
		err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

	if (err)
		goto irq_enable_err;

	/* reschedule state queue work to run as soon as possible */
	cancel_delayed_work_sync(&phydev->state_queue);
	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);

	return;

ignore:
	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);
	return;

irq_enable_err:
	disable_irq(phydev->irq);
	atomic_inc(&phydev->irq_disable);
phy_err:
	phy_error(phydev);
}
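/*
 * Illustrative sketch (hypothetical, not part of this file): a MAC driver
 * whose PHY has a real interrupt line typically requests it right after
 * connecting the PHY; phy_start_interrupts() itself falls back to PHY_POLL
 * if the request fails:
 *
 *	if (phy_interrupt_is_valid(priv->phydev))
 *		phy_start_interrupts(priv->phydev);
 */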
/**
 * phy_stop - Bring down the PHY link, and stop checking the status
 * @phydev: target phy_device struct
 */
void phy_stop(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	if (PHY_HALTED == phydev->state)
		goto out_unlock;

	if (phy_interrupt_is_valid(phydev)) {
		/* Disable PHY Interrupts */
		phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

		/* Clear any pending interrupts */
		phy_clear_interrupt(phydev);
	}

	phydev->state = PHY_HALTED;

out_unlock:
	mutex_unlock(&phydev->lock);

	/*
	 * Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
	 * will not reenable interrupts.
	 */
}


/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to
 *   handle PHY-related work.  Used during startup to start the
 *   PHY, and after a call to phy_stop() to resume operation.
 *   Also used to indicate the MDIO bus has cleared an error
 *   condition.
 */
void phy_start(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	switch (phydev->state) {
	case PHY_STARTING:
		phydev->state = PHY_PENDING;
		break;
	case PHY_READY:
		phydev->state = PHY_UP;
		break;
	case PHY_HALTED:
		phydev->state = PHY_RESUMING;
	default:
		break;
	}
	mutex_unlock(&phydev->lock);
}
EXPORT_SYMBOL(phy_stop);
EXPORT_SYMBOL(phy_start);
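/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * the usual pairing in a MAC driver is phy_start() from ndo_open and
 * phy_stop() from ndo_stop:
 *
 *	static int foo_open(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		...
 *		phy_start(priv->phydev);
 *		return 0;
 *	}
 *
 *	static int foo_stop(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		phy_stop(priv->phydev);
 *		...
 *		return 0;
 *	}
 */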
/**
 * phy_state_machine - Handle the state machine
 * @work: work_struct that describes the work to be done
 */
void phy_state_machine(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct phy_device *phydev =
			container_of(dwork, struct phy_device, state_queue);
	int needs_aneg = 0;
	int err = 0;

	mutex_lock(&phydev->lock);

	if (phydev->adjust_state)
		phydev->adjust_state(phydev->attached_dev);

	switch (phydev->state) {
	case PHY_DOWN:
	case PHY_STARTING:
	case PHY_READY:
	case PHY_PENDING:
		break;
	case PHY_UP:
		needs_aneg = 1;

		phydev->link_timeout = PHY_AN_TIMEOUT;

		break;
	case PHY_AN:
		err = phy_read_status(phydev);

		if (err < 0)
			break;

		/* If the link is down, give up on
		 * negotiation for now */
		if (!phydev->link) {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
			break;
		}

		/* Check if negotiation is done.  Break
		 * if there's an error */
		err = phy_aneg_done(phydev);
		if (err < 0)
			break;

		/* If AN is done, we're running */
		if (err > 0) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);

		} else if (0 == phydev->link_timeout--) {
			needs_aneg = 1;
			/* If we have the magic_aneg bit,
			 * we try again */
			if (phydev->drv->flags & PHY_HAS_MAGICANEG)
				break;
		}
		break;
	case PHY_NOLINK:
		err = phy_read_status(phydev);

		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	case PHY_FORCING:
		err = genphy_update_link(phydev);

		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			if (0 == phydev->link_timeout--)
				needs_aneg = 1;
		}

		phydev->adjust_link(phydev->attached_dev);
		break;
	case PHY_RUNNING:
		/* Only register a CHANGE if we are
		 * polling or ignoring interrupts
		 */
		if (!phy_interrupt_is_valid(phydev))
			phydev->state = PHY_CHANGELINK;
		break;
	case PHY_CHANGELINK:
		err = phy_read_status(phydev);

		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
		}

		phydev->adjust_link(phydev->attached_dev);

		if (phy_interrupt_is_valid(phydev))
			err = phy_config_interrupt(phydev,
					PHY_INTERRUPT_ENABLED);
		break;
	case PHY_HALTED:
		if (phydev->link) {
			phydev->link = 0;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
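	/* PHY_RESUMING: phy_start() moved us here from PHY_HALTED.  Re-arm
	 * the PHY interrupt, then either report the link state directly
	 * (forced mode, or auto-negotiation already complete) or fall back
	 * to PHY_AN and keep waiting for negotiation to finish.
	 */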
	case PHY_RESUMING:

		err = phy_clear_interrupt(phydev);

		if (err)
			break;

		err = phy_config_interrupt(phydev,
				PHY_INTERRUPT_ENABLED);

		if (err)
			break;

		if (AUTONEG_ENABLE == phydev->autoneg) {
			err = phy_aneg_done(phydev);
			if (err < 0)
				break;

			/* err > 0 if AN is done.
			 * Otherwise, it's 0, and we're
			 * still waiting for AN */
			if (err > 0) {
				err = phy_read_status(phydev);
				if (err)
					break;

				if (phydev->link) {
					phydev->state = PHY_RUNNING;
					netif_carrier_on(phydev->attached_dev);
				} else
					phydev->state = PHY_NOLINK;
				phydev->adjust_link(phydev->attached_dev);
			} else {
				phydev->state = PHY_AN;
				phydev->link_timeout = PHY_AN_TIMEOUT;
			}
		} else {
			err = phy_read_status(phydev);
			if (err)
				break;

			if (phydev->link) {
				phydev->state = PHY_RUNNING;
				netif_carrier_on(phydev->attached_dev);
			} else
				phydev->state = PHY_NOLINK;
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	}

	mutex_unlock(&phydev->lock);

	if (needs_aneg)
		err = phy_start_aneg(phydev);

	if (err < 0)
		phy_error(phydev);

	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
			PHY_STATE_TIME * HZ);
}

void phy_mac_interrupt(struct phy_device *phydev, int new_link)
{
	cancel_work_sync(&phydev->phy_queue);
	phydev->link = new_link;
	schedule_work(&phydev->phy_queue);
}
EXPORT_SYMBOL(phy_mac_interrupt);

static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
				    int addr)
{
	/* Write the desired MMD Devad */
	bus->write(bus, addr, MII_MMD_CTRL, devad);

	/* Write the desired MMD register address */
	bus->write(bus, addr, MII_MMD_DATA, prtad);

	/* Select the Function : DATA with no post increment */
	bus->write(bus, addr, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
}

/**
 * phy_read_mmd_indirect - reads data from the MMD registers
 * @bus: the target MII bus
 * @prtad: MMD Address
 * @devad: MMD DEVAD
 * @addr: PHY address on the MII bus
 *
 * Description: reads data from the MMD registers (using the clause 22
 * indirection to access clause 45 registers) of the specified phy address.
 * To read these registers, the sequence is:
 * 1) Write reg 13 // DEVAD
 * 2) Write reg 14 // MMD Address
 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Read reg 14 // Read MMD data
 */
static int phy_read_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
				 int addr)
{
	u32 ret;

	mmd_phy_indirect(bus, prtad, devad, addr);

	/* Read the content of the MMD's selected register */
	ret = bus->read(bus, addr, MII_MMD_DATA);

	return ret;
}
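/*
 * Worked example (register values shown for illustration): reading the EEE
 * ability register MDIO_PCS_EEE_ABLE (register 20 of MMD 3, the PCS)
 * through the clause 22 indirection above comes down to:
 *
 *	write reg 13 <- 0x0003	(select DEVAD 3, address function)
 *	write reg 14 <- 0x0014	(register 20 within that MMD)
 *	write reg 13 <- 0x4003	(DEVAD 3, data function, no post increment)
 *	read  reg 14		(returns the MMD register contents)
 */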
/**
 * phy_write_mmd_indirect - writes data to the MMD registers
 * @bus: the target MII bus
 * @prtad: MMD Address
 * @devad: MMD DEVAD
 * @addr: PHY address on the MII bus
 * @data: data to write in the MMD register
 *
 * Description: writes data to the MMD registers (using the clause 22
 * indirection to access clause 45 registers) of the specified phy address.
 * To write these registers, the sequence is:
 * 1) Write reg 13 // DEVAD
 * 2) Write reg 14 // MMD Address
 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Write reg 14 // Write MMD data
 */
static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
				   int addr, u32 data)
{
	mmd_phy_indirect(bus, prtad, devad, addr);

	/* Write the data into MMD's selected register */
	bus->write(bus, addr, MII_MMD_DATA, data);
}

/**
 * phy_init_eee - init and check the EEE feature
 * @phydev: target phy_device struct
 * @clk_stop_enable: PHY may stop the clock during LPI
 *
 * Description: it checks if the Energy-Efficient Ethernet (EEE)
 * is supported by looking at the MMD registers 3.20 and 7.60/61
 * and it programs the MMD register 3.0 setting the "Clock stop enable"
 * bit if required.
 */
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
	int ret = -EPROTONOSUPPORT;

	/* According to 802.3az, EEE is supported only in full duplex mode.
	 * The EEE feature is also only active when the core is operating
	 * in MII, GMII or RGMII mode.
	 */
	if ((phydev->duplex == DUPLEX_FULL) &&
	    ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
	    (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
	    (phydev->interface == PHY_INTERFACE_MODE_RGMII))) {
		int eee_lp, eee_cap, eee_adv;
		u32 lp, cap, adv;
		int idx, status;

		/* Read phy status to properly get the right settings */
		status = phy_read_status(phydev);
		if (status)
			return status;

		/* First check if the EEE ability is supported */
		eee_cap = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
						MDIO_MMD_PCS, phydev->addr);
		if (eee_cap < 0)
			return eee_cap;

		cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
		if (!cap)
			goto eee_exit;

		/* Check which link settings negotiated and verify it in
		 * the EEE advertising registers.
		 */
		eee_lp = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
					       MDIO_MMD_AN, phydev->addr);
		if (eee_lp < 0)
			return eee_lp;

		eee_adv = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
						MDIO_MMD_AN, phydev->addr);
		if (eee_adv < 0)
			return eee_adv;

		adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
		idx = phy_find_setting(phydev->speed, phydev->duplex);
		if (!(lp & adv & settings[idx].setting))
			goto eee_exit;

		if (clk_stop_enable) {
			/* Configure the PHY to stop receiving xMII
			 * clock while it is signaling LPI.
			 */
			int val = phy_read_mmd_indirect(phydev->bus, MDIO_CTRL1,
							MDIO_MMD_PCS,
							phydev->addr);
			if (val < 0)
				return val;

			val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
			phy_write_mmd_indirect(phydev->bus, MDIO_CTRL1,
					       MDIO_MMD_PCS, phydev->addr, val);
		}

		ret = 0; /* EEE supported */
	}

eee_exit:
	return ret;
}
EXPORT_SYMBOL(phy_init_eee);
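/*
 * Illustrative sketch (hypothetical, not part of this file): a MAC driver
 * with EEE-capable hardware would typically probe the PHY side from its
 * adjust_link callback once the link is up, and only then enable LPI
 * signalling in the MAC:
 *
 *	if (phydev->link && !phy_init_eee(phydev, true))
 *		foo_mac_enable_eee(priv);
 *
 * where foo_mac_enable_eee() stands in for whatever the MAC needs to start
 * LPI signalling.
 */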
/**
 * phy_get_eee_err - report the EEE wake error count
 * @phydev: target phy_device struct
 *
 * Description: reports the number of times the PHY failed to complete
 * its normal wake sequence.
 */
int phy_get_eee_err(struct phy_device *phydev)
{
	return phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_WK_ERR,
				     MDIO_MMD_PCS, phydev->addr);
}
EXPORT_SYMBOL(phy_get_eee_err);

/**
 * phy_ethtool_get_eee - get EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: reports the Supported/Advertisement/LP Advertisement
 * EEE capabilities.
 */
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int val;

	/* Get Supported EEE */
	val = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
				    MDIO_MMD_PCS, phydev->addr);
	if (val < 0)
		return val;
	data->supported = mmd_eee_cap_to_ethtool_sup_t(val);

	/* Get advertisement EEE */
	val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
				    MDIO_MMD_AN, phydev->addr);
	if (val < 0)
		return val;
	data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Get LP advertisement EEE */
	val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
				    MDIO_MMD_AN, phydev->addr);
	if (val < 0)
		return val;
	data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_get_eee);

/**
 * phy_ethtool_set_eee - set EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: programs the EEE Advertisement register.
 */
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int val;

	val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
	phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
			       phydev->addr, val);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);

int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv->set_wol)
		return phydev->drv->set_wol(phydev, wol);

	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(phy_ethtool_set_wol);

void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv->get_wol)
		phydev->drv->get_wol(phydev, wol);
}
EXPORT_SYMBOL(phy_ethtool_get_wol);
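/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * the EEE and Wake-on-LAN helpers above are meant to back the corresponding
 * ethtool operations of a MAC driver:
 *
 *	static int foo_get_eee(struct net_device *dev, struct ethtool_eee *e)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		return phy_ethtool_get_eee(priv->phydev, e);
 *	}
 *
 *	static void foo_get_wol(struct net_device *dev,
 *				struct ethtool_wolinfo *wol)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		phy_ethtool_get_wol(priv->phydev, wol);
 *	}
 */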