/*
 * drivers/net/phy/phy.c
 *
 * Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 * Copyright (c) 2006 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* Convenience function to print out the current phy status
 */
void phy_print_status(struct phy_device *phydev)
{
	pr_info("PHY: %s - Link is %s", phydev->dev.bus_id,
			phydev->link ? "Up" : "Down");
	if (phydev->link)
		printk(" - %d/%s", phydev->speed,
				DUPLEX_FULL == phydev->duplex ?
				"Full" : "Half");

	printk("\n");
}
EXPORT_SYMBOL(phy_print_status);


/* Convenience functions for reading/writing a given PHY
 * register. They MUST NOT be called from interrupt context,
 * because the bus read/write functions may wait for an interrupt
 * to conclude the operation. */
int phy_read(struct phy_device *phydev, u16 regnum)
{
	int retval;
	struct mii_bus *bus = phydev->bus;

	spin_lock_bh(&bus->mdio_lock);
	retval = bus->read(bus, phydev->addr, regnum);
	spin_unlock_bh(&bus->mdio_lock);

	return retval;
}
EXPORT_SYMBOL(phy_read);

int phy_write(struct phy_device *phydev, u16 regnum, u16 val)
{
	int err;
	struct mii_bus *bus = phydev->bus;

	spin_lock_bh(&bus->mdio_lock);
	err = bus->write(bus, phydev->addr, regnum, val);
	spin_unlock_bh(&bus->mdio_lock);

	return err;
}
EXPORT_SYMBOL(phy_write);


int phy_clear_interrupt(struct phy_device *phydev)
{
	int err = 0;

	if (phydev->drv->ack_interrupt)
		err = phydev->drv->ack_interrupt(phydev);

	return err;
}


int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
	int err = 0;

	phydev->interrupts = interrupts;
	if (phydev->drv->config_intr)
		err = phydev->drv->config_intr(phydev);

	return err;
}


/* phy_aneg_done
 *
 * description: Reads the status register and returns 0 if
 *   auto-negotiation is still in progress, BMSR_ANEGCOMPLETE if
 *   auto-negotiation is done, or a negative error code if the
 *   register could not be read.
 */
static inline int phy_aneg_done(struct phy_device *phydev)
{
	int retval;

	retval = phy_read(phydev, MII_BMSR);

	return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
}
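/* Example (illustrative sketch, not used by this file): a helper a PHY
 * driver might build on top of phy_read() to test the current link bit.
 * The name example_phy_link_is_up is invented for this sketch.  BMSR
 * latches link-down events, so the register is read twice: once to clear
 * the latch, once to get the current state.
 */
static inline int example_phy_link_is_up(struct phy_device *phydev)
{
	int bmsr;

	/* First read clears a latched link-down indication... */
	bmsr = phy_read(phydev, MII_BMSR);
	if (bmsr < 0)
		return bmsr;

	/* ...second read reports the current link state. */
	bmsr = phy_read(phydev, MII_BMSR);
	if (bmsr < 0)
		return bmsr;

	return (bmsr & BMSR_LSTATUS) ? 1 : 0;
}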
/* A structure for mapping a particular speed and duplex
 * combination to a particular SUPPORTED and ADVERTISED value */
struct phy_setting {
	int speed;
	int duplex;
	u32 setting;
};

/* A mapping of all SUPPORTED settings to speed/duplex */
static const struct phy_setting settings[] = {
	{
		.speed = 10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_1000baseT_Half,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_100baseT_Full,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_100baseT_Half,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10baseT_Full,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_10baseT_Half,
	},
};

#define MAX_NUM_SETTINGS (sizeof(settings)/sizeof(struct phy_setting))

/* phy_find_setting
 *
 * description: Searches the settings array for the setting which
 *   matches the desired speed and duplex, and returns the index
 *   of that setting.  Returns the index of the last setting if
 *   none of the others match.
 */
static inline int phy_find_setting(int speed, int duplex)
{
	int idx = 0;

	while (idx < ARRAY_SIZE(settings) &&
			(settings[idx].speed != speed ||
			 settings[idx].duplex != duplex))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/* phy_find_valid
 * idx: The first index in settings[] to search
 * features: A mask of the valid settings
 *
 * description: Returns the index of the first valid setting less
 *   than or equal to the one pointed to by idx, as determined by
 *   the mask in features.  Returns the index of the last setting
 *   if nothing else matches.
 */
static inline int phy_find_valid(int idx, u32 features)
{
	while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/* phy_sanitize_settings
 *
 * description: Make sure the PHY is set to supported speeds and
 *   duplexes.  Drop down by one in this order: 1000/FULL,
 *   1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF
 */
void phy_sanitize_settings(struct phy_device *phydev)
{
	u32 features = phydev->supported;
	int idx;

	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		phydev->autoneg = 0;

	idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
			features);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;
}
EXPORT_SYMBOL(phy_sanitize_settings);
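/* Example (illustrative sketch, not used by this file): what
 * phy_sanitize_settings() does with a forced speed the PHY cannot do.
 * Assuming a 10/100-only PHY (no SUPPORTED_1000baseT_* bits in
 * phydev->supported), a request for 1000/FULL falls through the
 * settings[] table to 100/FULL.  The function name is invented.
 */
static inline void example_sanitize_forced_speed(struct phy_device *phydev)
{
	phydev->autoneg = AUTONEG_DISABLE;
	phydev->speed = SPEED_1000;
	phydev->duplex = DUPLEX_FULL;

	/* With only 10/100 modes supported, this picks the first
	 * supported entry at or below 1000/FULL, i.e. 100/FULL. */
	phy_sanitize_settings(phydev);
}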
/* phy_ethtool_sset:
 * A generic ethtool sset function.  Handles all the details
 *
 * A few notes about parameter checking:
 * - We don't set port or transceiver, so we don't care what they
 *   were set to.
 * - phy_start_aneg() will make sure forced settings are sane, and
 *   choose the next best ones from the ones selected, so we don't
 *   care if ethtool tries to give us bad values
 *
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	if (cmd->phy_address != phydev->addr)
		return -EINVAL;

	/* We make sure that we don't pass unsupported
	 * values into the PHY */
	cmd->advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE
			&& ((cmd->speed != SPEED_1000
					&& cmd->speed != SPEED_100
					&& cmd->speed != SPEED_10)
				|| (cmd->duplex != DUPLEX_HALF
					&& cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = cmd->autoneg;

	phydev->speed = cmd->speed;

	phydev->advertising = cmd->advertising;

	if (AUTONEG_ENABLE == cmd->autoneg)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = cmd->duplex;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_sset);

int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	cmd->supported = phydev->supported;

	cmd->advertising = phydev->advertising;

	cmd->speed = phydev->speed;
	cmd->duplex = phydev->duplex;
	cmd->port = PORT_MII;
	cmd->phy_address = phydev->addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = phydev->autoneg;

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_gset);

/* Note that this function is currently incompatible with the
 * PHYCONTROL layer.  It changes registers without regard to
 * current state.  Use at your own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev,
		struct mii_ioctl_data *mii_data, int cmd)
{
	u16 val = mii_data->val_in;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data->phy_id = phydev->addr;
		break;
	case SIOCGMIIREG:
		mii_data->val_out = phy_read(phydev, mii_data->reg_num);
		break;

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (mii_data->phy_id == phydev->addr) {
			switch (mii_data->reg_num) {
			case MII_BMCR:
				if (val & (BMCR_RESET|BMCR_ANENABLE))
					phydev->autoneg = AUTONEG_DISABLE;
				else
					phydev->autoneg = AUTONEG_ENABLE;
				if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
					phydev->duplex = DUPLEX_FULL;
				else
					phydev->duplex = DUPLEX_HALF;
				break;
			case MII_ADVERTISE:
				phydev->advertising = val;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		phy_write(phydev, mii_data->reg_num, val);

		if (mii_data->reg_num == MII_BMCR
				&& val & BMCR_RESET
				&& phydev->drv->config_init)
			phydev->drv->config_init(phydev);
		break;
	}

	return 0;
}
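/* Example (illustrative sketch, not used by this file): how an Ethernet
 * driver might hand its ethtool get/set_settings operations off to the
 * helpers above.  struct example_priv and the function names are
 * invented; a real driver keeps the phy_device pointer wherever its
 * private data lives and would typically wire these into its
 * struct ethtool_ops.
 */
struct example_priv {
	struct phy_device *phydev;
};

static inline int example_get_settings(struct net_device *dev,
		struct ethtool_cmd *cmd)
{
	struct example_priv *priv = netdev_priv(dev);

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static inline int example_set_settings(struct net_device *dev,
		struct ethtool_cmd *cmd)
{
	struct example_priv *priv = netdev_priv(dev);

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_sset(priv->phydev, cmd);
}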
/* phy_start_aneg
 *
 * description: Sanitizes the settings (if we're not
 *   autonegotiating them), and then calls the driver's
 *   config_aneg function.  If the PHYCONTROL layer is operating,
 *   we change the state to reflect the beginning of
 *   auto-negotiation or forcing.
 */
int phy_start_aneg(struct phy_device *phydev)
{
	int err;

	spin_lock(&phydev->lock);

	if (AUTONEG_DISABLE == phydev->autoneg)
		phy_sanitize_settings(phydev);

	err = phydev->drv->config_aneg(phydev);

	if (err < 0)
		goto out_unlock;

	if (phydev->state != PHY_HALTED) {
		if (AUTONEG_ENABLE == phydev->autoneg) {
			phydev->state = PHY_AN;
			phydev->link_timeout = PHY_AN_TIMEOUT;
		} else {
			phydev->state = PHY_FORCING;
			phydev->link_timeout = PHY_FORCE_TIMEOUT;
		}
	}

out_unlock:
	spin_unlock(&phydev->lock);
	return err;
}
EXPORT_SYMBOL(phy_start_aneg);


static void phy_change(struct work_struct *work);
static void phy_timer(unsigned long data);

/* phy_start_machine:
 *
 * description: The PHY infrastructure can run a state machine
 *   which tracks whether the PHY is starting up, negotiating,
 *   etc.  This function starts the timer which tracks the state
 *   of the PHY.  If you want to be notified when the state
 *   changes, pass in the callback; otherwise, pass NULL.  If you
 *   want to maintain your own state machine, do not call this
 *   function. */
void phy_start_machine(struct phy_device *phydev,
		void (*handler)(struct net_device *))
{
	phydev->adjust_state = handler;

	init_timer(&phydev->phy_timer);
	phydev->phy_timer.function = &phy_timer;
	phydev->phy_timer.data = (unsigned long) phydev;
	mod_timer(&phydev->phy_timer, jiffies + HZ);
}

/* phy_stop_machine
 *
 * description: Stops the state machine timer and, if the PHY had
 *   progressed past the UP state, sets the state back to UP.
 *   This function must be called BEFORE phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
	del_timer_sync(&phydev->phy_timer);

	spin_lock(&phydev->lock);
	if (phydev->state > PHY_UP)
		phydev->state = PHY_UP;
	spin_unlock(&phydev->lock);

	phydev->adjust_state = NULL;
}

/* phy_force_reduction
 *
 * description: Reduces the speed/duplex settings by one notch,
 *   in this order:
 *   1000/FULL, 1000/HALF, 100/FULL, 100/HALF,
 *   10/FULL, 10/HALF.  The function bottoms out at 10/HALF.
 */
static void phy_force_reduction(struct phy_device *phydev)
{
	int idx;

	idx = phy_find_setting(phydev->speed, phydev->duplex);

	idx++;

	idx = phy_find_valid(idx, phydev->supported);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;

	pr_info("Trying %d/%s\n", phydev->speed,
			DUPLEX_FULL == phydev->duplex ?
			"FULL" : "HALF");
}


/* phy_error:
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
void phy_error(struct phy_device *phydev)
{
	spin_lock(&phydev->lock);
	phydev->state = PHY_HALTED;
	spin_unlock(&phydev->lock);
}
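/* Example (illustrative sketch, not used by this file): hooking up the
 * state machine with an optional state-change callback.  The names
 * example_adjust_state() and example_attach_state_machine() are invented;
 * drivers that attach through phy_connect() typically have
 * phy_start_machine() called on their behalf and may pass NULL if they
 * do not want the notification.
 */
static inline void example_adjust_state(struct net_device *dev)
{
	/* Runs from phy_timer() with phydev->lock held every time the
	 * state machine executes; a driver could resync MAC-side
	 * settings with the PHY state here. */
}

static inline void example_attach_state_machine(struct phy_device *phydev)
{
	/* Passing NULL instead of the callback is also acceptable. */
	phy_start_machine(phydev, &example_adjust_state);
}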
/* phy_interrupt
 *
 * description: When a PHY interrupt occurs, the handler disables
 *   interrupts, and schedules a work task to clear the interrupt.
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
	struct phy_device *phydev = phy_dat;

	if (PHY_HALTED == phydev->state)
		return IRQ_NONE;	/* It can't be ours. */

	/* The MDIO bus is not allowed to be written in interrupt
	 * context, so we need to disable the irq here.  A work
	 * queue will write the PHY to disable and clear the
	 * interrupt, and then reenable the irq line. */
	disable_irq_nosync(irq);

	schedule_work(&phydev->phy_queue);

	return IRQ_HANDLED;
}

/* Enable the interrupts from the PHY side */
int phy_enable_interrupts(struct phy_device *phydev)
{
	int err;

	err = phy_clear_interrupt(phydev);

	if (err < 0)
		return err;

	err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

	return err;
}
EXPORT_SYMBOL(phy_enable_interrupts);

/* Disable the PHY interrupts from the PHY side */
int phy_disable_interrupts(struct phy_device *phydev)
{
	int err;

	/* Disable PHY interrupts */
	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

	if (err)
		goto phy_err;

	/* Clear the interrupt */
	err = phy_clear_interrupt(phydev);

	if (err)
		goto phy_err;

	return 0;

phy_err:
	phy_error(phydev);

	return err;
}
EXPORT_SYMBOL(phy_disable_interrupts);

/* phy_start_interrupts
 *
 * description: Request the interrupt for the given PHY.  If
 *   this fails, then we set irq to PHY_POLL.
 *   Otherwise, we enable the interrupts in the PHY.
 *   Returns 0 on success.
 *   This should only be called with a valid IRQ number.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
	int err = 0;

	INIT_WORK(&phydev->phy_queue, phy_change);

	if (request_irq(phydev->irq, phy_interrupt,
				IRQF_SHARED,
				"phy_interrupt",
				phydev) < 0) {
		printk(KERN_WARNING "%s: Can't get IRQ %d (PHY)\n",
				phydev->bus->name,
				phydev->irq);
		phydev->irq = PHY_POLL;
		return 0;
	}

	err = phy_enable_interrupts(phydev);

	return err;
}
EXPORT_SYMBOL(phy_start_interrupts);

int phy_stop_interrupts(struct phy_device *phydev)
{
	int err;

	err = phy_disable_interrupts(phydev);

	if (err)
		phy_error(phydev);

	/*
	 * Finish any pending work; we might have been scheduled
	 * to be called from keventd ourselves, though.
	 */
	run_scheduled_work(&phydev->phy_queue);

	free_irq(phydev->irq, phydev);

	return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);


/* Scheduled by the phy_interrupt/timer to handle PHY changes */
static void phy_change(struct work_struct *work)
{
	int err;
	struct phy_device *phydev =
		container_of(work, struct phy_device, phy_queue);

	err = phy_disable_interrupts(phydev);

	if (err)
		goto phy_err;

	spin_lock(&phydev->lock);
	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
		phydev->state = PHY_CHANGELINK;
	spin_unlock(&phydev->lock);

	enable_irq(phydev->irq);

	/* Reenable interrupts */
	if (PHY_HALTED != phydev->state)
		err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

	if (err)
		goto irq_enable_err;

	return;

irq_enable_err:
	disable_irq(phydev->irq);
phy_err:
	phy_error(phydev);
}
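/* Example (illustrative sketch, not used by this file): requesting
 * interrupt-driven link monitoring when the PHY has a real IRQ line.
 * Devices left at PHY_POLL simply let phy_timer() poll the status.
 * The function name is invented.
 */
static inline void example_enable_phy_irq(struct phy_device *phydev)
{
	if (phydev->irq != PHY_POLL) {
		/* phy_start_interrupts() falls back to PHY_POLL on its
		 * own if request_irq() fails; a nonzero return means the
		 * PHY itself refused to enable interrupts. */
		if (phy_start_interrupts(phydev))
			printk(KERN_WARNING
					"PHY: could not enable interrupts\n");
	}
}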
/* Bring down the PHY link, and stop checking the status. */
void phy_stop(struct phy_device *phydev)
{
	spin_lock(&phydev->lock);

	if (PHY_HALTED == phydev->state)
		goto out_unlock;

	phydev->state = PHY_HALTED;

	if (phydev->irq != PHY_POLL) {
		/* Disable PHY Interrupts */
		phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

		/* Clear any pending interrupts */
		phy_clear_interrupt(phydev);
	}

out_unlock:
	spin_unlock(&phydev->lock);

	/*
	 * Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but PHY_HALTED guarantees that phy_change()
	 * will not reenable interrupts.
	 */
}


/* phy_start
 *
 * description: Indicates the attached device's readiness to
 *   handle PHY-related work.  Used during startup to start the
 *   PHY, and after a call to phy_stop() to resume operation.
 *   Also used to indicate the MDIO bus has cleared an error
 *   condition.
 */
void phy_start(struct phy_device *phydev)
{
	spin_lock(&phydev->lock);

	switch (phydev->state) {
	case PHY_STARTING:
		phydev->state = PHY_PENDING;
		break;
	case PHY_READY:
		phydev->state = PHY_UP;
		break;
	case PHY_HALTED:
		phydev->state = PHY_RESUMING;
	default:
		break;
	}
	spin_unlock(&phydev->lock);
}
EXPORT_SYMBOL(phy_stop);
EXPORT_SYMBOL(phy_start);
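/* Example (illustrative sketch, not used by this file): the usual pairing
 * of phy_start()/phy_stop() in a driver's open and close paths, reusing
 * the invented struct example_priv from the ethtool sketch above.  The
 * function names are likewise invented.
 */
static inline void example_open_link(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* Let the state machine bring the link up; the driver's
	 * adjust_link callback fires as the state changes. */
	phy_start(priv->phydev);
	netif_start_queue(dev);
}

static inline void example_close_link(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);

	/* Halt the state machine; the carrier is dropped on the next
	 * phy_timer() run. */
	phy_stop(priv->phydev);
}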
/* PHY timer which handles the state machine */
static void phy_timer(unsigned long data)
{
	struct phy_device *phydev = (struct phy_device *)data;
	int needs_aneg = 0;
	int err = 0;

	spin_lock(&phydev->lock);

	if (phydev->adjust_state)
		phydev->adjust_state(phydev->attached_dev);

	switch (phydev->state) {
	case PHY_DOWN:
	case PHY_STARTING:
	case PHY_READY:
	case PHY_PENDING:
		break;
	case PHY_UP:
		needs_aneg = 1;

		phydev->link_timeout = PHY_AN_TIMEOUT;

		break;
	case PHY_AN:
		err = phy_read_status(phydev);

		if (err < 0)
			break;

		/* If the link is down, give up on
		 * negotiation for now */
		if (!phydev->link) {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
			break;
		}

		/* Check if negotiation is done.  Break
		 * if there's an error */
		err = phy_aneg_done(phydev);
		if (err < 0)
			break;

		/* If AN is done, we're running */
		if (err > 0) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);

		} else if (0 == phydev->link_timeout--) {
			int idx;

			needs_aneg = 1;
			/* If we have the magic_aneg bit,
			 * we try again */
			if (phydev->drv->flags & PHY_HAS_MAGICANEG)
				break;

			/* The timer expired, and we still
			 * don't have a setting, so we try
			 * forcing it until we find one that
			 * works, starting from the fastest speed,
			 * and working our way down */
			idx = phy_find_valid(0, phydev->supported);

			phydev->speed = settings[idx].speed;
			phydev->duplex = settings[idx].duplex;

			phydev->autoneg = AUTONEG_DISABLE;

			pr_info("Trying %d/%s\n", phydev->speed,
					DUPLEX_FULL == phydev->duplex ?
					"FULL" : "HALF");
		}
		break;
	case PHY_NOLINK:
		err = phy_read_status(phydev);

		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	case PHY_FORCING:
		err = genphy_update_link(phydev);

		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			if (0 == phydev->link_timeout--) {
				phy_force_reduction(phydev);
				needs_aneg = 1;
			}
		}

		phydev->adjust_link(phydev->attached_dev);
		break;
	case PHY_RUNNING:
		/* Only register a CHANGE if we are
		 * polling */
		if (PHY_POLL == phydev->irq)
			phydev->state = PHY_CHANGELINK;
		break;
	case PHY_CHANGELINK:
		err = phy_read_status(phydev);

		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
		}

		phydev->adjust_link(phydev->attached_dev);

		if (PHY_POLL != phydev->irq)
			err = phy_config_interrupt(phydev,
					PHY_INTERRUPT_ENABLED);
		break;
	case PHY_HALTED:
		if (phydev->link) {
			phydev->link = 0;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	case PHY_RESUMING:

		err = phy_clear_interrupt(phydev);

		if (err)
			break;

		err = phy_config_interrupt(phydev,
				PHY_INTERRUPT_ENABLED);

		if (err)
			break;

		if (AUTONEG_ENABLE == phydev->autoneg) {
			err = phy_aneg_done(phydev);
			if (err < 0)
				break;

			/* err > 0 if AN is done.
			 * Otherwise, it's 0, and we're
			 * still waiting for AN */
			if (err > 0) {
				phydev->state = PHY_RUNNING;
			} else {
				phydev->state = PHY_AN;
				phydev->link_timeout = PHY_AN_TIMEOUT;
			}
		} else
			phydev->state = PHY_RUNNING;
		break;
	}

	spin_unlock(&phydev->lock);

	if (needs_aneg)
		err = phy_start_aneg(phydev);

	if (err < 0)
		phy_error(phydev);

	mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ);
}