/*
 * drivers/net/phy/phy.c
 *
 * Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 * Copyright (c) 2006, 2007 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/**
 * phy_print_status - Convenience function to print out the current phy status
 * @phydev: the phy_device struct
 */
void phy_print_status(struct phy_device *phydev)
{
	pr_info("PHY: %s - Link is %s", phydev->dev.bus_id,
			phydev->link ? "Up" : "Down");
	if (phydev->link)
		printk(" - %d/%s", phydev->speed,
				DUPLEX_FULL == phydev->duplex ?
				"Full" : "Half");

	printk("\n");
}
EXPORT_SYMBOL(phy_print_status);


/**
 * phy_read - Convenience function for reading a given PHY register
 * @phydev: the phy_device struct
 * @regnum: register number to read
 *
 * NOTE: MUST NOT be called from interrupt context,
 * because the bus read/write functions may wait for an interrupt
 * to conclude the operation.
 */
int phy_read(struct phy_device *phydev, u16 regnum)
{
	int retval;
	struct mii_bus *bus = phydev->bus;

	spin_lock_bh(&bus->mdio_lock);
	retval = bus->read(bus, phydev->addr, regnum);
	spin_unlock_bh(&bus->mdio_lock);

	return retval;
}
EXPORT_SYMBOL(phy_read);

/**
 * phy_write - Convenience function for writing a given PHY register
 * @phydev: the phy_device struct
 * @regnum: register number to write
 * @val: value to write to @regnum
 *
 * NOTE: MUST NOT be called from interrupt context,
 * because the bus read/write functions may wait for an interrupt
 * to conclude the operation.
 */
int phy_write(struct phy_device *phydev, u16 regnum, u16 val)
{
	int err;
	struct mii_bus *bus = phydev->bus;

	spin_lock_bh(&bus->mdio_lock);
	err = bus->write(bus, phydev->addr, regnum, val);
	spin_unlock_bh(&bus->mdio_lock);

	return err;
}
EXPORT_SYMBOL(phy_write);

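/*
 * Example (sketch): a PHY or MAC driver could use these helpers to read the
 * basic status register or to poke a register from process context.  The
 * register offset 0x1f below is purely illustrative and not defined here:
 *
 *	int bmsr = phy_read(phydev, MII_BMSR);
 *	if (bmsr < 0)
 *		return bmsr;
 *	if (bmsr & BMSR_LSTATUS)
 *		err = phy_write(phydev, 0x1f, 0x0000);
 */
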
/**
 * phy_clear_interrupt - Ack the phy device's interrupt
 * @phydev: the phy_device struct
 *
 * If the @phydev driver has an ack_interrupt function, call it to
 * ack and clear the phy device's interrupt.
 *
 * Returns 0 on success or < 0 on error.
 */
int phy_clear_interrupt(struct phy_device *phydev)
{
	int err = 0;

	if (phydev->drv->ack_interrupt)
		err = phydev->drv->ack_interrupt(phydev);

	return err;
}

/**
 * phy_config_interrupt - configure the PHY device for the requested interrupts
 * @phydev: the phy_device struct
 * @interrupts: interrupt flags to configure for this @phydev
 *
 * Returns 0 on success or < 0 on error.
 */
int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
	int err = 0;

	phydev->interrupts = interrupts;
	if (phydev->drv->config_intr)
		err = phydev->drv->config_intr(phydev);

	return err;
}


/**
 * phy_aneg_done - return auto-negotiation status
 * @phydev: target phy_device struct
 *
 * Description: Reads the status register and returns 0 either if
 *   auto-negotiation is incomplete, or if there was an error.
 *   Returns BMSR_ANEGCOMPLETE if auto-negotiation is done.
 */
static inline int phy_aneg_done(struct phy_device *phydev)
{
	int retval;

	retval = phy_read(phydev, MII_BMSR);

	return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
}

/* A structure for mapping a particular speed and duplex
 * combination to a particular SUPPORTED and ADVERTISED value */
struct phy_setting {
	int speed;
	int duplex;
	u32 setting;
};

/* A mapping of all SUPPORTED settings to speed/duplex */
static const struct phy_setting settings[] = {
	{
		.speed = 10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_1000baseT_Half,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_100baseT_Full,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_100baseT_Half,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10baseT_Full,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_10baseT_Half,
	},
};

#define MAX_NUM_SETTINGS ARRAY_SIZE(settings)

/**
 * phy_find_setting - find a PHY settings array entry that matches speed & duplex
 * @speed: speed to match
 * @duplex: duplex to match
 *
 * Description: Searches the settings array for the setting which
 *   matches the desired speed and duplex, and returns the index
 *   of that setting.  Returns the index of the last setting if
 *   none of the others match.
 */
static inline int phy_find_setting(int speed, int duplex)
{
	int idx = 0;

	while (idx < ARRAY_SIZE(settings) &&
			(settings[idx].speed != speed ||
			settings[idx].duplex != duplex))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_find_valid - find a PHY setting that matches the requested features mask
 * @idx: The first index in settings[] to search
 * @features: A mask of the valid settings
 *
 * Description: Returns the index of the first valid setting less
 *   than or equal to the one pointed to by idx, as determined by
 *   the mask in features.  Returns the index of the last setting
 *   if nothing else matches.
 */
static inline int phy_find_valid(int idx, u32 features)
{
	while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

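/*
 * Example (sketch): on a PHY whose mask only includes the 100baseT bits,
 * asking for 1000/FULL walks the table down to the first supported entry:
 *
 *	idx = phy_find_valid(phy_find_setting(SPEED_1000, DUPLEX_FULL),
 *			SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half);
 *	// settings[idx] is now the 100/FULL entry
 */
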
/**
 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
 * @phydev: the target phy_device struct
 *
 * Description: Make sure the PHY is set to supported speeds and
 *   duplexes.  Drop down by one in this order: 1000/FULL,
 *   1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 */
void phy_sanitize_settings(struct phy_device *phydev)
{
	u32 features = phydev->supported;
	int idx;

	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		phydev->autoneg = AUTONEG_DISABLE;

	idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
			features);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;
}
EXPORT_SYMBOL(phy_sanitize_settings);

/**
 * phy_ethtool_sset - generic ethtool sset function, handles all the details
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd
 *
 * A few notes about parameter checking:
 * - We don't set port or transceiver, so we don't care what they
 *   were set to.
 * - phy_start_aneg() will make sure forced settings are sane, and
 *   choose the next best ones from the ones selected, so we don't
 *   care if ethtool tries to give us bad values.
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	if (cmd->phy_address != phydev->addr)
		return -EINVAL;

	/* We make sure that we don't pass unsupported
	 * values in to the PHY */
	cmd->advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE
			&& ((cmd->speed != SPEED_1000
					&& cmd->speed != SPEED_100
					&& cmd->speed != SPEED_10)
				|| (cmd->duplex != DUPLEX_HALF
					&& cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = cmd->autoneg;

	phydev->speed = cmd->speed;

	phydev->advertising = cmd->advertising;

	if (AUTONEG_ENABLE == cmd->autoneg)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = cmd->duplex;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_sset);

int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	cmd->supported = phydev->supported;

	cmd->advertising = phydev->advertising;

	cmd->speed = phydev->speed;
	cmd->duplex = phydev->duplex;
	cmd->port = PORT_MII;
	cmd->phy_address = phydev->addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = phydev->autoneg;

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_gset);

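/*
 * Example (sketch): a MAC driver can forward its ethtool get/set_settings
 * callbacks straight to these helpers.  "struct mydrv_priv" and its
 * "phydev" member are illustrative names, not part of this file:
 *
 *	static int mydrv_get_settings(struct net_device *dev,
 *			struct ethtool_cmd *cmd)
 *	{
 *		struct mydrv_priv *priv = netdev_priv(dev);
 *
 *		if (!priv->phydev)
 *			return -ENODEV;
 *
 *		return phy_ethtool_gset(priv->phydev, cmd);
 *	}
 */
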
/**
 * phy_mii_ioctl - generic PHY MII ioctl interface
 * @phydev: the phy_device struct
 * @mii_data: MII ioctl data
 * @cmd: ioctl cmd to execute
 *
 * Note that this function is currently incompatible with the
 * PHYCONTROL layer.  It changes registers without regard to
 * current state.  Use at own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev,
		struct mii_ioctl_data *mii_data, int cmd)
{
	u16 val = mii_data->val_in;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data->phy_id = phydev->addr;
		break;
	case SIOCGMIIREG:
		mii_data->val_out = phy_read(phydev, mii_data->reg_num);
		break;

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (mii_data->phy_id == phydev->addr) {
			switch (mii_data->reg_num) {
			case MII_BMCR:
				if ((val & (BMCR_RESET|BMCR_ANENABLE)) == 0)
					phydev->autoneg = AUTONEG_DISABLE;
				else
					phydev->autoneg = AUTONEG_ENABLE;
				if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
					phydev->duplex = DUPLEX_FULL;
				else
					phydev->duplex = DUPLEX_HALF;
				if ((!phydev->autoneg) &&
						(val & BMCR_SPEED1000))
					phydev->speed = SPEED_1000;
				else if ((!phydev->autoneg) &&
						(val & BMCR_SPEED100))
					phydev->speed = SPEED_100;
				break;
			case MII_ADVERTISE:
				phydev->advertising = val;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		phy_write(phydev, mii_data->reg_num, val);

		if (mii_data->reg_num == MII_BMCR
				&& val & BMCR_RESET
				&& phydev->drv->config_init)
			phydev->drv->config_init(phydev);
		break;
	}

	return 0;
}
EXPORT_SYMBOL(phy_mii_ioctl);

/**
 * phy_start_aneg - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 *   them), and then calls the driver's config_aneg function.
 *   If the PHYCONTROL Layer is operating, we change the state to
 *   reflect the beginning of Auto-negotiation or forcing.
 */
int phy_start_aneg(struct phy_device *phydev)
{
	int err;

	spin_lock_bh(&phydev->lock);

	if (AUTONEG_DISABLE == phydev->autoneg)
		phy_sanitize_settings(phydev);

	err = phydev->drv->config_aneg(phydev);

	if (err < 0)
		goto out_unlock;

	if (phydev->state != PHY_HALTED) {
		if (AUTONEG_ENABLE == phydev->autoneg) {
			phydev->state = PHY_AN;
			phydev->link_timeout = PHY_AN_TIMEOUT;
		} else {
			phydev->state = PHY_FORCING;
			phydev->link_timeout = PHY_FORCE_TIMEOUT;
		}
	}

out_unlock:
	spin_unlock_bh(&phydev->lock);
	return err;
}
EXPORT_SYMBOL(phy_start_aneg);

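/*
 * Example (sketch): a driver that narrows the advertised modes at runtime
 * would typically update phydev->advertising and then kick off a fresh
 * negotiation:
 *
 *	phydev->advertising &= ~(ADVERTISED_1000baseT_Full |
 *			ADVERTISED_1000baseT_Half);
 *	err = phy_start_aneg(phydev);
 */
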
static void phy_change(struct work_struct *work);
static void phy_timer(unsigned long data);

/**
 * phy_start_machine - start PHY state machine tracking
 * @phydev: the phy_device struct
 * @handler: callback function for state change notifications
 *
 * Description: The PHY infrastructure can run a state machine
 *   which tracks whether the PHY is starting up, negotiating,
 *   etc.  This function starts the timer which tracks the state
 *   of the PHY.  If you want to be notified when the state changes,
 *   pass in the callback @handler, otherwise, pass NULL.  If you
 *   want to maintain your own state machine, do not call this
 *   function.
 */
void phy_start_machine(struct phy_device *phydev,
		void (*handler)(struct net_device *))
{
	phydev->adjust_state = handler;

	init_timer(&phydev->phy_timer);
	phydev->phy_timer.function = &phy_timer;
	phydev->phy_timer.data = (unsigned long) phydev;
	mod_timer(&phydev->phy_timer, jiffies + HZ);
}

/**
 * phy_stop_machine - stop the PHY state machine tracking
 * @phydev: target phy_device struct
 *
 * Description: Stops the state machine timer, sets the state to UP
 *   (unless it wasn't up yet). This function must be called BEFORE
 *   phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
	del_timer_sync(&phydev->phy_timer);

	spin_lock_bh(&phydev->lock);
	if (phydev->state > PHY_UP)
		phydev->state = PHY_UP;
	spin_unlock_bh(&phydev->lock);

	phydev->adjust_state = NULL;
}

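/*
 * Example (sketch): a MAC driver that attaches to its PHY with phy_attach()
 * (rather than phy_connect(), which starts the machine on its own) might
 * run the state machine around its own lifetime.  The handler name is
 * illustrative:
 *
 *	phy_start_machine(phydev, &mydrv_adjust_state);	// or NULL
 *	...
 *	phy_stop_machine(phydev);
 *	phy_detach(phydev);
 */
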
/**
 * phy_force_reduction - reduce PHY speed/duplex settings by one step
 * @phydev: target phy_device struct
 *
 * Description: Reduces the speed/duplex settings by one notch,
 *   in this order--
 *   1000/FULL, 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 *   The function bottoms out at 10/HALF.
 */
static void phy_force_reduction(struct phy_device *phydev)
{
	int idx;

	idx = phy_find_setting(phydev->speed, phydev->duplex);

	idx++;

	idx = phy_find_valid(idx, phydev->supported);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;

	pr_info("Trying %d/%s\n", phydev->speed,
			DUPLEX_FULL == phydev->duplex ?
			"FULL" : "HALF");
}


/**
 * phy_error - enter HALTED state for this PHY device
 * @phydev: target phy_device struct
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
void phy_error(struct phy_device *phydev)
{
	spin_lock_bh(&phydev->lock);
	phydev->state = PHY_HALTED;
	spin_unlock_bh(&phydev->lock);
}

/**
 * phy_interrupt - PHY interrupt handler
 * @irq: interrupt line
 * @phy_dat: phy_device pointer
 *
 * Description: When a PHY interrupt occurs, the handler disables
 *   interrupts, and schedules a work task to clear the interrupt.
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
	struct phy_device *phydev = phy_dat;

	if (PHY_HALTED == phydev->state)
		return IRQ_NONE;	/* It can't be ours. */

	/* The MDIO bus is not allowed to be written in interrupt
	 * context, so we need to disable the irq here.  A work
	 * queue will write the PHY to disable and clear the
	 * interrupt, and then reenable the irq line. */
	disable_irq_nosync(irq);
	atomic_inc(&phydev->irq_disable);

	schedule_work(&phydev->phy_queue);

	return IRQ_HANDLED;
}

/**
 * phy_enable_interrupts - Enable the interrupts from the PHY side
 * @phydev: target phy_device struct
 */
int phy_enable_interrupts(struct phy_device *phydev)
{
	int err;

	err = phy_clear_interrupt(phydev);

	if (err < 0)
		return err;

	err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

	return err;
}
EXPORT_SYMBOL(phy_enable_interrupts);

/**
 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
 * @phydev: target phy_device struct
 */
int phy_disable_interrupts(struct phy_device *phydev)
{
	int err;

	/* Disable PHY interrupts */
	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

	if (err)
		goto phy_err;

	/* Clear the interrupt */
	err = phy_clear_interrupt(phydev);

	if (err)
		goto phy_err;

	return 0;

phy_err:
	phy_error(phydev);

	return err;
}
EXPORT_SYMBOL(phy_disable_interrupts);

/**
 * phy_start_interrupts - request and enable interrupts for a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Request the interrupt for the given PHY.
 *   If this fails, then we set irq to PHY_POLL.
 *   Otherwise, we enable the interrupts in the PHY.
 *   This should only be called with a valid IRQ number.
 *   Returns 0 on success or < 0 on error.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
	int err = 0;

	INIT_WORK(&phydev->phy_queue, phy_change);

	atomic_set(&phydev->irq_disable, 0);
	if (request_irq(phydev->irq, phy_interrupt,
				IRQF_SHARED,
				"phy_interrupt",
				phydev) < 0) {
		printk(KERN_WARNING "%s: Can't get IRQ %d (PHY)\n",
				phydev->bus->name,
				phydev->irq);
		phydev->irq = PHY_POLL;
		return 0;
	}

	err = phy_enable_interrupts(phydev);

	return err;
}
EXPORT_SYMBOL(phy_start_interrupts);

/**
 * phy_stop_interrupts - disable interrupts from a PHY device
 * @phydev: target phy_device struct
 */
int phy_stop_interrupts(struct phy_device *phydev)
{
	int err;

	err = phy_disable_interrupts(phydev);

	if (err)
		phy_error(phydev);

	free_irq(phydev->irq, phydev);

	/*
	 * Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but we do not really care about what would
	 * be done, except from enable_irq(), so cancel any work
	 * possibly pending and take care of the matter below.
	 */
	cancel_work_sync(&phydev->phy_queue);
	/*
	 * If work indeed has been cancelled, disable_irq() will have
	 * been left unbalanced from phy_interrupt() and enable_irq()
	 * has to be called so that other devices on the line work.
	 */
	while (atomic_dec_return(&phydev->irq_disable) >= 0)
		enable_irq(phydev->irq);

	return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);

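/*
 * Example (sketch): a MAC driver with a dedicated PHY interrupt line would
 * typically set phydev->irq to that line before calling
 * phy_start_interrupts(), and undo it with phy_stop_interrupts() on
 * teardown.  Leaving phydev->irq at PHY_POLL keeps the state machine in
 * polling mode instead.  "platform_irq" is an illustrative name:
 *
 *	phydev->irq = platform_irq;
 *	err = phy_start_interrupts(phydev);
 *	...
 *	err = phy_stop_interrupts(phydev);
 */
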
/**
 * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
 * @work: work_struct that describes the work to be done
 */
static void phy_change(struct work_struct *work)
{
	int err;
	struct phy_device *phydev =
		container_of(work, struct phy_device, phy_queue);

	err = phy_disable_interrupts(phydev);

	if (err)
		goto phy_err;

	spin_lock_bh(&phydev->lock);
	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
		phydev->state = PHY_CHANGELINK;
	spin_unlock_bh(&phydev->lock);

	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);

	/* Reenable interrupts */
	if (PHY_HALTED != phydev->state)
		err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

	if (err)
		goto irq_enable_err;

	return;

irq_enable_err:
	disable_irq(phydev->irq);
	atomic_inc(&phydev->irq_disable);
phy_err:
	phy_error(phydev);
}

/**
 * phy_stop - Bring down the PHY link, and stop checking the status
 * @phydev: target phy_device struct
 */
void phy_stop(struct phy_device *phydev)
{
	spin_lock_bh(&phydev->lock);

	if (PHY_HALTED == phydev->state)
		goto out_unlock;

	if (phydev->irq != PHY_POLL) {
		/* Disable PHY Interrupts */
		phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

		/* Clear any pending interrupts */
		phy_clear_interrupt(phydev);
	}

	phydev->state = PHY_HALTED;

out_unlock:
	spin_unlock_bh(&phydev->lock);

	/*
	 * Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
	 * will not reenable interrupts.
	 */
}


/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to
 *   handle PHY-related work.  Used during startup to start the
 *   PHY, and after a call to phy_stop() to resume operation.
 *   Also used to indicate the MDIO bus has cleared an error
 *   condition.
 */
void phy_start(struct phy_device *phydev)
{
	spin_lock_bh(&phydev->lock);

	switch (phydev->state) {
	case PHY_STARTING:
		phydev->state = PHY_PENDING;
		break;
	case PHY_READY:
		phydev->state = PHY_UP;
		break;
	case PHY_HALTED:
		phydev->state = PHY_RESUMING;
	default:
		break;
	}
	spin_unlock_bh(&phydev->lock);
}
EXPORT_SYMBOL(phy_stop);
EXPORT_SYMBOL(phy_start);

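/*
 * Example (sketch): a MAC driver usually brackets link handling with
 * phy_start()/phy_stop() from its open and close paths once the PHY has
 * been connected.  The function and variable names are illustrative:
 *
 *	// in mydrv_open():
 *	priv->phydev = phy_connect(dev, phy_bus_id, &mydrv_adjust_link,
 *			0, PHY_INTERFACE_MODE_MII);
 *	if (IS_ERR(priv->phydev))
 *		return PTR_ERR(priv->phydev);
 *	phy_start(priv->phydev);
 *
 *	// in mydrv_close():
 *	phy_stop(priv->phydev);
 *	phy_disconnect(priv->phydev);
 */
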
/* PHY timer which handles the state machine */
static void phy_timer(unsigned long data)
{
	struct phy_device *phydev = (struct phy_device *)data;
	int needs_aneg = 0;
	int err = 0;

	spin_lock_bh(&phydev->lock);

	if (phydev->adjust_state)
		phydev->adjust_state(phydev->attached_dev);

	switch (phydev->state) {
	case PHY_DOWN:
	case PHY_STARTING:
	case PHY_READY:
	case PHY_PENDING:
		break;
	case PHY_UP:
		needs_aneg = 1;

		phydev->link_timeout = PHY_AN_TIMEOUT;

		break;
	case PHY_AN:
		err = phy_read_status(phydev);

		if (err < 0)
			break;

		/* If the link is down, give up on
		 * negotiation for now */
		if (!phydev->link) {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
			break;
		}

		/* Check if negotiation is done.  Break
		 * if there's an error */
		err = phy_aneg_done(phydev);
		if (err < 0)
			break;

		/* If AN is done, we're running */
		if (err > 0) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);

		} else if (0 == phydev->link_timeout--) {
			int idx;

			needs_aneg = 1;
			/* If we have the magic_aneg bit,
			 * we try again */
			if (phydev->drv->flags & PHY_HAS_MAGICANEG)
				break;

			/* The timer expired, and we still
			 * don't have a setting, so we try
			 * forcing it until we find one that
			 * works, starting from the fastest speed,
			 * and working our way down */
			idx = phy_find_valid(0, phydev->supported);

			phydev->speed = settings[idx].speed;
			phydev->duplex = settings[idx].duplex;

			phydev->autoneg = AUTONEG_DISABLE;

			pr_info("Trying %d/%s\n", phydev->speed,
					DUPLEX_FULL == phydev->duplex ?
					"FULL" : "HALF");
		}
		break;
867 "FULL" : "HALF"); 868 } 869 break; 870 case PHY_NOLINK: 871 err = phy_read_status(phydev); 872 873 if (err) 874 break; 875 876 if (phydev->link) { 877 phydev->state = PHY_RUNNING; 878 netif_carrier_on(phydev->attached_dev); 879 phydev->adjust_link(phydev->attached_dev); 880 } 881 break; 882 case PHY_FORCING: 883 err = genphy_update_link(phydev); 884 885 if (err) 886 break; 887 888 if (phydev->link) { 889 phydev->state = PHY_RUNNING; 890 netif_carrier_on(phydev->attached_dev); 891 } else { 892 if (0 == phydev->link_timeout--) { 893 phy_force_reduction(phydev); 894 needs_aneg = 1; 895 } 896 } 897 898 phydev->adjust_link(phydev->attached_dev); 899 break; 900 case PHY_RUNNING: 901 /* Only register a CHANGE if we are 902 * polling */ 903 if (PHY_POLL == phydev->irq) 904 phydev->state = PHY_CHANGELINK; 905 break; 906 case PHY_CHANGELINK: 907 err = phy_read_status(phydev); 908 909 if (err) 910 break; 911 912 if (phydev->link) { 913 phydev->state = PHY_RUNNING; 914 netif_carrier_on(phydev->attached_dev); 915 } else { 916 phydev->state = PHY_NOLINK; 917 netif_carrier_off(phydev->attached_dev); 918 } 919 920 phydev->adjust_link(phydev->attached_dev); 921 922 if (PHY_POLL != phydev->irq) 923 err = phy_config_interrupt(phydev, 924 PHY_INTERRUPT_ENABLED); 925 break; 926 case PHY_HALTED: 927 if (phydev->link) { 928 phydev->link = 0; 929 netif_carrier_off(phydev->attached_dev); 930 phydev->adjust_link(phydev->attached_dev); 931 } 932 break; 933 case PHY_RESUMING: 934 935 err = phy_clear_interrupt(phydev); 936 937 if (err) 938 break; 939 940 err = phy_config_interrupt(phydev, 941 PHY_INTERRUPT_ENABLED); 942 943 if (err) 944 break; 945 946 if (AUTONEG_ENABLE == phydev->autoneg) { 947 err = phy_aneg_done(phydev); 948 if (err < 0) 949 break; 950 951 /* err > 0 if AN is done. 952 * Otherwise, it's 0, and we're 953 * still waiting for AN */ 954 if (err > 0) { 955 phydev->state = PHY_RUNNING; 956 } else { 957 phydev->state = PHY_AN; 958 phydev->link_timeout = PHY_AN_TIMEOUT; 959 } 960 } else 961 phydev->state = PHY_RUNNING; 962 break; 963 } 964 965 spin_unlock_bh(&phydev->lock); 966 967 if (needs_aneg) 968 err = phy_start_aneg(phydev); 969 970 if (err < 0) 971 phy_error(phydev); 972 973 mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ); 974 } 975 976