/* Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 * Copyright (c) 2006, 2007  Maciej W. Rozycki
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/phy_led_triggers.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <asm/irq.h>

static const char *phy_speed_to_str(int speed)
{
	switch (speed) {
	case SPEED_10:
		return "10Mbps";
	case SPEED_100:
		return "100Mbps";
	case SPEED_1000:
		return "1Gbps";
	case SPEED_2500:
		return "2.5Gbps";
	case SPEED_5000:
		return "5Gbps";
	case SPEED_10000:
		return "10Gbps";
	case SPEED_20000:
		return "20Gbps";
	case SPEED_25000:
		return "25Gbps";
	case SPEED_40000:
		return "40Gbps";
	case SPEED_50000:
		return "50Gbps";
	case SPEED_56000:
		return "56Gbps";
	case SPEED_100000:
		return "100Gbps";
	case SPEED_UNKNOWN:
		return "Unknown";
	default:
		return "Unsupported (update phy.c)";
	}
}

#define PHY_STATE_STR(_state)			\
	case PHY_##_state:			\
		return __stringify(_state);	\

static const char *phy_state_to_str(enum phy_state st)
{
	switch (st) {
	PHY_STATE_STR(DOWN)
	PHY_STATE_STR(STARTING)
	PHY_STATE_STR(READY)
	PHY_STATE_STR(PENDING)
	PHY_STATE_STR(UP)
	PHY_STATE_STR(AN)
	PHY_STATE_STR(RUNNING)
	PHY_STATE_STR(NOLINK)
	PHY_STATE_STR(FORCING)
	PHY_STATE_STR(CHANGELINK)
	PHY_STATE_STR(HALTED)
	PHY_STATE_STR(RESUMING)
	}

	return NULL;
}


/**
 * phy_print_status - Convenience function to print out the current phy status
 * @phydev: the phy_device struct
 */
void phy_print_status(struct phy_device *phydev)
{
	if (phydev->link) {
		netdev_info(phydev->attached_dev,
			"Link is Up - %s/%s - flow control %s\n",
			phy_speed_to_str(phydev->speed),
			DUPLEX_FULL == phydev->duplex ? "Full" : "Half",
			phydev->pause ? "rx/tx" : "off");
	} else	{
		netdev_info(phydev->attached_dev, "Link is Down\n");
	}
}
EXPORT_SYMBOL(phy_print_status);
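
/* A minimal sketch of how a MAC driver might use phy_print_status() from its
 * adjust_link callback; the "foo" names are hypothetical and only illustrate
 * the calling convention, they are not part of this file:
 *
 *	static void foo_adjust_link(struct net_device *ndev)
 *	{
 *		struct phy_device *phydev = ndev->phydev;
 *
 *		// reprogram the MAC for phydev->speed/duplex/pause here ...
 *		phy_print_status(phydev);
 *	}
 *
 * The callback is the one passed to phy_connect() and runs whenever the state
 * machine below reports a link change.
 */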

/**
 * phy_clear_interrupt - Ack the phy device's interrupt
 * @phydev: the phy_device struct
 *
 * If the @phydev driver has an ack_interrupt function, call it to
 * ack and clear the phy device's interrupt.
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_clear_interrupt(struct phy_device *phydev)
{
	if (phydev->drv->ack_interrupt)
		return phydev->drv->ack_interrupt(phydev);

	return 0;
}

/**
 * phy_config_interrupt - configure the PHY device for the requested interrupts
 * @phydev: the phy_device struct
 * @interrupts: interrupt flags to configure for this @phydev
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
	phydev->interrupts = interrupts;
	if (phydev->drv->config_intr)
		return phydev->drv->config_intr(phydev);

	return 0;
}


/**
 * phy_aneg_done - return auto-negotiation status
 * @phydev: target phy_device struct
 *
 * Description: Return the auto-negotiation status for this @phydev.
 * Returns > 0 on success or < 0 on error. 0 means that auto-negotiation
 * is still pending.
 */
int phy_aneg_done(struct phy_device *phydev)
{
	if (phydev->drv && phydev->drv->aneg_done)
		return phydev->drv->aneg_done(phydev);

	return genphy_aneg_done(phydev);
}
EXPORT_SYMBOL(phy_aneg_done);

/* A structure for mapping a particular speed and duplex
 * combination to a particular SUPPORTED and ADVERTISED value
 */
struct phy_setting {
	int speed;
	int duplex;
	u32 setting;
};

/* A mapping of all SUPPORTED settings to speed/duplex */
static const struct phy_setting settings[] = {
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseKR_Full,
	},
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseKX4_Full,
	},
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseT_Full,
	},
	{
		.speed = SPEED_2500,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_2500baseX_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseKX_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_1000baseT_Half,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_100baseT_Full,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_100baseT_Half,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10baseT_Full,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_10baseT_Half,
	},
};

#define MAX_NUM_SETTINGS ARRAY_SIZE(settings)

/**
 * phy_find_setting - find a PHY settings array entry that matches speed & duplex
 * @speed: speed to match
 * @duplex: duplex to match
 *
 * Description: Searches the settings array for the setting which
 *   matches the desired speed and duplex, and returns the index
 *   of that setting.  Returns the index of the last setting if
 *   none of the others match.
 */
static inline unsigned int phy_find_setting(int speed, int duplex)
{
	unsigned int idx = 0;

	while (idx < ARRAY_SIZE(settings) &&
	       (settings[idx].speed != speed || settings[idx].duplex != duplex))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_find_valid - find a PHY setting that matches the requested features mask
 * @idx: The first index in settings[] to search
 * @features: A mask of the valid settings
 *
 * Description: Returns the index of the first valid setting less
 *   than or equal to the one pointed to by idx, as determined by
 *   the mask in features.  Returns the index of the last setting
 *   if nothing else matches.
 */
static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
{
	while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_supported_speeds - return all speeds currently supported by a phy device
 * @phy: The phy device to return supported speeds of.
 * @speeds: buffer to store supported speeds in.
 * @size:   size of speeds buffer.
 * Description: Returns the number of supported speeds, and fills the speeds
 * buffer with the supported speeds. If the speeds buffer is too small to
 * contain all currently supported speeds, it will return as many as fit.
 */
unsigned int phy_supported_speeds(struct phy_device *phy,
				  unsigned int *speeds,
				  unsigned int size)
{
	unsigned int count = 0;
	unsigned int idx = 0;

	while (idx < MAX_NUM_SETTINGS && count < size) {
		idx = phy_find_valid(idx, phy->supported);

		if (!(settings[idx].setting & phy->supported))
			break;

		/* Assumes settings are grouped by speed */
		if ((count == 0) ||
		    (speeds[count - 1] != settings[idx].speed)) {
			speeds[count] = settings[idx].speed;
			count++;
		}
		idx++;
	}

	return count;
}
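
/* A minimal sketch (not a complete function) of how a caller inside phylib
 * might enumerate the supported speeds of an attached phydev; the buffer
 * bounds the number of speeds returned:
 *
 *	unsigned int speeds[MAX_NUM_SETTINGS];
 *	unsigned int i, n;
 *
 *	n = phy_supported_speeds(phydev, speeds, ARRAY_SIZE(speeds));
 *	for (i = 0; i < n; i++)
 *		phydev_dbg(phydev, "supports %s\n", phy_speed_to_str(speeds[i]));
 */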

/**
 * phy_check_valid - check if there is a valid PHY setting which matches
 *		     speed, duplex, and feature mask
 * @speed: speed to match
 * @duplex: duplex to match
 * @features: A mask of the valid settings
 *
 * Description: Returns true if there is a valid setting, false otherwise.
 */
static inline bool phy_check_valid(int speed, int duplex, u32 features)
{
	unsigned int idx;

	idx = phy_find_valid(phy_find_setting(speed, duplex), features);

	return settings[idx].speed == speed && settings[idx].duplex == duplex &&
		(settings[idx].setting & features);
}

/**
 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
 * @phydev: the target phy_device struct
 *
 * Description: Make sure the PHY is set to supported speeds and
 *   duplexes.  Drop down by one in this order:  1000/FULL,
 *   1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 */
static void phy_sanitize_settings(struct phy_device *phydev)
{
	u32 features = phydev->supported;
	unsigned int idx;

	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		phydev->autoneg = AUTONEG_DISABLE;

	idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
			features);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;
}

/**
 * phy_ethtool_sset - generic ethtool sset function, handles all the details
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd
 *
 * A few notes about parameter checking:
 * - We don't set port or transceiver, so we don't care what they
 *   were set to.
 * - phy_start_aneg() will make sure forced settings are sane, and
 *   choose the next best ones from the ones selected, so we don't
 *   care if ethtool tries to give us bad values.
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->phy_address != phydev->mdio.addr)
		return -EINVAL;

	/* We make sure that we don't pass unsupported values in to the PHY */
	cmd->advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = cmd->autoneg;

	phydev->speed = speed;

	phydev->advertising = cmd->advertising;

	if (AUTONEG_ENABLE == cmd->autoneg)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = cmd->duplex;

	phydev->mdix_ctrl = cmd->eth_tp_mdix_ctrl;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_sset);

int phy_ethtool_ksettings_set(struct phy_device *phydev,
			      const struct ethtool_link_ksettings *cmd)
{
	u8 autoneg = cmd->base.autoneg;
	u8 duplex = cmd->base.duplex;
	u32 speed = cmd->base.speed;
	u32 advertising;

	if (cmd->base.phy_address != phydev->mdio.addr)
		return -EINVAL;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	/* We make sure that we don't pass unsupported values in to the PHY */
	advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (autoneg == AUTONEG_ENABLE && advertising == 0)
		return -EINVAL;

	if (autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (duplex != DUPLEX_HALF &&
	      duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = autoneg;

	phydev->speed = speed;

	phydev->advertising = advertising;

	if (autoneg == AUTONEG_ENABLE)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = duplex;

	phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_ksettings_set);

int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	cmd->supported = phydev->supported;

	cmd->advertising = phydev->advertising;
	cmd->lp_advertising = phydev->lp_advertising;

	ethtool_cmd_speed_set(cmd, phydev->speed);
	cmd->duplex = phydev->duplex;
	if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
		cmd->port = PORT_BNC;
	else
		cmd->port = PORT_MII;
	cmd->phy_address = phydev->mdio.addr;
	cmd->transceiver = phy_is_internal(phydev) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = phydev->autoneg;
	cmd->eth_tp_mdix_ctrl = phydev->mdix_ctrl;
	cmd->eth_tp_mdix = phydev->mdix;

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_gset);

int phy_ethtool_ksettings_get(struct phy_device *phydev,
			      struct ethtool_link_ksettings *cmd)
{
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						phydev->supported);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						phydev->advertising);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
						phydev->lp_advertising);

	cmd->base.speed = phydev->speed;
	cmd->base.duplex = phydev->duplex;
	if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
		cmd->base.port = PORT_BNC;
	else
		cmd->base.port = PORT_MII;

	cmd->base.phy_address = phydev->mdio.addr;
	cmd->base.autoneg = phydev->autoneg;
	cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
	cmd->base.eth_tp_mdix = phydev->mdix;

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_ksettings_get);

/**
 * phy_mii_ioctl - generic PHY MII ioctl interface
 * @phydev: the phy_device struct
 * @ifr: &struct ifreq for socket ioctl's
 * @cmd: ioctl cmd to execute
 *
 * Note that this function is currently incompatible with the
 * PHYCONTROL layer.  It changes registers without regard to
 * current state.  Use at own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *mii_data = if_mii(ifr);
	u16 val = mii_data->val_in;
	bool change_autoneg = false;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data->phy_id = phydev->mdio.addr;
		/* fall through */

	case SIOCGMIIREG:
		mii_data->val_out = mdiobus_read(phydev->mdio.bus,
						 mii_data->phy_id,
						 mii_data->reg_num);
		return 0;

	case SIOCSMIIREG:
		if (mii_data->phy_id == phydev->mdio.addr) {
			switch (mii_data->reg_num) {
			case MII_BMCR:
				if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
					if (phydev->autoneg == AUTONEG_ENABLE)
						change_autoneg = true;
					phydev->autoneg = AUTONEG_DISABLE;
					if (val & BMCR_FULLDPLX)
						phydev->duplex = DUPLEX_FULL;
					else
						phydev->duplex = DUPLEX_HALF;
					if (val & BMCR_SPEED1000)
						phydev->speed = SPEED_1000;
					else if (val & BMCR_SPEED100)
						phydev->speed = SPEED_100;
					else
						phydev->speed = SPEED_10;
				} else {
					if (phydev->autoneg == AUTONEG_DISABLE)
						change_autoneg = true;
					phydev->autoneg = AUTONEG_ENABLE;
				}
				break;
			case MII_ADVERTISE:
				phydev->advertising = mii_adv_to_ethtool_adv_t(val);
				change_autoneg = true;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		mdiobus_write(phydev->mdio.bus, mii_data->phy_id,
			      mii_data->reg_num, val);

		if (mii_data->phy_id == phydev->mdio.addr &&
		    mii_data->reg_num == MII_BMCR &&
		    val & BMCR_RESET)
			return phy_init_hw(phydev);

		if (change_autoneg)
			return phy_start_aneg(phydev);

		return 0;

	case SIOCSHWTSTAMP:
		if (phydev->drv && phydev->drv->hwtstamp)
			return phydev->drv->hwtstamp(phydev, ifr);
		/* fall through */

	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(phy_mii_ioctl);
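
/* A minimal sketch of how a MAC driver might wire its ioctl handler to
 * phy_mii_ioctl(); the "foo" function name is hypothetical:
 *
 *	static int foo_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
 *	{
 *		if (!ndev->phydev)
 *			return -ENODEV;
 *		return phy_mii_ioctl(ndev->phydev, ifr, cmd);
 *	}
 *
 * and point .ndo_do_ioctl at it in the driver's net_device_ops.
 */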

/**
 * phy_start_aneg - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 *   them), and then calls the driver's config_aneg function.
 *   If the PHYCONTROL Layer is operating, we change the state to
 *   reflect the beginning of Auto-negotiation or forcing.
 */
int phy_start_aneg(struct phy_device *phydev)
{
	int err;

	if (!phydev->drv)
		return -EIO;

	mutex_lock(&phydev->lock);

	if (AUTONEG_DISABLE == phydev->autoneg)
		phy_sanitize_settings(phydev);

	/* Invalidate LP advertising flags */
	phydev->lp_advertising = 0;

	err = phydev->drv->config_aneg(phydev);
	if (err < 0)
		goto out_unlock;

	if (phydev->state != PHY_HALTED) {
		if (AUTONEG_ENABLE == phydev->autoneg) {
			phydev->state = PHY_AN;
			phydev->link_timeout = PHY_AN_TIMEOUT;
		} else {
			phydev->state = PHY_FORCING;
			phydev->link_timeout = PHY_FORCE_TIMEOUT;
		}
	}

out_unlock:
	mutex_unlock(&phydev->lock);
	return err;
}
EXPORT_SYMBOL(phy_start_aneg);
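
/* A minimal sketch (a fragment, not a complete function) of forcing 100/Full
 * on an attached PHY and letting phy_start_aneg() sanitize and apply it; in
 * practice most callers go through phy_ethtool_ksettings_set() instead:
 *
 *	phydev->autoneg = AUTONEG_DISABLE;
 *	phydev->speed = SPEED_100;
 *	phydev->duplex = DUPLEX_FULL;
 *	err = phy_start_aneg(phydev);
 */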

/**
 * phy_start_machine - start PHY state machine tracking
 * @phydev: the phy_device struct
 *
 * Description: The PHY infrastructure can run a state machine
 *   which tracks whether the PHY is starting up, negotiating,
 *   etc.  This function starts the timer which tracks the state
 *   of the PHY.  If you want to maintain your own state machine,
 *   do not call this function.
 */
void phy_start_machine(struct phy_device *phydev)
{
	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
}

/**
 * phy_trigger_machine - trigger the state machine to run
 *
 * @phydev: the phy_device struct
 * @sync: indicate whether we should wait for the workqueue cancellation
 *
 * Description: There has been a change in state which requires that the
 *   state machine runs.
 */

static void phy_trigger_machine(struct phy_device *phydev, bool sync)
{
	if (sync)
		cancel_delayed_work_sync(&phydev->state_queue);
	else
		cancel_delayed_work(&phydev->state_queue);
	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
}

/**
 * phy_stop_machine - stop the PHY state machine tracking
 * @phydev: target phy_device struct
 *
 * Description: Stops the state machine timer, sets the state to UP
 *   (unless it wasn't up yet). This function must be called BEFORE
 *   phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
	cancel_delayed_work_sync(&phydev->state_queue);

	mutex_lock(&phydev->lock);
	if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
		phydev->state = PHY_UP;
	mutex_unlock(&phydev->lock);
}

/**
 * phy_error - enter HALTED state for this PHY device
 * @phydev: target phy_device struct
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
static void phy_error(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);
	phydev->state = PHY_HALTED;
	mutex_unlock(&phydev->lock);

	phy_trigger_machine(phydev, false);
}

/**
 * phy_interrupt - PHY interrupt handler
 * @irq: interrupt line
 * @phy_dat: phy_device pointer
 *
 * Description: When a PHY interrupt occurs, the handler disables
 * interrupts, and uses phy_change to handle the interrupt.
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
	struct phy_device *phydev = phy_dat;

	if (PHY_HALTED == phydev->state)
		return IRQ_NONE;		/* It can't be ours.  */

	disable_irq_nosync(irq);
	atomic_inc(&phydev->irq_disable);

	phy_change(phydev);

	return IRQ_HANDLED;
}

/**
 * phy_enable_interrupts - Enable the interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_enable_interrupts(struct phy_device *phydev)
{
	int err = phy_clear_interrupt(phydev);

	if (err < 0)
		return err;

	return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
}

/**
 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_disable_interrupts(struct phy_device *phydev)
{
	int err;

	/* Disable PHY interrupts */
	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
	if (err)
		goto phy_err;

	/* Clear the interrupt */
	err = phy_clear_interrupt(phydev);
	if (err)
		goto phy_err;

	return 0;

phy_err:
	phy_error(phydev);

	return err;
}

/**
 * phy_start_interrupts - request and enable interrupts for a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Request the interrupt for the given PHY.
 *   If this fails, then we set irq to PHY_POLL.
 *   Otherwise, we enable the interrupts in the PHY.
 *   This should only be called with a valid IRQ number.
 *   Returns 0 on success or < 0 on error.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
	atomic_set(&phydev->irq_disable, 0);
	if (request_threaded_irq(phydev->irq, NULL, phy_interrupt,
				 IRQF_ONESHOT | IRQF_SHARED,
				 phydev_name(phydev), phydev) < 0) {
		pr_warn("%s: Can't get IRQ %d (PHY)\n",
			phydev->mdio.bus->name, phydev->irq);
		phydev->irq = PHY_POLL;
		return 0;
	}

	return phy_enable_interrupts(phydev);
}
EXPORT_SYMBOL(phy_start_interrupts);
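
/* A minimal sketch of an interrupt-driven connect path a MAC driver might
 * use; names prefixed "foo" are hypothetical.  If the request above fails,
 * the code silently falls back to polling (phydev->irq = PHY_POLL):
 *
 *	phydev->irq = foo_phy_irq;		// valid IRQ number
 *	err = phy_start_interrupts(phydev);
 *	if (err < 0)
 *		return err;
 *	phy_start(phydev);
 */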

/**
 * phy_stop_interrupts - disable interrupts from a PHY device
 * @phydev: target phy_device struct
 */
int phy_stop_interrupts(struct phy_device *phydev)
{
	int err = phy_disable_interrupts(phydev);

	if (err)
		phy_error(phydev);

	free_irq(phydev->irq, phydev);

	/* If work indeed has been cancelled, disable_irq() will have
	 * been left unbalanced from phy_interrupt() and enable_irq()
	 * has to be called so that other devices on the line work.
	 */
	while (atomic_dec_return(&phydev->irq_disable) >= 0)
		enable_irq(phydev->irq);

	return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);

/**
 * phy_change - Called by the phy_interrupt to handle PHY changes
 * @phydev: phy_device struct that interrupted
 */
void phy_change(struct phy_device *phydev)
{
	if (phy_interrupt_is_valid(phydev)) {
		if (phydev->drv->did_interrupt &&
		    !phydev->drv->did_interrupt(phydev))
			goto ignore;

		if (phy_disable_interrupts(phydev))
			goto phy_err;
	}

	mutex_lock(&phydev->lock);
	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
		phydev->state = PHY_CHANGELINK;
	mutex_unlock(&phydev->lock);

	if (phy_interrupt_is_valid(phydev)) {
		atomic_dec(&phydev->irq_disable);
		enable_irq(phydev->irq);

		/* Reenable interrupts */
		if (PHY_HALTED != phydev->state &&
		    phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
			goto irq_enable_err;
	}

	/* reschedule state queue work to run as soon as possible */
	phy_trigger_machine(phydev, true);
	return;

ignore:
	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);
	return;

irq_enable_err:
	disable_irq(phydev->irq);
	atomic_inc(&phydev->irq_disable);
phy_err:
	phy_error(phydev);
}

/**
 * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
 * @work: work_struct that describes the work to be done
 */
void phy_change_work(struct work_struct *work)
{
	struct phy_device *phydev =
		container_of(work, struct phy_device, phy_queue);

	phy_change(phydev);
}

/**
 * phy_stop - Bring down the PHY link, and stop checking the status
 * @phydev: target phy_device struct
 */
void phy_stop(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	if (PHY_HALTED == phydev->state)
		goto out_unlock;

	if (phy_interrupt_is_valid(phydev)) {
		/* Disable PHY Interrupts */
		phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

		/* Clear any pending interrupts */
		phy_clear_interrupt(phydev);
	}

	phydev->state = PHY_HALTED;

out_unlock:
	mutex_unlock(&phydev->lock);

	/* Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
	 * will not reenable interrupts.
	 */
}
EXPORT_SYMBOL(phy_stop);
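
/* A minimal sketch of the teardown order a MAC driver might use in its
 * ndo_stop path (phy_disconnect() lives in phy_device.c); the "foo" name is
 * hypothetical:
 *
 *	static int foo_stop(struct net_device *ndev)
 *	{
 *		phy_stop(ndev->phydev);
 *		phy_disconnect(ndev->phydev);
 *		// stop MAC DMA, free rings, etc.
 *		return 0;
 *	}
 */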

/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to
 *   handle PHY-related work.  Used during startup to start the
 *   PHY, and after a call to phy_stop() to resume operation.
 *   Also used to indicate the MDIO bus has cleared an error
 *   condition.
 */
void phy_start(struct phy_device *phydev)
{
	bool do_resume = false;
	int err = 0;

	mutex_lock(&phydev->lock);

	switch (phydev->state) {
	case PHY_STARTING:
		phydev->state = PHY_PENDING;
		break;
	case PHY_READY:
		phydev->state = PHY_UP;
		break;
	case PHY_HALTED:
		/* make sure interrupts are re-enabled for the PHY */
		if (phydev->irq != PHY_POLL) {
			err = phy_enable_interrupts(phydev);
			if (err < 0)
				break;
		}

		phydev->state = PHY_RESUMING;
		do_resume = true;
		break;
	default:
		break;
	}
	mutex_unlock(&phydev->lock);

	/* if phy was suspended, bring the physical link up again */
	if (do_resume)
		phy_resume(phydev);

	phy_trigger_machine(phydev, true);
}
EXPORT_SYMBOL(phy_start);
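
/* A minimal sketch of the bring-up order a MAC driver might use in its
 * ndo_open path; the "foo" names and the bus_id string are hypothetical:
 *
 *	static int foo_open(struct net_device *ndev)
 *	{
 *		struct phy_device *phydev;
 *
 *		phydev = phy_connect(ndev, "mdio-foo:01", foo_adjust_link,
 *				     PHY_INTERFACE_MODE_RGMII);
 *		if (IS_ERR(phydev))
 *			return PTR_ERR(phydev);
 *
 *		phy_start(phydev);	// kicks the state machine below
 *		return 0;
 *	}
 */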

static void phy_adjust_link(struct phy_device *phydev)
{
	phydev->adjust_link(phydev->attached_dev);
	phy_led_trigger_change_speed(phydev);
}

/**
 * phy_state_machine - Handle the state machine
 * @work: work_struct that describes the work to be done
 */
void phy_state_machine(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct phy_device *phydev =
			container_of(dwork, struct phy_device, state_queue);
	bool needs_aneg = false, do_suspend = false;
	enum phy_state old_state;
	int err = 0;
	int old_link;

	mutex_lock(&phydev->lock);

	old_state = phydev->state;

	if (phydev->drv && phydev->drv->link_change_notify)
		phydev->drv->link_change_notify(phydev);

	switch (phydev->state) {
	case PHY_DOWN:
	case PHY_STARTING:
	case PHY_READY:
	case PHY_PENDING:
		break;
	case PHY_UP:
		needs_aneg = true;

		phydev->link_timeout = PHY_AN_TIMEOUT;

		break;
	case PHY_AN:
		err = phy_read_status(phydev);
		if (err < 0)
			break;

		/* If the link is down, give up on negotiation for now */
		if (!phydev->link) {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
			phy_adjust_link(phydev);
			break;
		}

		/* Check if negotiation is done.  Break if there's an error */
		err = phy_aneg_done(phydev);
		if (err < 0)
			break;

		/* If AN is done, we're running */
		if (err > 0) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phy_adjust_link(phydev);

		} else if (0 == phydev->link_timeout--)
			needs_aneg = true;
		break;
	case PHY_NOLINK:
		if (phy_interrupt_is_valid(phydev))
			break;

		err = phy_read_status(phydev);
		if (err)
			break;

		if (phydev->link) {
			if (AUTONEG_ENABLE == phydev->autoneg) {
				err = phy_aneg_done(phydev);
				if (err < 0)
					break;

				if (!err) {
					phydev->state = PHY_AN;
					phydev->link_timeout = PHY_AN_TIMEOUT;
					break;
				}
			}
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phy_adjust_link(phydev);
		}
		break;
	case PHY_FORCING:
		err = genphy_update_link(phydev);
		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			if (0 == phydev->link_timeout--)
				needs_aneg = true;
		}

		phy_adjust_link(phydev);
		break;
	case PHY_RUNNING:
		/* Only register a CHANGE if we are polling and the link has
		 * changed since the last check.
		 */
		if (phydev->irq == PHY_POLL) {
			old_link = phydev->link;
			err = phy_read_status(phydev);
			if (err)
				break;

			if (old_link != phydev->link)
				phydev->state = PHY_CHANGELINK;
		}
		/*
		 * Failsafe: check that nobody set phydev->link=0 between two
		 * poll cycles, otherwise we won't leave RUNNING state as long
		 * as link remains down.
		 */
		if (!phydev->link && phydev->state == PHY_RUNNING) {
			phydev->state = PHY_CHANGELINK;
			phydev_err(phydev, "no link in PHY_RUNNING\n");
		}
		break;
	case PHY_CHANGELINK:
		err = phy_read_status(phydev);
		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
		}

		phy_adjust_link(phydev);

		if (phy_interrupt_is_valid(phydev))
			err = phy_config_interrupt(phydev,
						   PHY_INTERRUPT_ENABLED);
		break;
	case PHY_HALTED:
		if (phydev->link) {
			phydev->link = 0;
			netif_carrier_off(phydev->attached_dev);
			phy_adjust_link(phydev);
			do_suspend = true;
		}
		break;
	case PHY_RESUMING:
		if (AUTONEG_ENABLE == phydev->autoneg) {
			err = phy_aneg_done(phydev);
			if (err < 0)
				break;

			/* err > 0 if AN is done.
			 * Otherwise, it's 0, and we're  still waiting for AN
			 */
			if (err > 0) {
				err = phy_read_status(phydev);
				if (err)
					break;

				if (phydev->link) {
					phydev->state = PHY_RUNNING;
					netif_carrier_on(phydev->attached_dev);
				} else	{
					phydev->state = PHY_NOLINK;
				}
				phy_adjust_link(phydev);
			} else {
				phydev->state = PHY_AN;
				phydev->link_timeout = PHY_AN_TIMEOUT;
			}
		} else {
			err = phy_read_status(phydev);
			if (err)
				break;

			if (phydev->link) {
				phydev->state = PHY_RUNNING;
				netif_carrier_on(phydev->attached_dev);
			} else	{
				phydev->state = PHY_NOLINK;
			}
			phy_adjust_link(phydev);
		}
		break;
	}

	mutex_unlock(&phydev->lock);

	if (needs_aneg)
		err = phy_start_aneg(phydev);
	else if (do_suspend)
		phy_suspend(phydev);

	if (err < 0)
		phy_error(phydev);

	phydev_dbg(phydev, "PHY state change %s -> %s\n",
		   phy_state_to_str(old_state),
		   phy_state_to_str(phydev->state));

	/* Only re-schedule a PHY state machine change if we are polling the
	 * PHY; if PHY_IGNORE_INTERRUPT is set, then we will be moving
	 * between states from phy_mac_interrupt().
	 */
	if (phydev->irq == PHY_POLL)
		queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
				   PHY_STATE_TIME * HZ);
}

/**
 * phy_mac_interrupt - MAC says the link has changed
 * @phydev: phy_device struct with changed link
 * @new_link: Link is Up/Down.
 *
 * Description: The MAC layer is able to indicate that there has been a change
 * in the PHY link status. Set the new link status, and trigger the
 * state machine to run via the work queue.
 */
void phy_mac_interrupt(struct phy_device *phydev, int new_link)
{
	phydev->link = new_link;

	/* Trigger a state machine change */
	queue_work(system_power_efficient_wq, &phydev->phy_queue);
}
EXPORT_SYMBOL(phy_mac_interrupt);
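
/* A minimal sketch of how a MAC whose hardware reports PHY link events might
 * feed them into phylib; such a driver would attach the PHY with
 * irq = PHY_IGNORE_INTERRUPT so the state machine is driven from here rather
 * than by polling.  The "foo" handler and register read are hypothetical:
 *
 *	static irqreturn_t foo_link_isr(int irq, void *dev_id)
 *	{
 *		struct net_device *ndev = dev_id;
 *		int up = foo_read_link_status(ndev);	// hypothetical MAC register read
 *
 *		phy_mac_interrupt(ndev->phydev, up);
 *		return IRQ_HANDLED;
 *	}
 */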

/**
 * phy_init_eee - init and check the EEE feature
 * @phydev: target phy_device struct
 * @clk_stop_enable: PHY may stop the clock during LPI
 *
 * Description: checks whether Energy-Efficient Ethernet (EEE) is supported
 * by looking at the MMD registers 3.20 and 7.60/61, and programs the MMD
 * register 3.0 to set the "Clock stop enable" bit if required.
 */
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
	if (!phydev->drv)
		return -EIO;

	/* According to 802.3az, EEE is supported only in full-duplex mode. */
	if (phydev->duplex == DUPLEX_FULL) {
		int eee_lp, eee_cap, eee_adv;
		u32 lp, cap, adv;
		int status;

		/* Read phy status to properly get the right settings */
		status = phy_read_status(phydev);
		if (status)
			return status;

		/* First check if the EEE ability is supported */
		eee_cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
		if (eee_cap <= 0)
			goto eee_exit_err;

		cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
		if (!cap)
			goto eee_exit_err;

		/* Check which link settings negotiated and verify it in
		 * the EEE advertising registers.
		 */
		eee_lp = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
		if (eee_lp <= 0)
			goto eee_exit_err;

		eee_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
		if (eee_adv <= 0)
			goto eee_exit_err;

		adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
		if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
			goto eee_exit_err;

		if (clk_stop_enable) {
			/* Configure the PHY to stop receiving xMII
			 * clock while it is signaling LPI.
			 */
			int val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
			if (val < 0)
				return val;

			val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
			phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, val);
		}

		return 0; /* EEE supported */
	}
eee_exit_err:
	return -EPROTONOSUPPORT;
}
EXPORT_SYMBOL(phy_init_eee);
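
/* A minimal sketch (a fragment) of how a MAC driver with EEE-capable hardware
 * might use phy_init_eee() once the link is up, e.g. from its adjust_link
 * callback; the "foo" helper and "priv" pointer are hypothetical:
 *
 *	if (phydev->link && !phy_init_eee(phydev, true))
 *		foo_enable_mac_lpi_timers(priv);	// hypothetical MAC-side LPI setup
 */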

/**
 * phy_get_eee_err - report the EEE wake error count
 * @phydev: target phy_device struct
 *
 * Description: reports the number of times the PHY
 * failed to complete its normal wake sequence.
 */
int phy_get_eee_err(struct phy_device *phydev)
{
	if (!phydev->drv)
		return -EIO;

	return phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_WK_ERR);
}
EXPORT_SYMBOL(phy_get_eee_err);

/**
 * phy_ethtool_get_eee - get EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: reports the Supported/Advertisement/LP Advertisement
 * EEE capabilities.
 */
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int val;

	if (!phydev->drv)
		return -EIO;

	/* Get Supported EEE */
	val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
	if (val < 0)
		return val;
	data->supported = mmd_eee_cap_to_ethtool_sup_t(val);

	/* Get advertisement EEE */
	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
	if (val < 0)
		return val;
	data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Get LP advertisement EEE */
	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
	if (val < 0)
		return val;
	data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_get_eee);

/**
 * phy_ethtool_set_eee - set EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: programs the EEE Advertisement register.
 */
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int cap, old_adv, adv, ret;

	if (!phydev->drv)
		return -EIO;

	/* Get Supported EEE */
	cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
	if (cap < 0)
		return cap;

	old_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
	if (old_adv < 0)
		return old_adv;

	adv = ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap;

	/* Mask prohibited EEE modes */
	adv &= ~phydev->eee_broken_modes;

	if (old_adv != adv) {
		ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
		if (ret < 0)
			return ret;

		/* Restart autonegotiation so the new modes get sent to the
		 * link partner.
		 */
		ret = genphy_restart_aneg(phydev);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);
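
/* A minimal sketch of how a MAC driver's ethtool_ops might forward EEE
 * requests to these helpers (MAC-side fields of struct ethtool_eee, such as
 * tx_lpi_timer, would still be filled in by the driver); the "foo" names are
 * hypothetical:
 *
 *	static int foo_get_eee(struct net_device *ndev, struct ethtool_eee *e)
 *	{
 *		return phy_ethtool_get_eee(ndev->phydev, e);
 *	}
 *
 *	static int foo_set_eee(struct net_device *ndev, struct ethtool_eee *e)
 *	{
 *		return phy_ethtool_set_eee(ndev->phydev, e);
 *	}
 */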

int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv && phydev->drv->set_wol)
		return phydev->drv->set_wol(phydev, wol);

	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(phy_ethtool_set_wol);

void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv && phydev->drv->get_wol)
		phydev->drv->get_wol(phydev, wol);
}
EXPORT_SYMBOL(phy_ethtool_get_wol);

int phy_ethtool_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_get(phydev, cmd);
}
EXPORT_SYMBOL(phy_ethtool_get_link_ksettings);

int phy_ethtool_set_link_ksettings(struct net_device *ndev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_set(phydev, cmd);
}
EXPORT_SYMBOL(phy_ethtool_set_link_ksettings);

int phy_ethtool_nway_reset(struct net_device *ndev)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	if (!phydev->drv)
		return -EIO;

	return genphy_restart_aneg(phydev);
}
EXPORT_SYMBOL(phy_ethtool_nway_reset);
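
/* A minimal sketch of how a MAC driver can plug the generic helpers above
 * straight into its ethtool_ops, since they already take a net_device; the
 * "foo" structure name is hypothetical:
 *
 *	static const struct ethtool_ops foo_ethtool_ops = {
 *		.get_link_ksettings	= phy_ethtool_get_link_ksettings,
 *		.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 *		.nway_reset		= phy_ethtool_nway_reset,
 *		.get_link		= ethtool_op_get_link,
 *	};
 */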
1431