xref: /linux/drivers/net/usb/lan78xx.c (revision cd65cd95128781ca59d06611270fcbd9b4a7cf8d)
1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <linux/phy.h>
33 #include <net/ip6_checksum.h>
34 #include <linux/interrupt.h>
35 #include <linux/irqdomain.h>
36 #include <linux/irq.h>
37 #include <linux/irqchip/chained_irq.h>
38 #include <linux/microchipphy.h>
39 #include <linux/phy_fixed.h>
40 #include <linux/of_mdio.h>
41 #include <linux/of_net.h>
42 #include "lan78xx.h"
43 
44 #define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
45 #define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
46 #define DRIVER_NAME	"lan78xx"
47 
48 #define TX_TIMEOUT_JIFFIES		(5 * HZ)
49 #define THROTTLE_JIFFIES		(HZ / 8)
50 #define UNLINK_TIMEOUT_MS		3
51 
52 #define RX_MAX_QUEUE_MEMORY		(60 * 1518)
53 
54 #define SS_USB_PKT_SIZE			(1024)
55 #define HS_USB_PKT_SIZE			(512)
56 #define FS_USB_PKT_SIZE			(64)
57 
58 #define MAX_RX_FIFO_SIZE		(12 * 1024)
59 #define MAX_TX_FIFO_SIZE		(12 * 1024)
60 #define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
61 #define DEFAULT_BULK_IN_DELAY		(0x0800)
62 #define MAX_SINGLE_PACKET_SIZE		(9000)
63 #define DEFAULT_TX_CSUM_ENABLE		(true)
64 #define DEFAULT_RX_CSUM_ENABLE		(true)
65 #define DEFAULT_TSO_CSUM_ENABLE		(true)
66 #define DEFAULT_VLAN_FILTER_ENABLE	(true)
67 #define TX_OVERHEAD			(8)
68 #define RXW_PADDING			2
69 
70 #define LAN78XX_USB_VENDOR_ID		(0x0424)
71 #define LAN7800_USB_PRODUCT_ID		(0x7800)
72 #define LAN7850_USB_PRODUCT_ID		(0x7850)
73 #define LAN7801_USB_PRODUCT_ID		(0x7801)
74 #define LAN78XX_EEPROM_MAGIC		(0x78A5)
75 #define LAN78XX_OTP_MAGIC		(0x78F3)
76 
77 #define	MII_READ			1
78 #define	MII_WRITE			0
79 
80 #define EEPROM_INDICATOR		(0xA5)
81 #define EEPROM_MAC_OFFSET		(0x01)
82 #define MAX_EEPROM_SIZE			512
83 #define OTP_INDICATOR_1			(0xF3)
84 #define OTP_INDICATOR_2			(0xF7)
85 
86 #define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
87 					 WAKE_MCAST | WAKE_BCAST | \
88 					 WAKE_ARP | WAKE_MAGIC)
89 
90 /* USB related defines */
91 #define BULK_IN_PIPE			1
92 #define BULK_OUT_PIPE			2
93 
94 /* default autosuspend delay (mSec)*/
95 #define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
96 
97 /* statistic update interval (mSec) */
98 #define STAT_UPDATE_TIMER		(1 * 1000)
99 
100 /* defines interrupts from interrupt EP */
101 #define MAX_INT_EP			(32)
102 #define INT_EP_INTEP			(31)
103 #define INT_EP_OTP_WR_DONE		(28)
104 #define INT_EP_EEE_TX_LPI_START		(26)
105 #define INT_EP_EEE_TX_LPI_STOP		(25)
106 #define INT_EP_EEE_RX_LPI		(24)
107 #define INT_EP_MAC_RESET_TIMEOUT	(23)
108 #define INT_EP_RDFO			(22)
109 #define INT_EP_TXE			(21)
110 #define INT_EP_USB_STATUS		(20)
111 #define INT_EP_TX_DIS			(19)
112 #define INT_EP_RX_DIS			(18)
113 #define INT_EP_PHY			(17)
114 #define INT_EP_DP			(16)
115 #define INT_EP_MAC_ERR			(15)
116 #define INT_EP_TDFU			(14)
117 #define INT_EP_TDFO			(13)
118 #define INT_EP_UTX			(12)
119 #define INT_EP_GPIO_11			(11)
120 #define INT_EP_GPIO_10			(10)
121 #define INT_EP_GPIO_9			(9)
122 #define INT_EP_GPIO_8			(8)
123 #define INT_EP_GPIO_7			(7)
124 #define INT_EP_GPIO_6			(6)
125 #define INT_EP_GPIO_5			(5)
126 #define INT_EP_GPIO_4			(4)
127 #define INT_EP_GPIO_3			(3)
128 #define INT_EP_GPIO_2			(2)
129 #define INT_EP_GPIO_1			(1)
130 #define INT_EP_GPIO_0			(0)
131 
/* ethtool statistics strings. Entry order must match the field order of
 * struct lan78xx_statstage / lan78xx_statstage64: the counters are copied
 * out positionally, not by name.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
181 
/* Snapshot of the device's 32-bit hardware statistics counters, filled by
 * lan78xx_read_stats() straight from the USB_VENDOR_REQUEST_GET_STATS
 * control transfer.  The counters are consumed positionally as an array
 * of u32 and the field order mirrors lan78xx_gstrings[], so fields must
 * not be reordered, removed, or resized.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
231 
/* Accumulated 64-bit statistics (32-bit hardware counters widened with
 * rollover compensation).  lan78xx_update_stats() walks this structure
 * and struct lan78xx_statstage positionally, so the field order of the
 * two structures must stay identical.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
281 
/* Register offsets exposed for a register dump; NOTE(review): the
 * consumer is outside this chunk (presumably ethtool get_regs) — confirm
 * against the rest of the file before relying on this list's ordering.
 */
static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};
303 
304 #define PHY_REG_SIZE (32 * sizeof(u32))
305 
306 struct lan78xx_net;
307 
/* Driver-private state, reached via (struct lan78xx_priv *)dev->data[0]. */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;		/* shadow of the RFE_CTL register, flushed by deferred work */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table; [i][0]=MAF_HI, [i][1]=MAF_LO */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast; /* defers filter/RFE register writes to sleepable context */
	struct work_struct set_vlan;
	u32 wol;
};
320 
/* Lifecycle state of an skb/URB pair, stored in skb->cb (struct skb_data). */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
330 
/* Per-skb bookkeeping overlaid on skb->cb. */
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;	/* see enum skb_state */
	size_t length;
	int num_of_packet;
};
338 
/* Pairs a USB control request with its owning device; NOTE(review):
 * usage is outside this chunk — presumably for asynchronous/deferred
 * control transfers.
 */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
343 
344 #define EVENT_TX_HALT			0
345 #define EVENT_RX_HALT			1
346 #define EVENT_RX_MEMORY			2
347 #define EVENT_STS_SPLIT			3
348 #define EVENT_LINK_RESET		4
349 #define EVENT_RX_PAUSED			5
350 #define EVENT_DEV_WAKING		6
351 #define EVENT_DEV_ASLEEP		7
352 #define EVENT_DEV_OPEN			8
353 #define EVENT_STAT_UPDATE		9
354 
/* Statistics bookkeeping: last raw hardware snapshot (saved), per-counter
 * rollover counts and maxima, and accumulated 64-bit totals (curr_stat);
 * all fields are guarded by access_lock (see lan78xx_update_stats()).
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};
362 
/* State for the driver's interrupt demultiplexing domain; NOTE(review):
 * domain setup and the chained handler live outside this chunk — confirm
 * details there.
 */
struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;
	struct mutex		irq_lock;		/* for irq bus access */
};
371 
/* Per-device driver state (the netdev_priv() area of the net_device). */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;

	int			rx_qlen;
	int			tx_qlen;
	struct sk_buff_head	rxq;
	struct sk_buff_head	txq;
	struct sk_buff_head	done;
	struct sk_buff_head	rxq_pause;
	struct sk_buff_head	txq_pend;

	struct tasklet_struct	bh;
	struct delayed_work	wq;

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;		/* EVENT_* bits */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;
	struct timer_list	stat_monitor;

	/* data[0] holds the struct lan78xx_priv pointer */
	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};
431 
432 /* define external phy id */
433 #define	PHY_LAN8835			(0x0007C130)
434 #define	PHY_KSZ9031RNX			(0x00221620)
435 
436 /* use ethtool to change the level for any given device */
437 static int msg_level = -1;
438 module_param(msg_level, int, 0);
439 MODULE_PARM_DESC(msg_level, "Override default message level");
440 
441 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
442 {
443 	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
444 	int ret;
445 
446 	if (!buf)
447 		return -ENOMEM;
448 
449 	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
450 			      USB_VENDOR_REQUEST_READ_REGISTER,
451 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
452 			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
453 	if (likely(ret >= 0)) {
454 		le32_to_cpus(buf);
455 		*data = *buf;
456 	} else {
457 		netdev_warn(dev->net,
458 			    "Failed to read register index 0x%08x. ret = %d",
459 			    index, ret);
460 	}
461 
462 	kfree(buf);
463 
464 	return ret;
465 }
466 
467 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
468 {
469 	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
470 	int ret;
471 
472 	if (!buf)
473 		return -ENOMEM;
474 
475 	*buf = data;
476 	cpu_to_le32s(buf);
477 
478 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
479 			      USB_VENDOR_REQUEST_WRITE_REGISTER,
480 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
481 			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
482 	if (unlikely(ret < 0)) {
483 		netdev_warn(dev->net,
484 			    "Failed to write register index 0x%08x. ret = %d",
485 			    index, ret);
486 	}
487 
488 	kfree(buf);
489 
490 	return ret;
491 }
492 
493 static int lan78xx_read_stats(struct lan78xx_net *dev,
494 			      struct lan78xx_statstage *data)
495 {
496 	int ret = 0;
497 	int i;
498 	struct lan78xx_statstage *stats;
499 	u32 *src;
500 	u32 *dst;
501 
502 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
503 	if (!stats)
504 		return -ENOMEM;
505 
506 	ret = usb_control_msg(dev->udev,
507 			      usb_rcvctrlpipe(dev->udev, 0),
508 			      USB_VENDOR_REQUEST_GET_STATS,
509 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
510 			      0,
511 			      0,
512 			      (void *)stats,
513 			      sizeof(*stats),
514 			      USB_CTRL_SET_TIMEOUT);
515 	if (likely(ret >= 0)) {
516 		src = (u32 *)stats;
517 		dst = (u32 *)data;
518 		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
519 			le32_to_cpus(&src[i]);
520 			dst[i] = src[i];
521 		}
522 	} else {
523 		netdev_warn(dev->net,
524 			    "Failed to read stat ret = 0x%x", ret);
525 	}
526 
527 	kfree(stats);
528 
529 	return ret;
530 }
531 
/* Detect 32-bit hardware counter wrap-around: if the freshly read value
 * is below the previously saved one, the counter must have rolled over
 * since the last read, so bump its rollover count.  Wrapped in
 * do { } while (0) so the macro expands to a single statement and is
 * safe inside un-braced if/else bodies (kernel coding style).
 */
#define check_counter_rollover(struct1, dev_stats, member)	\
	do {							\
		if (struct1->member < dev_stats.saved.member)	\
			dev_stats.rollover_count.member++;	\
	} while (0)
536 
/* Compare a fresh hardware counter snapshot against the previously saved
 * one; any counter whose value decreased must have wrapped its 32 bits,
 * so its rollover count is bumped (see check_counter_rollover()).  The
 * new snapshot is then saved for the next comparison.  Called by
 * lan78xx_update_stats() with dev->stats.access_lock held.
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	/* keep this snapshot as the baseline for the next rollover check */
	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
590 
/* Refresh dev->stats.curr_stat: read the hardware counters, account for
 * 32-bit rollovers, and combine into 64-bit totals.  The statstage (u32)
 * and statstage64 (u64) structures are walked positionally through
 * casted pointers, which relies on their field orders being identical.
 * Silently does nothing if the interface cannot be runtime-resumed.
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	/* a positive return means the snapshot was transferred */
	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	/* total = current + rollovers * (rollover_max + 1), per counter */
	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
618 
619 /* Loop until the read is completed with timeout called with phy_mutex held */
620 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
621 {
622 	unsigned long start_time = jiffies;
623 	u32 val;
624 	int ret;
625 
626 	do {
627 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
628 		if (unlikely(ret < 0))
629 			return -EIO;
630 
631 		if (!(val & MII_ACC_MII_BUSY_))
632 			return 0;
633 	} while (!time_after(jiffies, start_time + HZ));
634 
635 	return -EIO;
636 }
637 
638 static inline u32 mii_access(int id, int index, int read)
639 {
640 	u32 ret;
641 
642 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
643 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
644 	if (read)
645 		ret |= MII_ACC_MII_READ_;
646 	else
647 		ret |= MII_ACC_MII_WRITE_;
648 	ret |= MII_ACC_MII_BUSY_;
649 
650 	return ret;
651 }
652 
653 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
654 {
655 	unsigned long start_time = jiffies;
656 	u32 val;
657 	int ret;
658 
659 	do {
660 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
661 		if (unlikely(ret < 0))
662 			return -EIO;
663 
664 		if (!(val & E2P_CMD_EPC_BUSY_) ||
665 		    (val & E2P_CMD_EPC_TIMEOUT_))
666 			break;
667 		usleep_range(40, 100);
668 	} while (!time_after(jiffies, start_time + HZ));
669 
670 	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
671 		netdev_warn(dev->net, "EEPROM read operation timeout");
672 		return -EIO;
673 	}
674 
675 	return 0;
676 }
677 
678 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
679 {
680 	unsigned long start_time = jiffies;
681 	u32 val;
682 	int ret;
683 
684 	do {
685 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
686 		if (unlikely(ret < 0))
687 			return -EIO;
688 
689 		if (!(val & E2P_CMD_EPC_BUSY_))
690 			return 0;
691 
692 		usleep_range(40, 100);
693 	} while (!time_after(jiffies, start_time + HZ));
694 
695 	netdev_warn(dev->net, "EEPROM is busy");
696 	return -EIO;
697 }
698 
699 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
700 				   u32 length, u8 *data)
701 {
702 	u32 val;
703 	u32 saved;
704 	int i, ret;
705 	int retval;
706 
707 	/* depends on chip, some EEPROM pins are muxed with LED function.
708 	 * disable & restore LED function to access EEPROM.
709 	 */
710 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
711 	saved = val;
712 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
713 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
714 		ret = lan78xx_write_reg(dev, HW_CFG, val);
715 	}
716 
717 	retval = lan78xx_eeprom_confirm_not_busy(dev);
718 	if (retval)
719 		return retval;
720 
721 	for (i = 0; i < length; i++) {
722 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
723 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
724 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
725 		if (unlikely(ret < 0)) {
726 			retval = -EIO;
727 			goto exit;
728 		}
729 
730 		retval = lan78xx_wait_eeprom(dev);
731 		if (retval < 0)
732 			goto exit;
733 
734 		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
735 		if (unlikely(ret < 0)) {
736 			retval = -EIO;
737 			goto exit;
738 		}
739 
740 		data[i] = val & 0xFF;
741 		offset++;
742 	}
743 
744 	retval = 0;
745 exit:
746 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
747 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
748 
749 	return retval;
750 }
751 
752 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
753 			       u32 length, u8 *data)
754 {
755 	u8 sig;
756 	int ret;
757 
758 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
759 	if ((ret == 0) && (sig == EEPROM_INDICATOR))
760 		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
761 	else
762 		ret = -EINVAL;
763 
764 	return ret;
765 }
766 
/* Write 'length' bytes from 'data' to the EEPROM starting at 'offset':
 * enable write/erase once, then issue one data-load + WRITE command pair
 * per byte, waiting for completion between commands.  On LAN7800 the
 * EEPROM pins are muxed with the LEDs, so LED output is disabled for the
 * duration and the saved HW_CFG is restored on exit.  Returns 0 on
 * success, negative errno on failure.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* restore the LED configuration saved above */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
833 
/* Read 'length' bytes of raw OTP memory starting at 'offset'.  If the
 * OTP block is powered down it is woken first; then one READ command is
 * issued per byte, polling OTP_STATUS between commands.  Each polling
 * loop gives up after ~1s.  Returns 0 on success, -EIO on timeout.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* byte address is split across the two OTP address registers */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
887 
/* Program 'length' bytes of raw OTP memory starting at 'offset'.  Wakes
 * the OTP block if powered down, selects BYTE program mode, then issues
 * one program/verify command per byte, polling OTP_STATUS between
 * commands.  Each polling loop gives up after ~1s.  Returns 0 on
 * success, -EIO on timeout.  Note: OTP bits are one-time programmable.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* byte address is split across the two OTP address registers */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
940 
941 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
942 			    u32 length, u8 *data)
943 {
944 	u8 sig;
945 	int ret;
946 
947 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
948 
949 	if (ret == 0) {
950 		if (sig == OTP_INDICATOR_1)
951 			offset = offset;
952 		else if (sig == OTP_INDICATOR_2)
953 			offset += 0x100;
954 		else
955 			ret = -EINVAL;
956 		if (!ret)
957 			ret = lan78xx_read_raw_otp(dev, offset, length, data);
958 	}
959 
960 	return ret;
961 }
962 
963 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
964 {
965 	int i, ret;
966 
967 	for (i = 0; i < 100; i++) {
968 		u32 dp_sel;
969 
970 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
971 		if (unlikely(ret < 0))
972 			return -EIO;
973 
974 		if (dp_sel & DP_SEL_DPRDY_)
975 			return 0;
976 
977 		usleep_range(40, 100);
978 	}
979 
980 	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
981 
982 	return -EIO;
983 }
984 
985 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
986 				  u32 addr, u32 length, u32 *buf)
987 {
988 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
989 	u32 dp_sel;
990 	int i, ret;
991 
992 	if (usb_autopm_get_interface(dev->intf) < 0)
993 			return 0;
994 
995 	mutex_lock(&pdata->dataport_mutex);
996 
997 	ret = lan78xx_dataport_wait_not_busy(dev);
998 	if (ret < 0)
999 		goto done;
1000 
1001 	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1002 
1003 	dp_sel &= ~DP_SEL_RSEL_MASK_;
1004 	dp_sel |= ram_select;
1005 	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
1006 
1007 	for (i = 0; i < length; i++) {
1008 		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
1009 
1010 		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
1011 
1012 		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1013 
1014 		ret = lan78xx_dataport_wait_not_busy(dev);
1015 		if (ret < 0)
1016 			goto done;
1017 	}
1018 
1019 done:
1020 	mutex_unlock(&pdata->dataport_mutex);
1021 	usb_autopm_put_interface(dev->intf);
1022 
1023 	return ret;
1024 }
1025 
1026 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1027 				    int index, u8 addr[ETH_ALEN])
1028 {
1029 	u32	temp;
1030 
1031 	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1032 		temp = addr[3];
1033 		temp = addr[2] | (temp << 8);
1034 		temp = addr[1] | (temp << 8);
1035 		temp = addr[0] | (temp << 8);
1036 		pdata->pfilter_table[index][1] = temp;
1037 		temp = addr[5];
1038 		temp = addr[4] | (temp << 8);
1039 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1040 		pdata->pfilter_table[index][0] = temp;
1041 	}
1042 }
1043 
1044 /* returns hash bit number for given MAC address */
1045 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1046 {
1047 	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1048 }
1049 
/* Work handler: push the filter state staged by lan78xx_set_multicast()
 * to the hardware — multicast hash table via the dataport, perfect
 * filters via the MAF registers, then the cached RFE_CTL value.  Runs in
 * process context so the USB register writes may sleep.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	/* NOTE(review): DP_SEL_VHF_VLAN_LEN is passed as the dataport
	 * address and DP_SEL_VHF_HASH_LEN as the length — confirm against
	 * the datasheet that the hash table really starts at that offset.
	 */
	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	/* MAF_HI is cleared first (drops MAF_HI_VALID_) and rewritten
	 * last, after MAF_LO, so the entry is never half-updated.
	 */
	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
1074 
/* ndo_set_rx_mode handler: rebuild the software copy of the receive
 * filters under the rfe_ctl spinlock, then schedule the actual register
 * writes on a workqueue (this callback runs in atomic context while USB
 * register access sleeps).
 */
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	/* start from a clean unicast/multicast filter configuration */
	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
			pdata->mchash_table[i] = 0;
	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
			pdata->pfilter_table[i][0] =
			pdata->pfilter_table[i][1] = 0;
	}

	/* broadcast reception is always enabled */
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				/* overflow addresses go into the hash table */
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
1137 
/* Program MAC flow control (FLOW) and FIFO flow-control thresholds
 * (FCT_FLOW) from either the autoneg-resolved or the manually requested
 * pause capabilities.  Always returns 0; write errors are not propagated.
 * NOTE(review): the @duplex parameter is currently unused.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	if (dev->fc_autoneg)
		/* resolve pause config from both link partners' adverts */
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		/* 0xFFFF presumably fills the pause-time field -- TODO
		 * confirm against the FLOW register layout
		 */
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	/* speed-specific FIFO thresholds; presumably pause on/off levels
	 * -- TODO confirm the magic values against the datasheet
	 */
	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
1172 
/* Handle a PHY interrupt: re-read link state and reconcile the MAC.
 * Link down: reset the MAC and stop the statistics timer.
 * Link up: tune USB U1/U2 power states (SuperSpeed only), reprogram flow
 * control, and restart the statistics timer.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		/* link just went down */
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		/* link just came up */
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		/* NOTE(review): USB_CFG1 read/write errors are ignored */
		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		/* restart periodic statistics collection */
		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}
	}

	return ret;
}
1248 
1249 /* some work can't be done in tasklets, so we use keventd
1250  *
1251  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
1252  * but tasklet_schedule() doesn't.	hope the failure is rare.
1253  */
1254 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1255 {
1256 	set_bit(work, &dev->flags);
1257 	if (!schedule_delayed_work(&dev->wq, 0))
1258 		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1259 }
1260 
1261 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1262 {
1263 	u32 intdata;
1264 
1265 	if (urb->actual_length != 4) {
1266 		netdev_warn(dev->net,
1267 			    "unexpected urb length %d", urb->actual_length);
1268 		return;
1269 	}
1270 
1271 	memcpy(&intdata, urb->transfer_buffer, 4);
1272 	le32_to_cpus(&intdata);
1273 
1274 	if (intdata & INT_ENP_PHY_INT) {
1275 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1276 		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1277 
1278 		if (dev->domain_data.phyirq > 0)
1279 			generic_handle_irq(dev->domain_data.phyirq);
1280 	} else
1281 		netdev_warn(dev->net,
1282 			    "unexpected interrupt: 0x%08x\n", intdata);
1283 }
1284 
/* ethtool get_eeprom_len: fixed EEPROM size exposed to userspace */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1289 
/* ethtool get_eeprom: read raw EEPROM contents with USB autosuspend
 * held off.  Returns 0 on success or a negative error code.
 */
static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1308 
1309 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1310 				      struct ethtool_eeprom *ee, u8 *data)
1311 {
1312 	struct lan78xx_net *dev = netdev_priv(netdev);
1313 	int ret;
1314 
1315 	ret = usb_autopm_get_interface(dev->intf);
1316 	if (ret)
1317 		return ret;
1318 
1319 	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1320 	 * to load data from EEPROM
1321 	 */
1322 	if (ee->magic == LAN78XX_EEPROM_MAGIC)
1323 		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1324 	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1325 		 (ee->offset == 0) &&
1326 		 (ee->len == 512) &&
1327 		 (data[0] == OTP_INDICATOR_1))
1328 		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1329 
1330 	usb_autopm_put_interface(dev->intf);
1331 
1332 	return ret;
1333 }
1334 
1335 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1336 				u8 *data)
1337 {
1338 	if (stringset == ETH_SS_STATS)
1339 		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1340 }
1341 
1342 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1343 {
1344 	if (sset == ETH_SS_STATS)
1345 		return ARRAY_SIZE(lan78xx_gstrings);
1346 	else
1347 		return -EOPNOTSUPP;
1348 }
1349 
/* ethtool get_ethtool_stats: refresh the hardware counters, then copy
 * the cached snapshot out under the stats lock.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1361 
/* ethtool get_wol: report Wake-on-LAN capability and settings.  Wake-up
 * is only advertised while USB remote wakeup (USB_CFG_RMT_WKP_) is set.
 */
static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
			return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		/* register read failed: report no WoL support */
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}
1389 
1390 static int lan78xx_set_wol(struct net_device *netdev,
1391 			   struct ethtool_wolinfo *wol)
1392 {
1393 	struct lan78xx_net *dev = netdev_priv(netdev);
1394 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1395 	int ret;
1396 
1397 	ret = usb_autopm_get_interface(dev->intf);
1398 	if (ret < 0)
1399 		return ret;
1400 
1401 	pdata->wol = 0;
1402 	if (wol->wolopts & WAKE_UCAST)
1403 		pdata->wol |= WAKE_UCAST;
1404 	if (wol->wolopts & WAKE_MCAST)
1405 		pdata->wol |= WAKE_MCAST;
1406 	if (wol->wolopts & WAKE_BCAST)
1407 		pdata->wol |= WAKE_BCAST;
1408 	if (wol->wolopts & WAKE_MAGIC)
1409 		pdata->wol |= WAKE_MAGIC;
1410 	if (wol->wolopts & WAKE_PHY)
1411 		pdata->wol |= WAKE_PHY;
1412 	if (wol->wolopts & WAKE_ARP)
1413 		pdata->wol |= WAKE_ARP;
1414 
1415 	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1416 
1417 	phy_ethtool_set_wol(netdev->phydev, wol);
1418 
1419 	usb_autopm_put_interface(dev->intf);
1420 
1421 	return ret;
1422 }
1423 
/* ethtool get_eee: combine PHY EEE status with the MAC's EEE enable bit
 * and LPI request delay.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	/* NOTE(review): register read errors below are not checked before
	 * buf is consumed
	 */
	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* EEE is active only if both ends advertise a common mode */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1461 
/* ethtool set_eee: toggle the MAC EEE enable bit; when enabling, also
 * push the request to the PHY and program the LPI request delay.
 * NOTE(review): register/PHY errors are ignored and 0 is always
 * returned -- confirm whether errors should propagate.
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer share the same uSec unit */
		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
1491 
1492 static u32 lan78xx_get_link(struct net_device *net)
1493 {
1494 	phy_read_status(net->phydev);
1495 
1496 	return net->phydev->link;
1497 }
1498 
1499 static void lan78xx_get_drvinfo(struct net_device *net,
1500 				struct ethtool_drvinfo *info)
1501 {
1502 	struct lan78xx_net *dev = netdev_priv(net);
1503 
1504 	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1505 	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1506 }
1507 
/* ethtool get_msglevel: return the netif message-enable bitmap */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1514 
/* ethtool set_msglevel: set the netif message-enable bitmap */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1521 
/* ethtool get_link_ksettings: query link settings from the PHY with USB
 * autosuspend held off.
 */
static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	phy_ethtool_ksettings_get(phydev, cmd);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1539 
/* ethtool set_link_ksettings: apply speed/duplex/autoneg to the PHY. */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down
		 * NOTE(review): briefly sets BMCR_LOOPBACK to bounce the
		 * link so forced settings take effect -- confirm intent
		 */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1567 
/* ethtool get_pauseparam: report requested flow-control configuration */
static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;

	/* NOTE(review): ecmd is filled but never consumed here */
	phy_ethtool_ksettings_get(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
}
1585 
1586 static int lan78xx_set_pause(struct net_device *net,
1587 			     struct ethtool_pauseparam *pause)
1588 {
1589 	struct lan78xx_net *dev = netdev_priv(net);
1590 	struct phy_device *phydev = net->phydev;
1591 	struct ethtool_link_ksettings ecmd;
1592 	int ret;
1593 
1594 	phy_ethtool_ksettings_get(phydev, &ecmd);
1595 
1596 	if (pause->autoneg && !ecmd.base.autoneg) {
1597 		ret = -EINVAL;
1598 		goto exit;
1599 	}
1600 
1601 	dev->fc_request_control = 0;
1602 	if (pause->rx_pause)
1603 		dev->fc_request_control |= FLOW_CTRL_RX;
1604 
1605 	if (pause->tx_pause)
1606 		dev->fc_request_control |= FLOW_CTRL_TX;
1607 
1608 	if (ecmd.base.autoneg) {
1609 		u32 mii_adv;
1610 		u32 advertising;
1611 
1612 		ethtool_convert_link_mode_to_legacy_u32(
1613 			&advertising, ecmd.link_modes.advertising);
1614 
1615 		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1616 		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1617 		advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1618 
1619 		ethtool_convert_legacy_u32_to_link_mode(
1620 			ecmd.link_modes.advertising, advertising);
1621 
1622 		phy_ethtool_ksettings_set(phydev, &ecmd);
1623 	}
1624 
1625 	dev->fc_autoneg = pause->autoneg;
1626 
1627 	ret = 0;
1628 exit:
1629 	return ret;
1630 }
1631 
1632 static int lan78xx_get_regs_len(struct net_device *netdev)
1633 {
1634 	if (!netdev->phydev)
1635 		return (sizeof(lan78xx_regs));
1636 	else
1637 		return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1638 }
1639 
1640 static void
1641 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1642 		 void *buf)
1643 {
1644 	u32 *data = buf;
1645 	int i, j;
1646 	struct lan78xx_net *dev = netdev_priv(netdev);
1647 
1648 	/* Read Device/MAC registers */
1649 	for (i = 0; i < (sizeof(lan78xx_regs) / sizeof(u32)); i++)
1650 		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1651 
1652 	if (!netdev->phydev)
1653 		return;
1654 
1655 	/* Read PHY registers */
1656 	for (j = 0; j < 32; i++, j++)
1657 		data[i] = phy_read(netdev->phydev, j);
1658 }
1659 
/* ethtool operations table for lan78xx devices */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};
1683 
1684 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1685 {
1686 	if (!netif_running(netdev))
1687 		return -EINVAL;
1688 
1689 	return phy_mii_ioctl(netdev->phydev, rq, cmd);
1690 }
1691 
1692 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1693 {
1694 	u32 addr_lo, addr_hi;
1695 	int ret;
1696 	u8 addr[6];
1697 
1698 	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1699 	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1700 
1701 	addr[0] = addr_lo & 0xFF;
1702 	addr[1] = (addr_lo >> 8) & 0xFF;
1703 	addr[2] = (addr_lo >> 16) & 0xFF;
1704 	addr[3] = (addr_lo >> 24) & 0xFF;
1705 	addr[4] = addr_hi & 0xFF;
1706 	addr[5] = (addr_hi >> 8) & 0xFF;
1707 
1708 	if (!is_valid_ether_addr(addr)) {
1709 		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1710 			/* valid address present in Device Tree */
1711 			netif_dbg(dev, ifup, dev->net,
1712 				  "MAC address read from Device Tree");
1713 		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1714 						 ETH_ALEN, addr) == 0) ||
1715 			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1716 					      ETH_ALEN, addr) == 0)) &&
1717 			   is_valid_ether_addr(addr)) {
1718 			/* eeprom values are valid so use them */
1719 			netif_dbg(dev, ifup, dev->net,
1720 				  "MAC address read from EEPROM");
1721 		} else {
1722 			/* generate random MAC */
1723 			random_ether_addr(addr);
1724 			netif_dbg(dev, ifup, dev->net,
1725 				  "MAC address set to random addr");
1726 		}
1727 
1728 		addr_lo = addr[0] | (addr[1] << 8) |
1729 			  (addr[2] << 16) | (addr[3] << 24);
1730 		addr_hi = addr[4] | (addr[5] << 8);
1731 
1732 		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1733 		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1734 	}
1735 
1736 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1737 	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1738 
1739 	ether_addr_copy(dev->net->dev_addr, addr);
1740 }
1741 
/* MDIO read and write wrappers for phylib */

/* Read PHY register @idx of @phy_id through the MII_ACC/MII_DATA window.
 * Serialized by phy_mutex; USB autosuspend is held off for the duration.
 * Returns the 16-bit register value or a negative error code.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	/* wait for the transaction to complete */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	/* the low 16 bits hold the PHY register contents */
	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1778 
1779 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1780 				 u16 regval)
1781 {
1782 	struct lan78xx_net *dev = bus->priv;
1783 	u32 val, addr;
1784 	int ret;
1785 
1786 	ret = usb_autopm_get_interface(dev->intf);
1787 	if (ret < 0)
1788 		return ret;
1789 
1790 	mutex_lock(&dev->phy_mutex);
1791 
1792 	/* confirm MII not busy */
1793 	ret = lan78xx_phy_wait_not_busy(dev);
1794 	if (ret < 0)
1795 		goto done;
1796 
1797 	val = (u32)regval;
1798 	ret = lan78xx_write_reg(dev, MII_DATA, val);
1799 
1800 	/* set the address, index & direction (write to PHY) */
1801 	addr = mii_access(phy_id, idx, MII_WRITE);
1802 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
1803 
1804 	ret = lan78xx_phy_wait_not_busy(dev);
1805 	if (ret < 0)
1806 		goto done;
1807 
1808 done:
1809 	mutex_unlock(&dev->phy_mutex);
1810 	usb_autopm_put_interface(dev->intf);
1811 	return 0;
1812 }
1813 
/* Allocate and register the MDIO bus used to reach the chip's PHY(s).
 * LAN7800/7850 expose only the internal PHY (address 1); LAN7801 scans
 * external PHY addresses 0..7.  Registers through the "mdio" DT child
 * node when present.  Returns 0 on success or a negative error code.
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";

	/* bus id derived from the USB bus/device numbers */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	if (node) {
		ret = of_mdiobus_register(dev->mdiobus, node);
		of_node_put(node);
	} else {
		ret = mdiobus_register(dev->mdiobus);
	}
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
1863 
/* Tear down the MDIO bus created by lan78xx_mdio_init() */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1869 
/* phylib link-change callback.
 * NOTE(review): the read/write return values (ret, and the reads into
 * temp) are not error-checked here.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
1901 
/* irq_domain .map: attach the lan78xx irqchip and handler to a newly
 * mapped virtual IRQ.
 */
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}
1913 
/* irq_domain .unmap: detach chip and handler from a virtual IRQ */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
1919 
/* irq_domain ops mapping device interrupt bits onto lan78xx_irqchip */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
1924 
/* irqchip .irq_mask: clear the bit in the cached enable word; the
 * hardware register is updated later in irq_bus_sync_unlock.
 */
static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}
1931 
/* irqchip .irq_unmask: set the bit in the cached enable word; the
 * hardware register is updated later in irq_bus_sync_unlock.
 */
static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}
1938 
/* irqchip .irq_bus_lock: serialize slow-bus (USB) register updates */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
1945 
/* irqchip .irq_bus_sync_unlock: flush the cached enable word to the
 * INT_EP_CTL register (only when it changed), then drop the bus lock.
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are only two callbacks executed in non-atomic context.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}
1963 
/* irqchip backed by the INT_EP_CTL register; mask/unmask only touch the
 * cached enable word, which irq_bus_sync_unlock writes to hardware.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
1971 
/* Create an IRQ domain modelling the device's interrupt-endpoint bits
 * and pre-map the PHY interrupt (INT_EP_PHY).  On success,
 * dev->domain_data holds the domain and the mapped Linux IRQ number.
 * Returns 0 on success or -EINVAL.
 */
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	/* seed the cached enable word from the current register value */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}
2010 
2011 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2012 {
2013 	if (dev->domain_data.phyirq > 0) {
2014 		irq_dispose_mapping(dev->domain_data.phyirq);
2015 
2016 		if (dev->domain_data.irqdomain)
2017 			irq_domain_remove(dev->domain_data.irqdomain);
2018 	}
2019 	dev->domain_data.phyirq = 0;
2020 	dev->domain_data.irqdomain = NULL;
2021 }
2022 
/* PHY fixup for LAN8835: select IRQ_N mode on the shared
 * LED2/PME_N/IRQ_N/RGMII_ID pin and enable RGMII TX clock delay on the
 * MAC side.  NOTE(review): returns 1; phylib treats only negative fixup
 * return values as errors -- confirm this is intentional.
 */
static int lan8835_fixup(struct phy_device *phydev)
{
	int buf;
	int ret;
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
	buf &= ~0x1800;
	buf |= 0x0800;
	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);

	/* RGMII MAC TXC Delay Enable */
	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
				MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);

	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;

	return 1;
}
2046 
/* PHY fixup for KSZ9031RNX: program RGMII pad-skew values and record the
 * RGMII-RXID interface mode.  NOTE(review): returns 1; phylib treats
 * only negative fixup return values as errors -- confirm intent.
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
2063 
/* LAN7801-specific PHY setup: use the first PHY found on the MDIO bus,
 * registering external-PHY fixups for KSZ9031RNX and LAN8835; when no
 * PHY is present, fall back to a fixed 1000/Full PHY and configure the
 * MAC's RGMII clocking accordingly.  Returns the PHY (or fixed PHY), or
 * NULL on failure.  NOTE(review): register write errors (ret) in the
 * fixed-PHY path are not checked.
 */
static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phydev;

	phydev = phy_find_first(dev->mdiobus);
	if (!phydev) {
		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
		phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1,
					    NULL);
		if (IS_ERR(phydev)) {
			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
			return NULL;
		}
		netdev_dbg(dev->net, "Registered FIXED PHY\n");
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* enable TX clock delay and RGMII reference clocks */
		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
					MAC_RGMII_ID_TXC_DELAY_EN_);
		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		buf |= HW_CFG_CLK125_EN_;
		buf |= HW_CFG_REFCLK25_EN_;
		ret = lan78xx_write_reg(dev, HW_CFG, buf);
	} else {
		if (!phydev->drv) {
			netdev_err(dev->net, "no PHY driver found\n");
			return NULL;
		}
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* external PHY fixup for KSZ9031RNX */
		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
						 ksz9031rnx_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
			return NULL;
		}
		/* external PHY fixup for LAN8835 */
		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
						 lan8835_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
			return NULL;
		}
		/* add more external PHY fixup here if needed */

		phydev->is_internal = false;
	}
	return phydev;
}
2119 
/* Locate and attach the PHY for the detected chip variant.
 *
 * LAN7801 has no internal PHY: lan7801_phy_init() sets up an external
 * or fixed-link PHY and registers the required PHY fixups.  LAN7800
 * and LAN7850 use their internal GMII PHY.  On success the PHY is
 * connected to the net_device with lan78xx_link_status_change() as
 * the link-state callback and auto-negotiation is (re)started.
 *
 * Returns 0 on success or -EIO on any failure.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	int ret;
	u32 mii_adv;
	struct phy_device *phydev;

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7801_:
		phydev = lan7801_phy_init(dev);
		if (!phydev) {
			netdev_err(dev->net, "lan7801: PHY Init Failed");
			return -EIO;
		}
		break;

	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		phydev = phy_find_first(dev->mdiobus);
		if (!phydev) {
			netdev_err(dev->net, "no PHY found\n");
			return -EIO;
		}
		phydev->is_internal = true;
		dev->interface = PHY_INTERFACE_MODE_GMII;
		break;

	default:
		netdev_err(dev->net, "Unknown CHIP ID found\n");
		return -EIO;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = 0;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		/* undo what lan7801_phy_init() registered: either the
		 * fixed-link pseudo PHY or the external-PHY fixups
		 */
		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
			if (phy_is_pseudo_fixed_link(phydev)) {
				fixed_phy_unregister(phydev);
			} else {
				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
							     0xfffffff0);
				phy_unregister_fixup_for_uid(PHY_LAN8835,
							     0xfffffff0);
			}
		}
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phydev->supported &= ~SUPPORTED_1000baseT_Half;

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

	/* optional "microchip,led-modes" DT property selects which of
	 * the four LED outputs are enabled; its element count gates
	 * the per-LED enable bits below
	 */
	if (phydev->mdio.dev.of_node) {
		u32 reg;
		int len;

		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
						      "microchip,led-modes",
						      sizeof(u32));
		if (len >= 0) {
			/* Ensure the appropriate LEDs are enabled */
			lan78xx_read_reg(dev, HW_CFG, &reg);
			reg &= ~(HW_CFG_LED0_EN_ |
				 HW_CFG_LED1_EN_ |
				 HW_CFG_LED2_EN_ |
				 HW_CFG_LED3_EN_);
			reg |= (len > 0) * HW_CFG_LED0_EN_ |
				(len > 1) * HW_CFG_LED1_EN_ |
				(len > 2) * HW_CFG_LED2_EN_ |
				(len > 3) * HW_CFG_LED3_EN_;
			lan78xx_write_reg(dev, HW_CFG, reg);
		}
	}

	genphy_config_aneg(phydev);

	/* follow PHY autoneg state when resolving flow control */
	dev->fc_autoneg = phydev->autoneg;

	return 0;
}
2217 
2218 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2219 {
2220 	int ret = 0;
2221 	u32 buf;
2222 	bool rxenabled;
2223 
2224 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2225 
2226 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2227 
2228 	if (rxenabled) {
2229 		buf &= ~MAC_RX_RXEN_;
2230 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2231 	}
2232 
2233 	/* add 4 to size for FCS */
2234 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2235 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2236 
2237 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2238 
2239 	if (rxenabled) {
2240 		buf |= MAC_RX_RXEN_;
2241 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2242 	}
2243 
2244 	return 0;
2245 }
2246 
/* Asynchronously unlink every URB still pending on queue @q.
 *
 * Entries already marked unlink_start are skipped so each URB is
 * unlinked exactly once.  The queue lock must be dropped around
 * usb_unlink_urb() because the call races with the completion
 * handler (which requeues via defer_bh and takes the same lock), so
 * the walk restarts from the queue head after every unlink.
 *
 * Returns the number of URBs for which an unlink was initiated.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data	*entry;
		struct urb *urb;
		int ret;

		/* find the first entry not yet being unlinked */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2291 
2292 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2293 {
2294 	struct lan78xx_net *dev = netdev_priv(netdev);
2295 	int ll_mtu = new_mtu + netdev->hard_header_len;
2296 	int old_hard_mtu = dev->hard_mtu;
2297 	int old_rx_urb_size = dev->rx_urb_size;
2298 	int ret;
2299 
2300 	/* no second zero-length packet read wanted after mtu-sized packets */
2301 	if ((ll_mtu % dev->maxpacket) == 0)
2302 		return -EDOM;
2303 
2304 	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
2305 
2306 	netdev->mtu = new_mtu;
2307 
2308 	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2309 	if (dev->rx_urb_size == old_hard_mtu) {
2310 		dev->rx_urb_size = dev->hard_mtu;
2311 		if (dev->rx_urb_size > old_rx_urb_size) {
2312 			if (netif_running(dev->net)) {
2313 				unlink_urbs(dev, &dev->rxq);
2314 				tasklet_schedule(&dev->bh);
2315 			}
2316 		}
2317 	}
2318 
2319 	return 0;
2320 }
2321 
2322 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2323 {
2324 	struct lan78xx_net *dev = netdev_priv(netdev);
2325 	struct sockaddr *addr = p;
2326 	u32 addr_lo, addr_hi;
2327 	int ret;
2328 
2329 	if (netif_running(netdev))
2330 		return -EBUSY;
2331 
2332 	if (!is_valid_ether_addr(addr->sa_data))
2333 		return -EADDRNOTAVAIL;
2334 
2335 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
2336 
2337 	addr_lo = netdev->dev_addr[0] |
2338 		  netdev->dev_addr[1] << 8 |
2339 		  netdev->dev_addr[2] << 16 |
2340 		  netdev->dev_addr[3] << 24;
2341 	addr_hi = netdev->dev_addr[4] |
2342 		  netdev->dev_addr[5] << 8;
2343 
2344 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2345 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2346 
2347 	return 0;
2348 }
2349 
2350 /* Enable or disable Rx checksum offload engine */
2351 static int lan78xx_set_features(struct net_device *netdev,
2352 				netdev_features_t features)
2353 {
2354 	struct lan78xx_net *dev = netdev_priv(netdev);
2355 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2356 	unsigned long flags;
2357 	int ret;
2358 
2359 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2360 
2361 	if (features & NETIF_F_RXCSUM) {
2362 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2363 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2364 	} else {
2365 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2366 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2367 	}
2368 
2369 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2370 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2371 	else
2372 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2373 
2374 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2375 
2376 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2377 
2378 	return 0;
2379 }
2380 
2381 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2382 {
2383 	struct lan78xx_priv *pdata =
2384 			container_of(param, struct lan78xx_priv, set_vlan);
2385 	struct lan78xx_net *dev = pdata->dev;
2386 
2387 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2388 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2389 }
2390 
2391 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2392 				   __be16 proto, u16 vid)
2393 {
2394 	struct lan78xx_net *dev = netdev_priv(netdev);
2395 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2396 	u16 vid_bit_index;
2397 	u16 vid_dword_index;
2398 
2399 	vid_dword_index = (vid >> 5) & 0x7F;
2400 	vid_bit_index = vid & 0x1F;
2401 
2402 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2403 
2404 	/* defer register writes to a sleepable context */
2405 	schedule_work(&pdata->set_vlan);
2406 
2407 	return 0;
2408 }
2409 
2410 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2411 				    __be16 proto, u16 vid)
2412 {
2413 	struct lan78xx_net *dev = netdev_priv(netdev);
2414 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2415 	u16 vid_bit_index;
2416 	u16 vid_dword_index;
2417 
2418 	vid_dword_index = (vid >> 5) & 0x7F;
2419 	vid_bit_index = vid & 0x1F;
2420 
2421 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2422 
2423 	/* defer register writes to a sleepable context */
2424 	schedule_work(&pdata->set_vlan);
2425 
2426 	return 0;
2427 }
2428 
/* Program the USB Latency Tolerance Messaging (LTM) registers.
 *
 * When LTM is enabled in USB_CFG1, the six register values are loaded
 * from EEPROM (or OTP as a fallback) if a valid descriptor header is
 * found at offset 0x3F; otherwise, and on any read error before the
 * values were fetched, the registers are written as zero.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		/* temp[0] = descriptor length in bytes (must be 24),
		 * temp[1] = offset of the LTM block in 16-bit words
		 */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
2467 
/* Bring the chip to a known-good running state via a lite reset.
 *
 * Sequence: HW_CFG lite reset (polled until self-clearing), MAC
 * address restore, chip ID capture, USB/burst configuration sized for
 * the attached bus speed, LTM init, FIFO and flow-control setup,
 * receive filtering engine defaults, PHY reset (polled), MAC mode
 * selection, and finally enabling the TX/RX paths in both MAC and FCT.
 *
 * Returns 0 on success or -EIO if a self-clearing reset bit does not
 * clear within one second.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;
	u8 sig;

	/* trigger a lite reset and wait for the bit to self-clear */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* size the bulk-in burst cap and queue depths for the
	 * negotiated USB speed
	 */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* enable multiple ethernet frames per USB transfer */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	/* enable burst cap */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear pending interrupts, disable flow control for now */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* wait for the reset bit to clear AND the device to report ready */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable the transmit path: MAC first, then the FCT */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable the receive path: MAC first, then the FCT */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2605 
2606 static void lan78xx_init_stats(struct lan78xx_net *dev)
2607 {
2608 	u32 *p;
2609 	int i;
2610 
2611 	/* initialize for stats update
2612 	 * some counters are 20bits and some are 32bits
2613 	 */
2614 	p = (u32 *)&dev->stats.rollover_max;
2615 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2616 		p[i] = 0xFFFFF;
2617 
2618 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2619 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2620 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2621 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2622 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2623 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2624 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2625 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2626 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2627 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2628 
2629 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
2630 }
2631 
/* ndo_open: power up the interface and start the datapath.
 *
 * Takes a runtime-PM reference (released by lan78xx_stop() on the
 * success path), starts the PHY state machine, submits the interrupt
 * URB used for link-change notification, initialises statistics
 * bookkeeping and defers a link reset so the MAC is programmed once
 * the PHY reports its link parameters.
 *
 * Returns 0 on success or a negative errno.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			/* NOTE(review): the PHY stays started and the
			 * autopm reference is dropped below on this
			 * error path — confirm this is intentional.
			 */
			goto done;
		}
	}

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* force a link-status evaluation via the kevent worker */
	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2670 
/* Unlink all TX/RX URBs and wait for their completions to drain.
 *
 * dev->wait is pointed at an on-stack waitqueue so completion paths
 * can wake this thread; it is cleared again before returning so no
 * completion can touch the stack frame afterwards.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	/* NOTE(review): this && chain stops waiting as soon as ANY of
	 * the three queues is empty; if the intent is to wait until all
	 * of them have drained, the conditions should be ||'d — compare
	 * with usbnet_terminate_urbs() and confirm.
	 */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
2696 
/* ndo_stop: quiesce the datapath and release resources taken by
 * lan78xx_open().
 *
 * Stops the stat timer and PHY, drains all in-flight URBs, kills the
 * interrupt URB, and neutralises deferred work by clearing dev->flags
 * before cancelling it (so requeued workers become no-ops), then
 * drops the runtime-PM reference.  Always returns 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net		*dev = netdev_priv(net);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	if (net->phydev)
		phy_stop(net->phydev);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2733 
/* Flatten a fragmented skb into one contiguous buffer so it can be
 * submitted as a single bulk-out transfer; thin wrapper around
 * skb_linearize() kept for readability at the call site.
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
2738 
2739 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2740 				       struct sk_buff *skb, gfp_t flags)
2741 {
2742 	u32 tx_cmd_a, tx_cmd_b;
2743 
2744 	if (skb_cow_head(skb, TX_OVERHEAD)) {
2745 		dev_kfree_skb_any(skb);
2746 		return NULL;
2747 	}
2748 
2749 	if (lan78xx_linearize(skb) < 0)
2750 		return NULL;
2751 
2752 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2753 
2754 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2755 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2756 
2757 	tx_cmd_b = 0;
2758 	if (skb_is_gso(skb)) {
2759 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2760 
2761 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2762 
2763 		tx_cmd_a |= TX_CMD_A_LSO_;
2764 	}
2765 
2766 	if (skb_vlan_tag_present(skb)) {
2767 		tx_cmd_a |= TX_CMD_A_IVTG_;
2768 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2769 	}
2770 
2771 	skb_push(skb, 4);
2772 	cpu_to_le32s(&tx_cmd_b);
2773 	memcpy(skb->data, &tx_cmd_b, 4);
2774 
2775 	skb_push(skb, 4);
2776 	cpu_to_le32s(&tx_cmd_a);
2777 	memcpy(skb->data, &tx_cmd_a, 4);
2778 
2779 	return skb;
2780 }
2781 
/* Move @skb from its active queue @list to dev->done and record its
 * new @state; called from URB completion (interrupt) context.
 *
 * Interrupts stay disabled across the whole move: flags saved when
 * taking the first lock are restored only when the second lock is
 * released.  The bottom half is scheduled only when the done queue
 * transitions from empty to non-empty, batching completions.
 *
 * Returns the skb's previous state so the caller can detect a racing
 * unlink (unlink_start).
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2804 
/* Completion handler for a bulk-out (TX) URB.
 *
 * On success, credits the packet/byte counters with the totals cached
 * in the skb_data at submit time.  On error, counts a tx error and
 * maps the USB status onto recovery actions.  In all cases the
 * runtime-PM reference taken at submit time is released and the skb
 * is handed to the bottom half for cleanup via defer_bh().
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled; clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		/* bus errors: stop feeding the device */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2843 
2844 static void lan78xx_queue_skb(struct sk_buff_head *list,
2845 			      struct sk_buff *newsk, enum skb_state state)
2846 {
2847 	struct skb_data *entry = (struct skb_data *)newsk->cb;
2848 
2849 	__skb_queue_tail(list, newsk);
2850 	entry->state = state;
2851 }
2852 
2853 static netdev_tx_t
2854 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2855 {
2856 	struct lan78xx_net *dev = netdev_priv(net);
2857 	struct sk_buff *skb2 = NULL;
2858 
2859 	if (skb) {
2860 		skb_tx_timestamp(skb);
2861 		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2862 	}
2863 
2864 	if (skb2) {
2865 		skb_queue_tail(&dev->txq_pend, skb2);
2866 
2867 		/* throttle TX patch at slower than SUPER SPEED USB */
2868 		if ((dev->udev->speed < USB_SPEED_SUPER) &&
2869 		    (skb_queue_len(&dev->txq_pend) > 10))
2870 			netif_stop_queue(net);
2871 	} else {
2872 		netif_dbg(dev, tx_err, dev->net,
2873 			  "lan78xx_tx_prep return NULL\n");
2874 		dev->net->stats.tx_errors++;
2875 		dev->net->stats.tx_dropped++;
2876 	}
2877 
2878 	tasklet_schedule(&dev->bh);
2879 
2880 	return NETDEV_TX_OK;
2881 }
2882 
/* Scan the interface's altsettings for the bulk-in, bulk-out and
 * (optional) interrupt-in endpoints, then cache the resulting pipes
 * and the interrupt endpoint descriptor on @dev.
 *
 * Returns 0 on success, or -EINVAL if no altsetting provides a
 * bulk-in/bulk-out pair.
 */
static int
lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only IN interrupt endpoints qualify */
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				/* iso/control endpoints are not used */
				continue;
			}
			/* remember the first match of each kind */
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2941 
2942 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2943 {
2944 	struct lan78xx_priv *pdata = NULL;
2945 	int ret;
2946 	int i;
2947 
2948 	ret = lan78xx_get_endpoints(dev, intf);
2949 
2950 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2951 
2952 	pdata = (struct lan78xx_priv *)(dev->data[0]);
2953 	if (!pdata) {
2954 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2955 		return -ENOMEM;
2956 	}
2957 
2958 	pdata->dev = dev;
2959 
2960 	spin_lock_init(&pdata->rfe_ctl_lock);
2961 	mutex_init(&pdata->dataport_mutex);
2962 
2963 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2964 
2965 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2966 		pdata->vlan_table[i] = 0;
2967 
2968 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2969 
2970 	dev->net->features = 0;
2971 
2972 	if (DEFAULT_TX_CSUM_ENABLE)
2973 		dev->net->features |= NETIF_F_HW_CSUM;
2974 
2975 	if (DEFAULT_RX_CSUM_ENABLE)
2976 		dev->net->features |= NETIF_F_RXCSUM;
2977 
2978 	if (DEFAULT_TSO_CSUM_ENABLE)
2979 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2980 
2981 	dev->net->hw_features = dev->net->features;
2982 
2983 	ret = lan78xx_setup_irq_domain(dev);
2984 	if (ret < 0) {
2985 		netdev_warn(dev->net,
2986 			    "lan78xx_setup_irq_domain() failed : %d", ret);
2987 		goto out1;
2988 	}
2989 
2990 	dev->net->hard_header_len += TX_OVERHEAD;
2991 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2992 
2993 	/* Init all registers */
2994 	ret = lan78xx_reset(dev);
2995 	if (ret) {
2996 		netdev_warn(dev->net, "Registers INIT FAILED....");
2997 		goto out2;
2998 	}
2999 
3000 	ret = lan78xx_mdio_init(dev);
3001 	if (ret) {
3002 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
3003 		goto out2;
3004 	}
3005 
3006 	dev->net->flags |= IFF_MULTICAST;
3007 
3008 	pdata->wol = WAKE_MAGIC;
3009 
3010 	return ret;
3011 
3012 out2:
3013 	lan78xx_remove_irq_domain(dev);
3014 
3015 out1:
3016 	netdev_warn(dev->net, "Bind routine FAILED");
3017 	cancel_work_sync(&pdata->set_multicast);
3018 	cancel_work_sync(&pdata->set_vlan);
3019 	kfree(pdata);
3020 	return ret;
3021 }
3022 
3023 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3024 {
3025 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3026 
3027 	lan78xx_remove_irq_domain(dev);
3028 
3029 	lan78xx_remove_mdio(dev);
3030 
3031 	if (pdata) {
3032 		cancel_work_sync(&pdata->set_multicast);
3033 		cancel_work_sync(&pdata->set_vlan);
3034 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3035 		kfree(pdata);
3036 		pdata = NULL;
3037 		dev->data[0] = 0;
3038 	}
3039 }
3040 
3041 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3042 				    struct sk_buff *skb,
3043 				    u32 rx_cmd_a, u32 rx_cmd_b)
3044 {
3045 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
3046 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
3047 		skb->ip_summed = CHECKSUM_NONE;
3048 	} else {
3049 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3050 		skb->ip_summed = CHECKSUM_COMPLETE;
3051 	}
3052 }
3053 
/* Deliver a received frame to the network stack.
 *
 * While RX is paused the frame is parked on rxq_pause for later
 * replay.  Otherwise statistics are updated, the protocol is
 * resolved, the control block is scrubbed, and the skb is handed to
 * netif_rx() unless the timestamping core defers delivery.
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int		status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* clear driver state before the skb leaves the driver */
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* hardware-timestamped frames are delivered later by the stack */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
3080 
/* Split one bulk-in URB buffer into individual ethernet frames.
 *
 * Each frame is preceded by three little-endian RX command words:
 * A (flags and payload length), B (checksum) and C.  Frames are
 * 4-byte aligned within the buffer.  All but the last frame are
 * cloned and delivered immediately; the final frame is left in the
 * caller's skb (trimmed of its FCS) for the caller to deliver.
 *
 * Returns 1 on success, 0 when the buffer is too short or a clone
 * allocation fails (the caller counts an rx error).
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* pull the three command words off the front */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* receive-error flag set; skip this frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			/* point the clone at just this frame's payload */
			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3152 
3153 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3154 {
3155 	if (!lan78xx_rx(dev, skb)) {
3156 		dev->net->stats.rx_errors++;
3157 		goto done;
3158 	}
3159 
3160 	if (skb->len) {
3161 		lan78xx_skb_return(dev, skb);
3162 		return;
3163 	}
3164 
3165 	netif_dbg(dev, rx_err, dev->net, "drop\n");
3166 	dev->net->stats.rx_errors++;
3167 done:
3168 	skb_queue_tail(&dev->done, skb);
3169 }
3170 
3171 static void rx_complete(struct urb *urb);
3172 
/* Allocate an RX skb and submit @urb for a bulk-in transfer.
 *
 * Submission is refused (-ENOLINK) while the device is absent,
 * stopped, halted or runtime-suspended.  On any failure the skb and
 * the urb are freed here; on success ownership passes to the URB
 * completion path.  Returns 0 on successful submission.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	/* rxq lock serialises the queue/submit pair against unlinking */
	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint; recover from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* let the bottom half retry later */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
3232 
/* Bulk-in URB completion handler (runs in interrupt context).
 *
 * Classifies the completion status, hands the skb to the bottom half via
 * defer_bh() and, when the link is still usable, immediately reuses the
 * urb for another rx submission. For fatal/unlink statuses the urb is
 * re-attached to the skb (entry->urb) so the rx_cleanup path frees it.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* runt frames can't even hold a link-layer header */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* don't resubmit; hand the urb back for cleanup */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	/* resubmit right away unless the device is going down or unlinking */
	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
3302 
3303 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3304 {
3305 	int length;
3306 	struct urb *urb = NULL;
3307 	struct skb_data *entry;
3308 	unsigned long flags;
3309 	struct sk_buff_head *tqp = &dev->txq_pend;
3310 	struct sk_buff *skb, *skb2;
3311 	int ret;
3312 	int count, pos;
3313 	int skb_totallen, pkt_cnt;
3314 
3315 	skb_totallen = 0;
3316 	pkt_cnt = 0;
3317 	count = 0;
3318 	length = 0;
3319 	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
3320 		if (skb_is_gso(skb)) {
3321 			if (pkt_cnt) {
3322 				/* handle previous packets first */
3323 				break;
3324 			}
3325 			count = 1;
3326 			length = skb->len - TX_OVERHEAD;
3327 			skb2 = skb_dequeue(tqp);
3328 			goto gso_skb;
3329 		}
3330 
3331 		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3332 			break;
3333 		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3334 		pkt_cnt++;
3335 	}
3336 
3337 	/* copy to a single skb */
3338 	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3339 	if (!skb)
3340 		goto drop;
3341 
3342 	skb_put(skb, skb_totallen);
3343 
3344 	for (count = pos = 0; count < pkt_cnt; count++) {
3345 		skb2 = skb_dequeue(tqp);
3346 		if (skb2) {
3347 			length += (skb2->len - TX_OVERHEAD);
3348 			memcpy(skb->data + pos, skb2->data, skb2->len);
3349 			pos += roundup(skb2->len, sizeof(u32));
3350 			dev_kfree_skb(skb2);
3351 		}
3352 	}
3353 
3354 gso_skb:
3355 	urb = usb_alloc_urb(0, GFP_ATOMIC);
3356 	if (!urb)
3357 		goto drop;
3358 
3359 	entry = (struct skb_data *)skb->cb;
3360 	entry->urb = urb;
3361 	entry->dev = dev;
3362 	entry->length = length;
3363 	entry->num_of_packet = count;
3364 
3365 	spin_lock_irqsave(&dev->txq.lock, flags);
3366 	ret = usb_autopm_get_interface_async(dev->intf);
3367 	if (ret < 0) {
3368 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3369 		goto drop;
3370 	}
3371 
3372 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3373 			  skb->data, skb->len, tx_complete, skb);
3374 
3375 	if (length % dev->maxpacket == 0) {
3376 		/* send USB_ZERO_PACKET */
3377 		urb->transfer_flags |= URB_ZERO_PACKET;
3378 	}
3379 
3380 #ifdef CONFIG_PM
3381 	/* if this triggers the device is still a sleep */
3382 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3383 		/* transmission will be done in resume */
3384 		usb_anchor_urb(urb, &dev->deferred);
3385 		/* no use to process more packets */
3386 		netif_stop_queue(dev->net);
3387 		usb_put_urb(urb);
3388 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3389 		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3390 		return;
3391 	}
3392 #endif
3393 
3394 	ret = usb_submit_urb(urb, GFP_ATOMIC);
3395 	switch (ret) {
3396 	case 0:
3397 		netif_trans_update(dev->net);
3398 		lan78xx_queue_skb(&dev->txq, skb, tx_start);
3399 		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3400 			netif_stop_queue(dev->net);
3401 		break;
3402 	case -EPIPE:
3403 		netif_stop_queue(dev->net);
3404 		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3405 		usb_autopm_put_interface_async(dev->intf);
3406 		break;
3407 	default:
3408 		usb_autopm_put_interface_async(dev->intf);
3409 		netif_dbg(dev, tx_err, dev->net,
3410 			  "tx: submit urb err %d\n", ret);
3411 		break;
3412 	}
3413 
3414 	spin_unlock_irqrestore(&dev->txq.lock, flags);
3415 
3416 	if (ret) {
3417 		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3418 drop:
3419 		dev->net->stats.tx_dropped++;
3420 		if (skb)
3421 			dev_kfree_skb_any(skb);
3422 		usb_free_urb(urb);
3423 	} else
3424 		netif_dbg(dev, tx_queued, dev->net,
3425 			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
3426 }
3427 
3428 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3429 {
3430 	struct urb *urb;
3431 	int i;
3432 
3433 	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3434 		for (i = 0; i < 10; i++) {
3435 			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3436 				break;
3437 			urb = usb_alloc_urb(0, GFP_ATOMIC);
3438 			if (urb)
3439 				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3440 					return;
3441 		}
3442 
3443 		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3444 			tasklet_schedule(&dev->bh);
3445 	}
3446 	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3447 		netif_wake_queue(dev->net);
3448 }
3449 
3450 static void lan78xx_bh(unsigned long param)
3451 {
3452 	struct lan78xx_net *dev = (struct lan78xx_net *)param;
3453 	struct sk_buff *skb;
3454 	struct skb_data *entry;
3455 
3456 	while ((skb = skb_dequeue(&dev->done))) {
3457 		entry = (struct skb_data *)(skb->cb);
3458 		switch (entry->state) {
3459 		case rx_done:
3460 			entry->state = rx_cleanup;
3461 			rx_process(dev, skb);
3462 			continue;
3463 		case tx_done:
3464 			usb_free_urb(entry->urb);
3465 			dev_kfree_skb(skb);
3466 			continue;
3467 		case rx_cleanup:
3468 			usb_free_urb(entry->urb);
3469 			dev_kfree_skb(skb);
3470 			continue;
3471 		default:
3472 			netdev_dbg(dev->net, "skb state %d\n", entry->state);
3473 			return;
3474 		}
3475 	}
3476 
3477 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
3478 		/* reset update timer delta */
3479 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3480 			dev->delta = 1;
3481 			mod_timer(&dev->stat_monitor,
3482 				  jiffies + STAT_UPDATE_TIMER);
3483 		}
3484 
3485 		if (!skb_queue_empty(&dev->txq_pend))
3486 			lan78xx_tx_bh(dev);
3487 
3488 		if (!timer_pending(&dev->delay) &&
3489 		    !test_bit(EVENT_RX_HALT, &dev->flags))
3490 			lan78xx_rx_bh(dev);
3491 	}
3492 }
3493 
3494 static void lan78xx_delayedwork(struct work_struct *work)
3495 {
3496 	int status;
3497 	struct lan78xx_net *dev;
3498 
3499 	dev = container_of(work, struct lan78xx_net, wq.work);
3500 
3501 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3502 		unlink_urbs(dev, &dev->txq);
3503 		status = usb_autopm_get_interface(dev->intf);
3504 		if (status < 0)
3505 			goto fail_pipe;
3506 		status = usb_clear_halt(dev->udev, dev->pipe_out);
3507 		usb_autopm_put_interface(dev->intf);
3508 		if (status < 0 &&
3509 		    status != -EPIPE &&
3510 		    status != -ESHUTDOWN) {
3511 			if (netif_msg_tx_err(dev))
3512 fail_pipe:
3513 				netdev_err(dev->net,
3514 					   "can't clear tx halt, status %d\n",
3515 					   status);
3516 		} else {
3517 			clear_bit(EVENT_TX_HALT, &dev->flags);
3518 			if (status != -ESHUTDOWN)
3519 				netif_wake_queue(dev->net);
3520 		}
3521 	}
3522 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3523 		unlink_urbs(dev, &dev->rxq);
3524 		status = usb_autopm_get_interface(dev->intf);
3525 		if (status < 0)
3526 				goto fail_halt;
3527 		status = usb_clear_halt(dev->udev, dev->pipe_in);
3528 		usb_autopm_put_interface(dev->intf);
3529 		if (status < 0 &&
3530 		    status != -EPIPE &&
3531 		    status != -ESHUTDOWN) {
3532 			if (netif_msg_rx_err(dev))
3533 fail_halt:
3534 				netdev_err(dev->net,
3535 					   "can't clear rx halt, status %d\n",
3536 					   status);
3537 		} else {
3538 			clear_bit(EVENT_RX_HALT, &dev->flags);
3539 			tasklet_schedule(&dev->bh);
3540 		}
3541 	}
3542 
3543 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3544 		int ret = 0;
3545 
3546 		clear_bit(EVENT_LINK_RESET, &dev->flags);
3547 		status = usb_autopm_get_interface(dev->intf);
3548 		if (status < 0)
3549 			goto skip_reset;
3550 		if (lan78xx_link_reset(dev) < 0) {
3551 			usb_autopm_put_interface(dev->intf);
3552 skip_reset:
3553 			netdev_info(dev->net, "link reset failed (%d)\n",
3554 				    ret);
3555 		} else {
3556 			usb_autopm_put_interface(dev->intf);
3557 		}
3558 	}
3559 
3560 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3561 		lan78xx_update_stats(dev);
3562 
3563 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3564 
3565 		mod_timer(&dev->stat_monitor,
3566 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
3567 
3568 		dev->delta = min((dev->delta * 2), 50);
3569 	}
3570 }
3571 
3572 static void intr_complete(struct urb *urb)
3573 {
3574 	struct lan78xx_net *dev = urb->context;
3575 	int status = urb->status;
3576 
3577 	switch (status) {
3578 	/* success */
3579 	case 0:
3580 		lan78xx_status(dev, urb);
3581 		break;
3582 
3583 	/* software-driven interface shutdown */
3584 	case -ENOENT:			/* urb killed */
3585 	case -ESHUTDOWN:		/* hardware gone */
3586 		netif_dbg(dev, ifdown, dev->net,
3587 			  "intr shutdown, code %d\n", status);
3588 		return;
3589 
3590 	/* NOTE:  not throttling like RX/TX, since this endpoint
3591 	 * already polls infrequently
3592 	 */
3593 	default:
3594 		netdev_dbg(dev->net, "intr status %d\n", status);
3595 		break;
3596 	}
3597 
3598 	if (!netif_running(dev->net))
3599 		return;
3600 
3601 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3602 	status = usb_submit_urb(urb, GFP_ATOMIC);
3603 	if (status != 0)
3604 		netif_err(dev, timer, dev->net,
3605 			  "intr resubmit --> %d\n", status);
3606 }
3607 
/* USB disconnect handler: tear down the PHY, the netdev and all pending
 * URBs/deferred work for this interface, in that order.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net		*dev;
	struct usb_device		*udev;
	struct net_device		*net;
	struct phy_device		*phydev;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);
	net = dev->net;
	/* keep our own phydev pointer; needed after phy_disconnect() */
	phydev = net->phydev;

	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_disconnect(net->phydev);

	/* a fixed-link "PHY" was registered by this driver; drop it too */
	if (phy_is_pseudo_fixed_link(phydev))
		fixed_phy_unregister(phydev);

	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* drop any tx URBs parked while the device was suspended */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
3646 
/* Watchdog callback: tx stalled past watchdog_timeo. Unlink all
 * in-flight tx URBs and kick the bottom half to restart transmission.
 */
static void lan78xx_tx_timeout(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}
3654 
/* net_device callbacks for the lan78xx network interface */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
};
3669 
/* Periodic statistics timer: defer the (sleepable USB) statistics read
 * to the kevent workqueue.
 */
static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
3676 
3677 static int lan78xx_probe(struct usb_interface *intf,
3678 			 const struct usb_device_id *id)
3679 {
3680 	struct lan78xx_net *dev;
3681 	struct net_device *netdev;
3682 	struct usb_device *udev;
3683 	int ret;
3684 	unsigned maxp;
3685 	unsigned period;
3686 	u8 *buf = NULL;
3687 
3688 	udev = interface_to_usbdev(intf);
3689 	udev = usb_get_dev(udev);
3690 
3691 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3692 	if (!netdev) {
3693 		dev_err(&intf->dev, "Error: OOM\n");
3694 		ret = -ENOMEM;
3695 		goto out1;
3696 	}
3697 
3698 	/* netdev_printk() needs this */
3699 	SET_NETDEV_DEV(netdev, &intf->dev);
3700 
3701 	dev = netdev_priv(netdev);
3702 	dev->udev = udev;
3703 	dev->intf = intf;
3704 	dev->net = netdev;
3705 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3706 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
3707 
3708 	skb_queue_head_init(&dev->rxq);
3709 	skb_queue_head_init(&dev->txq);
3710 	skb_queue_head_init(&dev->done);
3711 	skb_queue_head_init(&dev->rxq_pause);
3712 	skb_queue_head_init(&dev->txq_pend);
3713 	mutex_init(&dev->phy_mutex);
3714 
3715 	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3716 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3717 	init_usb_anchor(&dev->deferred);
3718 
3719 	netdev->netdev_ops = &lan78xx_netdev_ops;
3720 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3721 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
3722 
3723 	dev->delta = 1;
3724 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3725 
3726 	mutex_init(&dev->stats.access_lock);
3727 
3728 	ret = lan78xx_bind(dev, intf);
3729 	if (ret < 0)
3730 		goto out2;
3731 	strcpy(netdev->name, "eth%d");
3732 
3733 	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3734 		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3735 
3736 	/* MTU range: 68 - 9000 */
3737 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3738 
3739 	dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3740 	dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3741 	dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3742 
3743 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3744 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3745 
3746 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
3747 					dev->ep_intr->desc.bEndpointAddress &
3748 					USB_ENDPOINT_NUMBER_MASK);
3749 	period = dev->ep_intr->desc.bInterval;
3750 
3751 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3752 	buf = kmalloc(maxp, GFP_KERNEL);
3753 	if (buf) {
3754 		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3755 		if (!dev->urb_intr) {
3756 			ret = -ENOMEM;
3757 			kfree(buf);
3758 			goto out3;
3759 		} else {
3760 			usb_fill_int_urb(dev->urb_intr, dev->udev,
3761 					 dev->pipe_intr, buf, maxp,
3762 					 intr_complete, dev, period);
3763 		}
3764 	}
3765 
3766 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3767 
3768 	/* driver requires remote-wakeup capability during autosuspend. */
3769 	intf->needs_remote_wakeup = 1;
3770 
3771 	ret = register_netdev(netdev);
3772 	if (ret != 0) {
3773 		netif_err(dev, probe, netdev, "couldn't register the device\n");
3774 		goto out3;
3775 	}
3776 
3777 	usb_set_intfdata(intf, dev);
3778 
3779 	ret = device_set_wakeup_enable(&udev->dev, true);
3780 
3781 	 /* Default delay of 2sec has more overhead than advantage.
3782 	  * Set to 10sec as default.
3783 	  */
3784 	pm_runtime_set_autosuspend_delay(&udev->dev,
3785 					 DEFAULT_AUTOSUSPEND_DELAY);
3786 
3787 	ret = lan78xx_phy_init(dev);
3788 	if (ret < 0)
3789 		goto out4;
3790 
3791 	return 0;
3792 
3793 out4:
3794 	unregister_netdev(netdev);
3795 out3:
3796 	lan78xx_unbind(dev, intf);
3797 out2:
3798 	free_netdev(netdev);
3799 out1:
3800 	usb_put_dev(udev);
3801 
3802 	return ret;
3803 }
3804 
/* Compute the CRC16 (poly 0x8005, init 0xFFFF, LSB-first input bits)
 * used by the hardware wake-up frame filters over @len bytes of @buf.
 */
static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
{
	const u16 poly = 0x8005;
	u16 crc = 0xFFFF;
	int i, j;

	for (i = 0; i < len; i++) {
		u8 byte = buf[i];

		/* feed the byte in LSB-first, one bit per step */
		for (j = 0; j < 8; j++, byte >>= 1) {
			u16 do_xor = (crc >> 15) ^ (byte & 1);

			crc <<= 1;
			if (do_xor)
				crc = (crc ^ poly) | 0x0001;
		}
	}

	return crc;
}
3829 
/* Program the wake-up frame filters and PMT_CTL suspend mode for the
 * WoL modes requested in @wol (WAKE_PHY/MAGIC/BCAST/MCAST/UCAST/ARP),
 * then re-enable the receiver so wake packets can be detected.
 *
 * NOTE(review): the return code of every register access is assigned to
 * 'ret' but never checked; the function always returns 0.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	/* only the first two bytes are hashed (crc over len 2) */
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* stop TX and RX while reconfiguring the wake-up logic */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear stale wake-up control/status and wake sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* start with every wake-up frame filter disabled */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* match the first 3 bytes of the destination address */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable the receiver so wake frames can be seen */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3972 
/* USB suspend callback, for both autosuspend (PMSG_IS_AUTO) and system
 * sleep.
 *
 * Returns -EBUSY to veto autosuspend while tx traffic is pending,
 * otherwise 0.
 *
 * NOTE(review): register-access return codes are assigned to 'ret' but
 * never checked; 'event' is set and never used.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	event = message.event;

	/* only the first (outermost) suspend quiesces the hardware */
	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
			PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* no statistics polling while asleep */
		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			/* clear stale wake status/sources */
			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* clear any pending wake-up status */
			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* keep the receiver on so wake frames are seen */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			/* system sleep: program the user-configured WoL */
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
4068 
/* USB resume callback: restart the interrupt URB, replay tx URBs that
 * were deferred while asleep, clear the wake-up status registers and
 * re-enable the MAC transmitter.
 *
 * NOTE(review): register-access return codes are collected in 'ret' but
 * the function always returns 0.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	/* restart statistics polling if it was stopped by suspend */
	if (!timer_pending(&dev->stat_monitor)) {
		dev->delta = 1;
		mod_timer(&dev->stat_monitor,
			  jiffies + STAT_UPDATE_TIMER);
	}

	/* only the last (outermost) resume restarts the data path */
	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
				usb_submit_urb(dev->urb_intr, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		/* submit every tx URB parked on the deferred anchor */
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* disarm wake-up logic and acknowledge any recorded wake reasons */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
4135 
/* Resume after the device lost its state (USB reset): re-run the full
 * chip reset, restart the PHY state machine, then do the normal resume
 * work.
 */
static int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);

	phy_start(dev->net->phydev);

	return lan78xx_resume(intf);
}
4146 
/* USB vendor/product IDs handled by this driver */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
4163 
/* USB driver glue; supports runtime PM (autosuspend) and reset-resume */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};
4175 
4176 module_usb_driver(lan78xx_driver);
4177 
4178 MODULE_AUTHOR(DRIVER_AUTHOR);
4179 MODULE_DESCRIPTION(DRIVER_DESC);
4180 MODULE_LICENSE("GPL");
4181