1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/version.h>
6 #include <linux/module.h>
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/ethtool.h>
10 #include <linux/usb.h>
11 #include <linux/crc32.h>
12 #include <linux/signal.h>
13 #include <linux/slab.h>
14 #include <linux/if_vlan.h>
15 #include <linux/uaccess.h>
16 #include <linux/list.h>
17 #include <linux/ip.h>
18 #include <linux/ipv6.h>
19 #include <linux/mdio.h>
20 #include <linux/phy.h>
21 #include <net/ip6_checksum.h>
22 #include <linux/interrupt.h>
23 #include <linux/irqdomain.h>
24 #include <linux/irq.h>
25 #include <linux/irqchip/chained_irq.h>
26 #include <linux/microchipphy.h>
27 #include <linux/phy_fixed.h>
28 #include <linux/of_mdio.h>
29 #include <linux/of_net.h>
30 #include "lan78xx.h"
31 
32 #define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
33 #define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
34 #define DRIVER_NAME	"lan78xx"
35 
36 #define TX_TIMEOUT_JIFFIES		(5 * HZ)
37 #define THROTTLE_JIFFIES		(HZ / 8)
38 #define UNLINK_TIMEOUT_MS		3
39 
40 #define RX_MAX_QUEUE_MEMORY		(60 * 1518)
41 
42 #define SS_USB_PKT_SIZE			(1024)
43 #define HS_USB_PKT_SIZE			(512)
44 #define FS_USB_PKT_SIZE			(64)
45 
46 #define MAX_RX_FIFO_SIZE		(12 * 1024)
47 #define MAX_TX_FIFO_SIZE		(12 * 1024)
48 #define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
49 #define DEFAULT_BULK_IN_DELAY		(0x0800)
50 #define MAX_SINGLE_PACKET_SIZE		(9000)
51 #define DEFAULT_TX_CSUM_ENABLE		(true)
52 #define DEFAULT_RX_CSUM_ENABLE		(true)
53 #define DEFAULT_TSO_CSUM_ENABLE		(true)
54 #define DEFAULT_VLAN_FILTER_ENABLE	(true)
55 #define DEFAULT_VLAN_RX_OFFLOAD		(true)
56 #define TX_OVERHEAD			(8)
57 #define RXW_PADDING			2
58 
59 #define LAN78XX_USB_VENDOR_ID		(0x0424)
60 #define LAN7800_USB_PRODUCT_ID		(0x7800)
61 #define LAN7850_USB_PRODUCT_ID		(0x7850)
62 #define LAN7801_USB_PRODUCT_ID		(0x7801)
63 #define LAN78XX_EEPROM_MAGIC		(0x78A5)
64 #define LAN78XX_OTP_MAGIC		(0x78F3)
65 
66 #define	MII_READ			1
67 #define	MII_WRITE			0
68 
69 #define EEPROM_INDICATOR		(0xA5)
70 #define EEPROM_MAC_OFFSET		(0x01)
71 #define MAX_EEPROM_SIZE			512
72 #define OTP_INDICATOR_1			(0xF3)
73 #define OTP_INDICATOR_2			(0xF7)
74 
75 #define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
76 					 WAKE_MCAST | WAKE_BCAST | \
77 					 WAKE_ARP | WAKE_MAGIC)
78 
79 /* USB related defines */
80 #define BULK_IN_PIPE			1
81 #define BULK_OUT_PIPE			2
82 
83 /* default autosuspend delay (msec) */
84 #define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
85 
86 /* statistics update interval (msec) */
87 #define STAT_UPDATE_TIMER		(1 * 1000)
88 
89 /* defines interrupts from interrupt EP */
90 #define MAX_INT_EP			(32)
91 #define INT_EP_INTEP			(31)
92 #define INT_EP_OTP_WR_DONE		(28)
93 #define INT_EP_EEE_TX_LPI_START		(26)
94 #define INT_EP_EEE_TX_LPI_STOP		(25)
95 #define INT_EP_EEE_RX_LPI		(24)
96 #define INT_EP_MAC_RESET_TIMEOUT	(23)
97 #define INT_EP_RDFO			(22)
98 #define INT_EP_TXE			(21)
99 #define INT_EP_USB_STATUS		(20)
100 #define INT_EP_TX_DIS			(19)
101 #define INT_EP_RX_DIS			(18)
102 #define INT_EP_PHY			(17)
103 #define INT_EP_DP			(16)
104 #define INT_EP_MAC_ERR			(15)
105 #define INT_EP_TDFU			(14)
106 #define INT_EP_TDFO			(13)
107 #define INT_EP_UTX			(12)
108 #define INT_EP_GPIO_11			(11)
109 #define INT_EP_GPIO_10			(10)
110 #define INT_EP_GPIO_9			(9)
111 #define INT_EP_GPIO_8			(8)
112 #define INT_EP_GPIO_7			(7)
113 #define INT_EP_GPIO_6			(6)
114 #define INT_EP_GPIO_5			(5)
115 #define INT_EP_GPIO_4			(4)
116 #define INT_EP_GPIO_3			(3)
117 #define INT_EP_GPIO_2			(2)
118 #define INT_EP_GPIO_1			(1)
119 #define INT_EP_GPIO_0			(0)
120 
121 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
122 	"RX FCS Errors",
123 	"RX Alignment Errors",
124 	"Rx Fragment Errors",
125 	"RX Jabber Errors",
126 	"RX Undersize Frame Errors",
127 	"RX Oversize Frame Errors",
128 	"RX Dropped Frames",
129 	"RX Unicast Byte Count",
130 	"RX Broadcast Byte Count",
131 	"RX Multicast Byte Count",
132 	"RX Unicast Frames",
133 	"RX Broadcast Frames",
134 	"RX Multicast Frames",
135 	"RX Pause Frames",
136 	"RX 64 Byte Frames",
137 	"RX 65 - 127 Byte Frames",
138 	"RX 128 - 255 Byte Frames",
139 	"RX 256 - 511 Bytes Frames",
140 	"RX 512 - 1023 Byte Frames",
141 	"RX 1024 - 1518 Byte Frames",
142 	"RX Greater 1518 Byte Frames",
143 	"EEE RX LPI Transitions",
144 	"EEE RX LPI Time",
145 	"TX FCS Errors",
146 	"TX Excess Deferral Errors",
147 	"TX Carrier Errors",
148 	"TX Bad Byte Count",
149 	"TX Single Collisions",
150 	"TX Multiple Collisions",
151 	"TX Excessive Collision",
152 	"TX Late Collisions",
153 	"TX Unicast Byte Count",
154 	"TX Broadcast Byte Count",
155 	"TX Multicast Byte Count",
156 	"TX Unicast Frames",
157 	"TX Broadcast Frames",
158 	"TX Multicast Frames",
159 	"TX Pause Frames",
160 	"TX 64 Byte Frames",
161 	"TX 65 - 127 Byte Frames",
162 	"TX 128 - 255 Byte Frames",
163 	"TX 256 - 511 Bytes Frames",
164 	"TX 512 - 1023 Byte Frames",
165 	"TX 1024 - 1518 Byte Frames",
166 	"TX Greater 1518 Byte Frames",
167 	"EEE TX LPI Transitions",
168 	"EEE TX LPI Time",
169 };
170 
171 struct lan78xx_statstage {
172 	u32 rx_fcs_errors;
173 	u32 rx_alignment_errors;
174 	u32 rx_fragment_errors;
175 	u32 rx_jabber_errors;
176 	u32 rx_undersize_frame_errors;
177 	u32 rx_oversize_frame_errors;
178 	u32 rx_dropped_frames;
179 	u32 rx_unicast_byte_count;
180 	u32 rx_broadcast_byte_count;
181 	u32 rx_multicast_byte_count;
182 	u32 rx_unicast_frames;
183 	u32 rx_broadcast_frames;
184 	u32 rx_multicast_frames;
185 	u32 rx_pause_frames;
186 	u32 rx_64_byte_frames;
187 	u32 rx_65_127_byte_frames;
188 	u32 rx_128_255_byte_frames;
189 	u32 rx_256_511_bytes_frames;
190 	u32 rx_512_1023_byte_frames;
191 	u32 rx_1024_1518_byte_frames;
192 	u32 rx_greater_1518_byte_frames;
193 	u32 eee_rx_lpi_transitions;
194 	u32 eee_rx_lpi_time;
195 	u32 tx_fcs_errors;
196 	u32 tx_excess_deferral_errors;
197 	u32 tx_carrier_errors;
198 	u32 tx_bad_byte_count;
199 	u32 tx_single_collisions;
200 	u32 tx_multiple_collisions;
201 	u32 tx_excessive_collision;
202 	u32 tx_late_collisions;
203 	u32 tx_unicast_byte_count;
204 	u32 tx_broadcast_byte_count;
205 	u32 tx_multicast_byte_count;
206 	u32 tx_unicast_frames;
207 	u32 tx_broadcast_frames;
208 	u32 tx_multicast_frames;
209 	u32 tx_pause_frames;
210 	u32 tx_64_byte_frames;
211 	u32 tx_65_127_byte_frames;
212 	u32 tx_128_255_byte_frames;
213 	u32 tx_256_511_bytes_frames;
214 	u32 tx_512_1023_byte_frames;
215 	u32 tx_1024_1518_byte_frames;
216 	u32 tx_greater_1518_byte_frames;
217 	u32 eee_tx_lpi_transitions;
218 	u32 eee_tx_lpi_time;
219 };
220 
221 struct lan78xx_statstage64 {
222 	u64 rx_fcs_errors;
223 	u64 rx_alignment_errors;
224 	u64 rx_fragment_errors;
225 	u64 rx_jabber_errors;
226 	u64 rx_undersize_frame_errors;
227 	u64 rx_oversize_frame_errors;
228 	u64 rx_dropped_frames;
229 	u64 rx_unicast_byte_count;
230 	u64 rx_broadcast_byte_count;
231 	u64 rx_multicast_byte_count;
232 	u64 rx_unicast_frames;
233 	u64 rx_broadcast_frames;
234 	u64 rx_multicast_frames;
235 	u64 rx_pause_frames;
236 	u64 rx_64_byte_frames;
237 	u64 rx_65_127_byte_frames;
238 	u64 rx_128_255_byte_frames;
239 	u64 rx_256_511_bytes_frames;
240 	u64 rx_512_1023_byte_frames;
241 	u64 rx_1024_1518_byte_frames;
242 	u64 rx_greater_1518_byte_frames;
243 	u64 eee_rx_lpi_transitions;
244 	u64 eee_rx_lpi_time;
245 	u64 tx_fcs_errors;
246 	u64 tx_excess_deferral_errors;
247 	u64 tx_carrier_errors;
248 	u64 tx_bad_byte_count;
249 	u64 tx_single_collisions;
250 	u64 tx_multiple_collisions;
251 	u64 tx_excessive_collision;
252 	u64 tx_late_collisions;
253 	u64 tx_unicast_byte_count;
254 	u64 tx_broadcast_byte_count;
255 	u64 tx_multicast_byte_count;
256 	u64 tx_unicast_frames;
257 	u64 tx_broadcast_frames;
258 	u64 tx_multicast_frames;
259 	u64 tx_pause_frames;
260 	u64 tx_64_byte_frames;
261 	u64 tx_65_127_byte_frames;
262 	u64 tx_128_255_byte_frames;
263 	u64 tx_256_511_bytes_frames;
264 	u64 tx_512_1023_byte_frames;
265 	u64 tx_1024_1518_byte_frames;
266 	u64 tx_greater_1518_byte_frames;
267 	u64 eee_tx_lpi_transitions;
268 	u64 eee_tx_lpi_time;
269 };
270 
271 static const u32 lan78xx_regs[] = {
272 	ID_REV,
273 	INT_STS,
274 	HW_CFG,
275 	PMT_CTL,
276 	E2P_CMD,
277 	E2P_DATA,
278 	USB_STATUS,
279 	VLAN_TYPE,
280 	MAC_CR,
281 	MAC_RX,
282 	MAC_TX,
283 	FLOW,
284 	ERR_STS,
285 	MII_ACC,
286 	MII_DATA,
287 	EEE_TX_LPI_REQ_DLY,
288 	EEE_TW_TX_SYS,
289 	EEE_TX_LPI_REM_DLY,
290 	WUCSR
291 };
292 
293 #define PHY_REG_SIZE (32 * sizeof(u32))
294 
295 struct lan78xx_net;
296 
297 struct lan78xx_priv {
298 	struct lan78xx_net *dev;
299 	u32 rfe_ctl;
300 	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
301 	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
302 	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
303 	struct mutex dataport_mutex; /* for dataport access */
304 	spinlock_t rfe_ctl_lock; /* for rfe register access */
305 	struct work_struct set_multicast;
306 	struct work_struct set_vlan;
307 	u32 wol;
308 };
309 
310 enum skb_state {
311 	illegal = 0,
312 	tx_start,
313 	tx_done,
314 	rx_start,
315 	rx_done,
316 	rx_cleanup,
317 	unlink_start
318 };
319 
320 struct skb_data {		/* skb->cb is one of these */
321 	struct urb *urb;
322 	struct lan78xx_net *dev;
323 	enum skb_state state;
324 	size_t length;
325 	int num_of_packet;
326 };
327 
328 struct usb_context {
329 	struct usb_ctrlrequest req;
330 	struct lan78xx_net *dev;
331 };
332 
333 #define EVENT_TX_HALT			0
334 #define EVENT_RX_HALT			1
335 #define EVENT_RX_MEMORY			2
336 #define EVENT_STS_SPLIT			3
337 #define EVENT_LINK_RESET		4
338 #define EVENT_RX_PAUSED			5
339 #define EVENT_DEV_WAKING		6
340 #define EVENT_DEV_ASLEEP		7
341 #define EVENT_DEV_OPEN			8
342 #define EVENT_STAT_UPDATE		9
343 
344 struct statstage {
345 	struct mutex			access_lock;	/* for stats access */
346 	struct lan78xx_statstage	saved;
347 	struct lan78xx_statstage	rollover_count;
348 	struct lan78xx_statstage	rollover_max;
349 	struct lan78xx_statstage64	curr_stat;
350 };
351 
352 struct irq_domain_data {
353 	struct irq_domain	*irqdomain;
354 	unsigned int		phyirq;
355 	struct irq_chip		*irqchip;
356 	irq_flow_handler_t	irq_handler;
357 	u32			irqenable;
358 	struct mutex		irq_lock;		/* for irq bus access */
359 };
360 
361 struct lan78xx_net {
362 	struct net_device	*net;
363 	struct usb_device	*udev;
364 	struct usb_interface	*intf;
365 	void			*driver_priv;
366 
367 	int			rx_qlen;
368 	int			tx_qlen;
369 	struct sk_buff_head	rxq;
370 	struct sk_buff_head	txq;
371 	struct sk_buff_head	done;
372 	struct sk_buff_head	rxq_pause;
373 	struct sk_buff_head	txq_pend;
374 
375 	struct tasklet_struct	bh;
376 	struct delayed_work	wq;
377 
378 	struct usb_host_endpoint *ep_blkin;
379 	struct usb_host_endpoint *ep_blkout;
380 	struct usb_host_endpoint *ep_intr;
381 
382 	int			msg_enable;
383 
384 	struct urb		*urb_intr;
385 	struct usb_anchor	deferred;
386 
387 	struct mutex		phy_mutex; /* for phy access */
388 	unsigned		pipe_in, pipe_out, pipe_intr;
389 
390 	u32			hard_mtu;	/* count any extra framing */
391 	size_t			rx_urb_size;	/* size for rx urbs */
392 
393 	unsigned long		flags;
394 
395 	wait_queue_head_t	*wait;
396 	unsigned char		suspend_count;
397 
398 	unsigned		maxpacket;
399 	struct timer_list	delay;
400 	struct timer_list	stat_monitor;
401 
402 	unsigned long		data[5];
403 
404 	int			link_on;
405 	u8			mdix_ctrl;
406 
407 	u32			chipid;
408 	u32			chiprev;
409 	struct mii_bus		*mdiobus;
410 	phy_interface_t		interface;
411 
412 	int			fc_autoneg;
413 	u8			fc_request_control;
414 
415 	int			delta;
416 	struct statstage	stats;
417 
418 	struct irq_domain_data	domain_data;
419 };
420 
421 /* define external phy id */
422 #define	PHY_LAN8835			(0x0007C130)
423 #define	PHY_KSZ9031RNX			(0x00221620)
424 
425 /* use ethtool to change the level for any given device */
426 static int msg_level = -1;
427 module_param(msg_level, int, 0);
428 MODULE_PARM_DESC(msg_level, "Override default message level");
429 
430 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
431 {
432 	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
433 	int ret;
434 
435 	if (!buf)
436 		return -ENOMEM;
437 
438 	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
439 			      USB_VENDOR_REQUEST_READ_REGISTER,
440 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
441 			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
442 	if (likely(ret >= 0)) {
443 		le32_to_cpus(buf);
444 		*data = *buf;
445 	} else {
446 		netdev_warn(dev->net,
447 			    "Failed to read register index 0x%08x. ret = %d",
448 			    index, ret);
449 	}
450 
451 	kfree(buf);
452 
453 	return ret;
454 }
455 
456 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
457 {
458 	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
459 	int ret;
460 
461 	if (!buf)
462 		return -ENOMEM;
463 
464 	*buf = data;
465 	cpu_to_le32s(buf);
466 
467 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
468 			      USB_VENDOR_REQUEST_WRITE_REGISTER,
469 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
470 			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
471 	if (unlikely(ret < 0)) {
472 		netdev_warn(dev->net,
473 			    "Failed to write register index 0x%08x. ret = %d",
474 			    index, ret);
475 	}
476 
477 	kfree(buf);
478 
479 	return ret;
480 }
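
/* A minimal usage sketch (illustrative only): callers below combine these
 * two helpers into read-modify-write sequences on device registers, e.g.
 * as lan78xx_set_eee() does:
 *
 *	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
 *	buf |= MAC_CR_EEE_EN_;
 *	ret = lan78xx_write_reg(dev, MAC_CR, buf);
 *
 * The value is bounced through a kmalloc'ed buffer because USB transfer
 * buffers must be DMA-able; stack memory must not be handed to
 * usb_control_msg().
 */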
481 
482 static int lan78xx_read_stats(struct lan78xx_net *dev,
483 			      struct lan78xx_statstage *data)
484 {
485 	int ret = 0;
486 	int i;
487 	struct lan78xx_statstage *stats;
488 	u32 *src;
489 	u32 *dst;
490 
491 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
492 	if (!stats)
493 		return -ENOMEM;
494 
495 	ret = usb_control_msg(dev->udev,
496 			      usb_rcvctrlpipe(dev->udev, 0),
497 			      USB_VENDOR_REQUEST_GET_STATS,
498 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
499 			      0,
500 			      0,
501 			      (void *)stats,
502 			      sizeof(*stats),
503 			      USB_CTRL_GET_TIMEOUT);
504 	if (likely(ret >= 0)) {
505 		src = (u32 *)stats;
506 		dst = (u32 *)data;
507 		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
508 			le32_to_cpus(&src[i]);
509 			dst[i] = src[i];
510 		}
511 	} else {
512 		netdev_warn(dev->net,
513 			    "Failed to read stats. ret = %d", ret);
514 	}
515 
516 	kfree(stats);
517 
518 	return ret;
519 }
520 
521 #define check_counter_rollover(struct1, dev_stats, member) {	\
522 	if (struct1->member < dev_stats.saved.member)		\
523 		dev_stats.rollover_count.member++;		\
524 	}
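
/* For example, check_counter_rollover(stats, dev->stats, rx_fcs_errors)
 * expands to:
 *
 *	if (stats->rx_fcs_errors < dev->stats.saved.rx_fcs_errors)
 *		dev->stats.rollover_count.rx_fcs_errors++;
 *
 * i.e. a freshly read counter smaller than the last saved value is taken
 * as evidence of a 32-bit wraparound.
 */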
525 
526 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
527 					struct lan78xx_statstage *stats)
528 {
529 	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
530 	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
531 	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
532 	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
533 	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
534 	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
535 	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
536 	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
537 	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
538 	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
539 	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
540 	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
541 	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
542 	check_counter_rollover(stats, dev->stats, rx_pause_frames);
543 	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
544 	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
545 	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
546 	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
547 	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
548 	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
549 	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
550 	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
551 	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
552 	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
553 	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
554 	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
555 	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
556 	check_counter_rollover(stats, dev->stats, tx_single_collisions);
557 	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
558 	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
559 	check_counter_rollover(stats, dev->stats, tx_late_collisions);
560 	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
561 	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
562 	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
563 	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
564 	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
565 	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
566 	check_counter_rollover(stats, dev->stats, tx_pause_frames);
567 	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
568 	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
569 	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
570 	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
571 	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
572 	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
573 	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
574 	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
575 	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
576 
577 	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
578 }
579 
580 static void lan78xx_update_stats(struct lan78xx_net *dev)
581 {
582 	u32 *p, *count, *max;
583 	u64 *data;
584 	int i;
585 	struct lan78xx_statstage lan78xx_stats;
586 
587 	if (usb_autopm_get_interface(dev->intf) < 0)
588 		return;
589 
590 	p = (u32 *)&lan78xx_stats;
591 	count = (u32 *)&dev->stats.rollover_count;
592 	max = (u32 *)&dev->stats.rollover_max;
593 	data = (u64 *)&dev->stats.curr_stat;
594 
595 	mutex_lock(&dev->stats.access_lock);
596 
597 	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
598 		lan78xx_check_stat_rollover(dev, &lan78xx_stats);
599 
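	/* Reconstruct the 64-bit totals: a raw 32-bit counter p[i] that has
	 * wrapped count[i] times contributes count[i] * (max[i] + 1) on top
	 * of its current value.
	 */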
600 	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
601 		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
602 
603 	mutex_unlock(&dev->stats.access_lock);
604 
605 	usb_autopm_put_interface(dev->intf);
606 }
607 
608 /* Loop until the read is completed, with timeout. Called with phy_mutex held. */
609 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
610 {
611 	unsigned long start_time = jiffies;
612 	u32 val;
613 	int ret;
614 
615 	do {
616 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
617 		if (unlikely(ret < 0))
618 			return -EIO;
619 
620 		if (!(val & MII_ACC_MII_BUSY_))
621 			return 0;
622 	} while (!time_after(jiffies, start_time + HZ));
623 
624 	return -EIO;
625 }
626 
627 static inline u32 mii_access(int id, int index, int read)
628 {
629 	u32 ret;
630 
631 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
632 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
633 	if (read)
634 		ret |= MII_ACC_MII_READ_;
635 	else
636 		ret |= MII_ACC_MII_WRITE_;
637 	ret |= MII_ACC_MII_BUSY_;
638 
639 	return ret;
640 }
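
/* Illustrative sequence: an MII read of register idx on PHY phy_id writes
 * the composed command word and polls the busy bit before fetching the
 * result, which is exactly what lan78xx_mdiobus_read() below does:
 *
 *	ret = lan78xx_write_reg(dev, MII_ACC, mii_access(phy_id, idx, MII_READ));
 *	ret = lan78xx_phy_wait_not_busy(dev);
 *	ret = lan78xx_read_reg(dev, MII_DATA, &val);
 */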
641 
642 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
643 {
644 	unsigned long start_time = jiffies;
645 	u32 val;
646 	int ret;
647 
648 	do {
649 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
650 		if (unlikely(ret < 0))
651 			return -EIO;
652 
653 		if (!(val & E2P_CMD_EPC_BUSY_) ||
654 		    (val & E2P_CMD_EPC_TIMEOUT_))
655 			break;
656 		usleep_range(40, 100);
657 	} while (!time_after(jiffies, start_time + HZ));
658 
659 	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
660 		netdev_warn(dev->net, "EEPROM read operation timeout");
661 		return -EIO;
662 	}
663 
664 	return 0;
665 }
666 
667 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
668 {
669 	unsigned long start_time = jiffies;
670 	u32 val;
671 	int ret;
672 
673 	do {
674 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
675 		if (unlikely(ret < 0))
676 			return -EIO;
677 
678 		if (!(val & E2P_CMD_EPC_BUSY_))
679 			return 0;
680 
681 		usleep_range(40, 100);
682 	} while (!time_after(jiffies, start_time + HZ));
683 
684 	netdev_warn(dev->net, "EEPROM is busy");
685 	return -EIO;
686 }
687 
688 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
689 				   u32 length, u8 *data)
690 {
691 	u32 val;
692 	u32 saved;
693 	int i, ret;
694 	int retval;
695 
696 	/* Depending on the chip, some EEPROM pins are muxed with the LED
697 	 * function. Disable and restore the LED function to access the EEPROM.
698 	 */
699 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
700 	saved = val;
701 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
702 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
703 		ret = lan78xx_write_reg(dev, HW_CFG, val);
704 	}
705 
706 	retval = lan78xx_eeprom_confirm_not_busy(dev);
707 	if (retval)
708 		return retval;
709 
710 	for (i = 0; i < length; i++) {
711 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
712 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
713 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
714 		if (unlikely(ret < 0)) {
715 			retval = -EIO;
716 			goto exit;
717 		}
718 
719 		retval = lan78xx_wait_eeprom(dev);
720 		if (retval < 0)
721 			goto exit;
722 
723 		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
724 		if (unlikely(ret < 0)) {
725 			retval = -EIO;
726 			goto exit;
727 		}
728 
729 		data[i] = val & 0xFF;
730 		offset++;
731 	}
732 
733 	retval = 0;
734 exit:
735 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
736 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
737 
738 	return retval;
739 }
740 
741 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
742 			       u32 length, u8 *data)
743 {
744 	u8 sig;
745 	int ret;
746 
747 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
748 	if ((ret == 0) && (sig == EEPROM_INDICATOR))
749 		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
750 	else
751 		ret = -EINVAL;
752 
753 	return ret;
754 }
755 
756 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
757 				    u32 length, u8 *data)
758 {
759 	u32 val;
760 	u32 saved;
761 	int i, ret;
762 	int retval;
763 
764 	/* Depending on the chip, some EEPROM pins are muxed with the LED
765 	 * function. Disable and restore the LED function to access the EEPROM.
766 	 */
767 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
768 	saved = val;
769 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
770 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
771 		ret = lan78xx_write_reg(dev, HW_CFG, val);
772 	}
773 
774 	retval = lan78xx_eeprom_confirm_not_busy(dev);
775 	if (retval)
776 		goto exit;
777 
778 	/* Issue write/erase enable command */
779 	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
780 	ret = lan78xx_write_reg(dev, E2P_CMD, val);
781 	if (unlikely(ret < 0)) {
782 		retval = -EIO;
783 		goto exit;
784 	}
785 
786 	retval = lan78xx_wait_eeprom(dev);
787 	if (retval < 0)
788 		goto exit;
789 
790 	for (i = 0; i < length; i++) {
791 		/* Fill data register */
792 		val = data[i];
793 		ret = lan78xx_write_reg(dev, E2P_DATA, val);
794 		if (ret < 0) {
795 			retval = -EIO;
796 			goto exit;
797 		}
798 
799 		/* Send "write" command */
800 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
801 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
802 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
803 		if (ret < 0) {
804 			retval = -EIO;
805 			goto exit;
806 		}
807 
808 		retval = lan78xx_wait_eeprom(dev);
809 		if (retval < 0)
810 			goto exit;
811 
812 		offset++;
813 	}
814 
815 	retval = 0;
816 exit:
817 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
818 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
819 
820 	return retval;
821 }
822 
823 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
824 				u32 length, u8 *data)
825 {
826 	int i;
827 	int ret;
828 	u32 buf;
829 	unsigned long timeout;
830 
831 	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
832 
833 	if (buf & OTP_PWR_DN_PWRDN_N_) {
834 		/* clear it and wait for it to clear */
835 		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
836 
837 		timeout = jiffies + HZ;
838 		do {
839 			usleep_range(1, 10);
840 			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
841 			if (time_after(jiffies, timeout)) {
842 				netdev_warn(dev->net,
843 					    "timeout on OTP_PWR_DN");
844 				return -EIO;
845 			}
846 		} while (buf & OTP_PWR_DN_PWRDN_N_);
847 	}
848 
849 	for (i = 0; i < length; i++) {
850 		ret = lan78xx_write_reg(dev, OTP_ADDR1,
851 					((offset + i) >> 8) & OTP_ADDR1_15_11);
852 		ret = lan78xx_write_reg(dev, OTP_ADDR2,
853 					((offset + i) & OTP_ADDR2_10_3));
854 
855 		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
856 		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
857 
858 		timeout = jiffies + HZ;
859 		do {
860 			udelay(1);
861 			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
862 			if (time_after(jiffies, timeout)) {
863 				netdev_warn(dev->net,
864 					    "timeout on OTP_STATUS");
865 				return -EIO;
866 			}
867 		} while (buf & OTP_STATUS_BUSY_);
868 
869 		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
870 
871 		data[i] = (u8)(buf & 0xFF);
872 	}
873 
874 	return 0;
875 }
876 
877 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
878 				 u32 length, u8 *data)
879 {
880 	int i;
881 	int ret;
882 	u32 buf;
883 	unsigned long timeout;
884 
885 	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
886 
887 	if (buf & OTP_PWR_DN_PWRDN_N_) {
888 		/* clear it and wait for it to clear */
889 		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
890 
891 		timeout = jiffies + HZ;
892 		do {
893 			udelay(1);
894 			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
895 			if (time_after(jiffies, timeout)) {
896 				netdev_warn(dev->net,
897 					    "timeout on OTP_PWR_DN completion");
898 				return -EIO;
899 			}
900 		} while (buf & OTP_PWR_DN_PWRDN_N_);
901 	}
902 
903 	/* set to BYTE program mode */
904 	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
905 
906 	for (i = 0; i < length; i++) {
907 		ret = lan78xx_write_reg(dev, OTP_ADDR1,
908 					((offset + i) >> 8) & OTP_ADDR1_15_11);
909 		ret = lan78xx_write_reg(dev, OTP_ADDR2,
910 					((offset + i) & OTP_ADDR2_10_3));
911 		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
912 		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
913 		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
914 
915 		timeout = jiffies + HZ;
916 		do {
917 			udelay(1);
918 			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
919 			if (time_after(jiffies, timeout)) {
920 				netdev_warn(dev->net,
921 					    "Timeout on OTP_STATUS completion");
922 				return -EIO;
923 			}
924 		} while (buf & OTP_STATUS_BUSY_);
925 	}
926 
927 	return 0;
928 }
929 
930 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
931 			    u32 length, u8 *data)
932 {
933 	u8 sig;
934 	int ret;
935 
936 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
937 
938 	if (ret == 0) {
939 		if (sig == OTP_INDICATOR_2)
940 			offset += 0x100;
941 		else if (sig != OTP_INDICATOR_1)
942 			ret = -EINVAL;
943 		if (!ret)
944 			ret = lan78xx_read_raw_otp(dev, offset, length, data);
945 	}
946 
947 	return ret;
948 }
949 
950 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
951 {
952 	int i, ret;
953 
954 	for (i = 0; i < 100; i++) {
955 		u32 dp_sel;
956 
957 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
958 		if (unlikely(ret < 0))
959 			return -EIO;
960 
961 		if (dp_sel & DP_SEL_DPRDY_)
962 			return 0;
963 
964 		usleep_range(40, 100);
965 	}
966 
967 	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
968 
969 	return -EIO;
970 }
971 
972 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
973 				  u32 addr, u32 length, u32 *buf)
974 {
975 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
976 	u32 dp_sel;
977 	int i, ret;
978 
979 	if (usb_autopm_get_interface(dev->intf) < 0)
980 		return 0;
981 
982 	mutex_lock(&pdata->dataport_mutex);
983 
984 	ret = lan78xx_dataport_wait_not_busy(dev);
985 	if (ret < 0)
986 		goto done;
987 
988 	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
989 
990 	dp_sel &= ~DP_SEL_RSEL_MASK_;
991 	dp_sel |= ram_select;
992 	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
993 
994 	for (i = 0; i < length; i++) {
995 		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
996 
997 		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
998 
999 		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1000 
1001 		ret = lan78xx_dataport_wait_not_busy(dev);
1002 		if (ret < 0)
1003 			goto done;
1004 	}
1005 
1006 done:
1007 	mutex_unlock(&pdata->dataport_mutex);
1008 	usb_autopm_put_interface(dev->intf);
1009 
1010 	return ret;
1011 }
1012 
1013 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1014 				    int index, u8 addr[ETH_ALEN])
1015 {
1016 	u32 temp;
1017 
1018 	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1019 		temp = addr[3];
1020 		temp = addr[2] | (temp << 8);
1021 		temp = addr[1] | (temp << 8);
1022 		temp = addr[0] | (temp << 8);
1023 		pdata->pfilter_table[index][1] = temp;
1024 		temp = addr[5];
1025 		temp = addr[4] | (temp << 8);
1026 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1027 		pdata->pfilter_table[index][0] = temp;
1028 	}
1029 }
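
/* Worked example (illustrative only): for the address 00:11:22:33:44:55 the
 * code above produces
 *
 *	pfilter_table[index][1] = 0x33221100
 *	pfilter_table[index][0] = MAF_HI_VALID_ | MAF_HI_TYPE_DST_ | 0x5544
 *
 * i.e. the MAC address is packed little-endian across the LO/HI register
 * pair written out by lan78xx_deferred_multicast_write().
 */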
1030 
1031 /* returns hash bit number for given MAC address */
1032 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1033 {
1034 	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1035 }
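
/* Bit number b returned above selects bit (b % 32) of word b / 32 in
 * pdata->mchash_table[]; see the hash-filter branch of
 * lan78xx_set_multicast() below.
 */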
1036 
1037 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1038 {
1039 	struct lan78xx_priv *pdata =
1040 			container_of(param, struct lan78xx_priv, set_multicast);
1041 	struct lan78xx_net *dev = pdata->dev;
1042 	int i;
1043 	int ret;
1044 
1045 	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1046 		  pdata->rfe_ctl);
1047 
1048 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1049 			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1050 
1051 	for (i = 1; i < NUM_OF_MAF; i++) {
1052 		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1053 		ret = lan78xx_write_reg(dev, MAF_LO(i),
1054 					pdata->pfilter_table[i][1]);
1055 		ret = lan78xx_write_reg(dev, MAF_HI(i),
1056 					pdata->pfilter_table[i][0]);
1057 	}
1058 
1059 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1060 }
1061 
1062 static void lan78xx_set_multicast(struct net_device *netdev)
1063 {
1064 	struct lan78xx_net *dev = netdev_priv(netdev);
1065 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1066 	unsigned long flags;
1067 	int i;
1068 
1069 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1070 
1071 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1072 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1073 
1074 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1075 		pdata->mchash_table[i] = 0;
1076 	/* pfilter_table[0] has own HW address */
1077 	for (i = 1; i < NUM_OF_MAF; i++) {
1078 		pdata->pfilter_table[i][0] =
1079 		pdata->pfilter_table[i][1] = 0;
1080 	}
1081 
1082 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1083 
1084 	if (dev->net->flags & IFF_PROMISC) {
1085 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1086 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1087 	} else {
1088 		if (dev->net->flags & IFF_ALLMULTI) {
1089 			netif_dbg(dev, drv, dev->net,
1090 				  "receive all multicast enabled");
1091 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1092 		}
1093 	}
1094 
1095 	if (netdev_mc_count(dev->net)) {
1096 		struct netdev_hw_addr *ha;
1097 		int i;
1098 
1099 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1100 
1101 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1102 
1103 		i = 1;
1104 		netdev_for_each_mc_addr(ha, netdev) {
1105 			/* set first 32 into Perfect Filter */
1106 			if (i < 33) {
1107 				lan78xx_set_addr_filter(pdata, i, ha->addr);
1108 			} else {
1109 				u32 bitnum = lan78xx_hash(ha->addr);
1110 
1111 				pdata->mchash_table[bitnum / 32] |=
1112 							(1 << (bitnum % 32));
1113 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1114 			}
1115 			i++;
1116 		}
1117 	}
1118 
1119 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1120 
1121 	/* defer register writes to a sleepable context */
1122 	schedule_work(&pdata->set_multicast);
1123 }
1124 
1125 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1126 				      u16 lcladv, u16 rmtadv)
1127 {
1128 	u32 flow = 0, fct_flow = 0;
1129 	int ret;
1130 	u8 cap;
1131 
1132 	if (dev->fc_autoneg)
1133 		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1134 	else
1135 		cap = dev->fc_request_control;
1136 
1137 	if (cap & FLOW_CTRL_TX)
1138 		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1139 
1140 	if (cap & FLOW_CTRL_RX)
1141 		flow |= FLOW_CR_RX_FCEN_;
1142 
1143 	if (dev->udev->speed == USB_SPEED_SUPER)
1144 		fct_flow = 0x817;
1145 	else if (dev->udev->speed == USB_SPEED_HIGH)
1146 		fct_flow = 0x211;
1147 
1148 	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1149 		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1150 		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1151 
1152 	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1153 
1154 	/* threshold value should be set before enabling flow */
1155 	ret = lan78xx_write_reg(dev, FLOW, flow);
1156 
1157 	return 0;
1158 }
1159 
1160 static int lan78xx_link_reset(struct lan78xx_net *dev)
1161 {
1162 	struct phy_device *phydev = dev->net->phydev;
1163 	struct ethtool_link_ksettings ecmd;
1164 	int ladv, radv, ret;
1165 	u32 buf;
1166 
1167 	/* clear LAN78xx interrupt status */
1168 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1169 	if (unlikely(ret < 0))
1170 		return -EIO;
1171 
1172 	phy_read_status(phydev);
1173 
1174 	if (!phydev->link && dev->link_on) {
1175 		dev->link_on = false;
1176 
1177 		/* reset MAC */
1178 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1179 		if (unlikely(ret < 0))
1180 			return -EIO;
1181 		buf |= MAC_CR_RST_;
1182 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1183 		if (unlikely(ret < 0))
1184 			return -EIO;
1185 
1186 		del_timer(&dev->stat_monitor);
1187 	} else if (phydev->link && !dev->link_on) {
1188 		dev->link_on = true;
1189 
1190 		phy_ethtool_ksettings_get(phydev, &ecmd);
1191 
1192 		if (dev->udev->speed == USB_SPEED_SUPER) {
1193 			if (ecmd.base.speed == 1000) {
1194 				/* disable U2 */
1195 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1196 				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1197 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1198 				/* enable U1 */
1199 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1200 				buf |= USB_CFG1_DEV_U1_INIT_EN_;
1201 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1202 			} else {
1203 				/* enable U1 & U2 */
1204 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1205 				buf |= USB_CFG1_DEV_U2_INIT_EN_;
1206 				buf |= USB_CFG1_DEV_U1_INIT_EN_;
1207 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1208 			}
1209 		}
1210 
1211 		ladv = phy_read(phydev, MII_ADVERTISE);
1212 		if (ladv < 0)
1213 			return ladv;
1214 
1215 		radv = phy_read(phydev, MII_LPA);
1216 		if (radv < 0)
1217 			return radv;
1218 
1219 		netif_dbg(dev, link, dev->net,
1220 			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1221 			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);
1222 
1223 		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1224 						 radv);
1225 
1226 		if (!timer_pending(&dev->stat_monitor)) {
1227 			dev->delta = 1;
1228 			mod_timer(&dev->stat_monitor,
1229 				  jiffies + STAT_UPDATE_TIMER);
1230 		}
1231 
1232 		tasklet_schedule(&dev->bh);
1233 	}
1234 
1235 	return ret;
1236 }
1237 
1238 /* Some work can't be done in tasklets, so we use keventd.
1239  *
1240  * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1241  * but tasklet_schedule() doesn't. Hope the failure is rare.
1242  */
1243 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1244 {
1245 	set_bit(work, &dev->flags);
1246 	if (!schedule_delayed_work(&dev->wq, 0))
1247 		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1248 }
1249 
1250 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1251 {
1252 	u32 intdata;
1253 
1254 	if (urb->actual_length != 4) {
1255 		netdev_warn(dev->net,
1256 			    "unexpected urb length %d", urb->actual_length);
1257 		return;
1258 	}
1259 
1260 	memcpy(&intdata, urb->transfer_buffer, 4);
1261 	le32_to_cpus(&intdata);
1262 
1263 	if (intdata & INT_ENP_PHY_INT) {
1264 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1265 		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1266 
1267 		if (dev->domain_data.phyirq > 0)
1268 			generic_handle_irq(dev->domain_data.phyirq);
1269 	} else
1270 		netdev_warn(dev->net,
1271 			    "unexpected interrupt: 0x%08x\n", intdata);
1272 }
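
/* The 4-byte word delivered on the interrupt endpoint is a bitmap of the
 * INT_EP_* events defined at the top of this file; only the PHY event is
 * acted on here, everything else is logged as unexpected.
 */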
1273 
1274 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1275 {
1276 	return MAX_EEPROM_SIZE;
1277 }
1278 
1279 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1280 				      struct ethtool_eeprom *ee, u8 *data)
1281 {
1282 	struct lan78xx_net *dev = netdev_priv(netdev);
1283 	int ret;
1284 
1285 	ret = usb_autopm_get_interface(dev->intf);
1286 	if (ret)
1287 		return ret;
1288 
1289 	ee->magic = LAN78XX_EEPROM_MAGIC;
1290 
1291 	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1292 
1293 	usb_autopm_put_interface(dev->intf);
1294 
1295 	return ret;
1296 }
1297 
1298 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1299 				      struct ethtool_eeprom *ee, u8 *data)
1300 {
1301 	struct lan78xx_net *dev = netdev_priv(netdev);
1302 	int ret;
1303 
1304 	ret = usb_autopm_get_interface(dev->intf);
1305 	if (ret)
1306 		return ret;
1307 
1308 	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1309 	 * to load data from EEPROM
1310 	 */
1311 	if (ee->magic == LAN78XX_EEPROM_MAGIC)
1312 		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1313 	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1314 		 (ee->offset == 0) &&
1315 		 (ee->len == 512) &&
1316 		 (data[0] == OTP_INDICATOR_1))
1317 		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1318 
1319 	usb_autopm_put_interface(dev->intf);
1320 
1321 	return ret;
1322 }
1323 
1324 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1325 				u8 *data)
1326 {
1327 	if (stringset == ETH_SS_STATS)
1328 		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1329 }
1330 
1331 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1332 {
1333 	if (sset == ETH_SS_STATS)
1334 		return ARRAY_SIZE(lan78xx_gstrings);
1335 	else
1336 		return -EOPNOTSUPP;
1337 }
1338 
1339 static void lan78xx_get_stats(struct net_device *netdev,
1340 			      struct ethtool_stats *stats, u64 *data)
1341 {
1342 	struct lan78xx_net *dev = netdev_priv(netdev);
1343 
1344 	lan78xx_update_stats(dev);
1345 
1346 	mutex_lock(&dev->stats.access_lock);
1347 	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1348 	mutex_unlock(&dev->stats.access_lock);
1349 }
1350 
1351 static void lan78xx_get_wol(struct net_device *netdev,
1352 			    struct ethtool_wolinfo *wol)
1353 {
1354 	struct lan78xx_net *dev = netdev_priv(netdev);
1355 	int ret;
1356 	u32 buf;
1357 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1358 
1359 	if (usb_autopm_get_interface(dev->intf) < 0)
1360 		return;
1361 
1362 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1363 	if (unlikely(ret < 0)) {
1364 		wol->supported = 0;
1365 		wol->wolopts = 0;
1366 	} else {
1367 		if (buf & USB_CFG_RMT_WKP_) {
1368 			wol->supported = WAKE_ALL;
1369 			wol->wolopts = pdata->wol;
1370 		} else {
1371 			wol->supported = 0;
1372 			wol->wolopts = 0;
1373 		}
1374 	}
1375 
1376 	usb_autopm_put_interface(dev->intf);
1377 }
1378 
1379 static int lan78xx_set_wol(struct net_device *netdev,
1380 			   struct ethtool_wolinfo *wol)
1381 {
1382 	struct lan78xx_net *dev = netdev_priv(netdev);
1383 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1384 	int ret;
1385 
1386 	if (wol->wolopts & ~WAKE_ALL)
1387 		return -EINVAL;
1388 
1389 	ret = usb_autopm_get_interface(dev->intf);
1390 	if (ret < 0)
1391 		return ret;
1392 
1393 	pdata->wol = wol->wolopts;
1394 
1395 	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1396 
1397 	phy_ethtool_set_wol(netdev->phydev, wol);
1398 
1399 	usb_autopm_put_interface(dev->intf);
1400 
1401 	return ret;
1402 }
1403 
1404 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1405 {
1406 	struct lan78xx_net *dev = netdev_priv(net);
1407 	struct phy_device *phydev = net->phydev;
1408 	int ret;
1409 	u32 buf;
1410 
1411 	ret = usb_autopm_get_interface(dev->intf);
1412 	if (ret < 0)
1413 		return ret;
1414 
1415 	ret = phy_ethtool_get_eee(phydev, edata);
1416 	if (ret < 0)
1417 		goto exit;
1418 
1419 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1420 	if (buf & MAC_CR_EEE_EN_) {
1421 		edata->eee_enabled = true;
1422 		edata->eee_active = !!(edata->advertised &
1423 				       edata->lp_advertised);
1424 		edata->tx_lpi_enabled = true;
1425 		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer use the same usec unit */
1426 		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1427 		edata->tx_lpi_timer = buf;
1428 	} else {
1429 		edata->eee_enabled = false;
1430 		edata->eee_active = false;
1431 		edata->tx_lpi_enabled = false;
1432 		edata->tx_lpi_timer = 0;
1433 	}
1434 
1435 	ret = 0;
1436 exit:
1437 	usb_autopm_put_interface(dev->intf);
1438 
1439 	return ret;
1440 }
1441 
1442 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1443 {
1444 	struct lan78xx_net *dev = netdev_priv(net);
1445 	int ret;
1446 	u32 buf;
1447 
1448 	ret = usb_autopm_get_interface(dev->intf);
1449 	if (ret < 0)
1450 		return ret;
1451 
1452 	if (edata->eee_enabled) {
1453 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1454 		buf |= MAC_CR_EEE_EN_;
1455 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1456 
1457 		phy_ethtool_set_eee(net->phydev, edata);
1458 
1459 		buf = (u32)edata->tx_lpi_timer;
1460 		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1461 	} else {
1462 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1463 		buf &= ~MAC_CR_EEE_EN_;
1464 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1465 	}
1466 
1467 	usb_autopm_put_interface(dev->intf);
1468 
1469 	return 0;
1470 }
1471 
1472 static u32 lan78xx_get_link(struct net_device *net)
1473 {
1474 	phy_read_status(net->phydev);
1475 
1476 	return net->phydev->link;
1477 }
1478 
1479 static void lan78xx_get_drvinfo(struct net_device *net,
1480 				struct ethtool_drvinfo *info)
1481 {
1482 	struct lan78xx_net *dev = netdev_priv(net);
1483 
1484 	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1485 	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1486 }
1487 
1488 static u32 lan78xx_get_msglevel(struct net_device *net)
1489 {
1490 	struct lan78xx_net *dev = netdev_priv(net);
1491 
1492 	return dev->msg_enable;
1493 }
1494 
1495 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1496 {
1497 	struct lan78xx_net *dev = netdev_priv(net);
1498 
1499 	dev->msg_enable = level;
1500 }
1501 
1502 static int lan78xx_get_link_ksettings(struct net_device *net,
1503 				      struct ethtool_link_ksettings *cmd)
1504 {
1505 	struct lan78xx_net *dev = netdev_priv(net);
1506 	struct phy_device *phydev = net->phydev;
1507 	int ret;
1508 
1509 	ret = usb_autopm_get_interface(dev->intf);
1510 	if (ret < 0)
1511 		return ret;
1512 
1513 	phy_ethtool_ksettings_get(phydev, cmd);
1514 
1515 	usb_autopm_put_interface(dev->intf);
1516 
1517 	return ret;
1518 }
1519 
1520 static int lan78xx_set_link_ksettings(struct net_device *net,
1521 				      const struct ethtool_link_ksettings *cmd)
1522 {
1523 	struct lan78xx_net *dev = netdev_priv(net);
1524 	struct phy_device *phydev = net->phydev;
1525 	int ret = 0;
1526 	int temp;
1527 
1528 	ret = usb_autopm_get_interface(dev->intf);
1529 	if (ret < 0)
1530 		return ret;
1531 
1532 	/* change speed & duplex */
1533 	ret = phy_ethtool_ksettings_set(phydev, cmd);
1534 
1535 	if (!cmd->base.autoneg) {
1536 		/* force link down */
1537 		temp = phy_read(phydev, MII_BMCR);
1538 		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1539 		mdelay(1);
1540 		phy_write(phydev, MII_BMCR, temp);
1541 	}
1542 
1543 	usb_autopm_put_interface(dev->intf);
1544 
1545 	return ret;
1546 }
1547 
1548 static void lan78xx_get_pause(struct net_device *net,
1549 			      struct ethtool_pauseparam *pause)
1550 {
1551 	struct lan78xx_net *dev = netdev_priv(net);
1552 	struct phy_device *phydev = net->phydev;
1553 	struct ethtool_link_ksettings ecmd;
1554 
1555 	phy_ethtool_ksettings_get(phydev, &ecmd);
1556 
1557 	pause->autoneg = dev->fc_autoneg;
1558 
1559 	if (dev->fc_request_control & FLOW_CTRL_TX)
1560 		pause->tx_pause = 1;
1561 
1562 	if (dev->fc_request_control & FLOW_CTRL_RX)
1563 		pause->rx_pause = 1;
1564 }
1565 
1566 static int lan78xx_set_pause(struct net_device *net,
1567 			     struct ethtool_pauseparam *pause)
1568 {
1569 	struct lan78xx_net *dev = netdev_priv(net);
1570 	struct phy_device *phydev = net->phydev;
1571 	struct ethtool_link_ksettings ecmd;
1572 	int ret;
1573 
1574 	phy_ethtool_ksettings_get(phydev, &ecmd);
1575 
1576 	if (pause->autoneg && !ecmd.base.autoneg) {
1577 		ret = -EINVAL;
1578 		goto exit;
1579 	}
1580 
1581 	dev->fc_request_control = 0;
1582 	if (pause->rx_pause)
1583 		dev->fc_request_control |= FLOW_CTRL_RX;
1584 
1585 	if (pause->tx_pause)
1586 		dev->fc_request_control |= FLOW_CTRL_TX;
1587 
1588 	if (ecmd.base.autoneg) {
1589 		u32 mii_adv;
1590 		u32 advertising;
1591 
1592 		ethtool_convert_link_mode_to_legacy_u32(
1593 			&advertising, ecmd.link_modes.advertising);
1594 
1595 		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1596 		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1597 		advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1598 
1599 		ethtool_convert_legacy_u32_to_link_mode(
1600 			ecmd.link_modes.advertising, advertising);
1601 
1602 		phy_ethtool_ksettings_set(phydev, &ecmd);
1603 	}
1604 
1605 	dev->fc_autoneg = pause->autoneg;
1606 
1607 	ret = 0;
1608 exit:
1609 	return ret;
1610 }
1611 
1612 static int lan78xx_get_regs_len(struct net_device *netdev)
1613 {
1614 	if (!netdev->phydev)
1615 		return (sizeof(lan78xx_regs));
1616 	else
1617 		return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1618 }
1619 
1620 static void
1621 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1622 		 void *buf)
1623 {
1624 	u32 *data = buf;
1625 	int i, j;
1626 	struct lan78xx_net *dev = netdev_priv(netdev);
1627 
1628 	/* Read Device/MAC registers */
1629 	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
1630 		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1631 
1632 	if (!netdev->phydev)
1633 		return;
1634 
1635 	/* Read PHY registers */
1636 	for (j = 0; j < 32; i++, j++)
1637 		data[i] = phy_read(netdev->phydev, j);
1638 }
1639 
1640 static const struct ethtool_ops lan78xx_ethtool_ops = {
1641 	.get_link	= lan78xx_get_link,
1642 	.nway_reset	= phy_ethtool_nway_reset,
1643 	.get_drvinfo	= lan78xx_get_drvinfo,
1644 	.get_msglevel	= lan78xx_get_msglevel,
1645 	.set_msglevel	= lan78xx_set_msglevel,
1646 	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1647 	.get_eeprom	= lan78xx_ethtool_get_eeprom,
1648 	.set_eeprom	= lan78xx_ethtool_set_eeprom,
1649 	.get_ethtool_stats = lan78xx_get_stats,
1650 	.get_sset_count = lan78xx_get_sset_count,
1651 	.get_strings	= lan78xx_get_strings,
1652 	.get_wol	= lan78xx_get_wol,
1653 	.set_wol	= lan78xx_set_wol,
1654 	.get_eee	= lan78xx_get_eee,
1655 	.set_eee	= lan78xx_set_eee,
1656 	.get_pauseparam	= lan78xx_get_pause,
1657 	.set_pauseparam	= lan78xx_set_pause,
1658 	.get_link_ksettings = lan78xx_get_link_ksettings,
1659 	.set_link_ksettings = lan78xx_set_link_ksettings,
1660 	.get_regs_len	= lan78xx_get_regs_len,
1661 	.get_regs	= lan78xx_get_regs,
1662 };
1663 
1664 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1665 {
1666 	if (!netif_running(netdev))
1667 		return -EINVAL;
1668 
1669 	return phy_mii_ioctl(netdev->phydev, rq, cmd);
1670 }
1671 
1672 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1673 {
1674 	u32 addr_lo, addr_hi;
1675 	int ret;
1676 	u8 addr[6];
1677 
1678 	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1679 	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1680 
1681 	addr[0] = addr_lo & 0xFF;
1682 	addr[1] = (addr_lo >> 8) & 0xFF;
1683 	addr[2] = (addr_lo >> 16) & 0xFF;
1684 	addr[3] = (addr_lo >> 24) & 0xFF;
1685 	addr[4] = addr_hi & 0xFF;
1686 	addr[5] = (addr_hi >> 8) & 0xFF;
1687 
1688 	if (!is_valid_ether_addr(addr)) {
1689 		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1690 			/* valid address present in Device Tree */
1691 			netif_dbg(dev, ifup, dev->net,
1692 				  "MAC address read from Device Tree");
1693 		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1694 						 ETH_ALEN, addr) == 0) ||
1695 			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1696 					      ETH_ALEN, addr) == 0)) &&
1697 			   is_valid_ether_addr(addr)) {
1698 			/* eeprom values are valid so use them */
1699 			netif_dbg(dev, ifup, dev->net,
1700 				  "MAC address read from EEPROM");
1701 		} else {
1702 			/* generate random MAC */
1703 			eth_random_addr(addr);
1704 			netif_dbg(dev, ifup, dev->net,
1705 				  "MAC address set to random addr");
1706 		}
1707 
1708 		addr_lo = addr[0] | (addr[1] << 8) |
1709 			  (addr[2] << 16) | (addr[3] << 24);
1710 		addr_hi = addr[4] | (addr[5] << 8);
1711 
1712 		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1713 		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1714 	}
1715 
1716 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1717 	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1718 
1719 	ether_addr_copy(dev->net->dev_addr, addr);
1720 }
1721 
1722 /* MDIO read and write wrappers for phylib */
1723 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1724 {
1725 	struct lan78xx_net *dev = bus->priv;
1726 	u32 val, addr;
1727 	int ret;
1728 
1729 	ret = usb_autopm_get_interface(dev->intf);
1730 	if (ret < 0)
1731 		return ret;
1732 
1733 	mutex_lock(&dev->phy_mutex);
1734 
1735 	/* confirm MII not busy */
1736 	ret = lan78xx_phy_wait_not_busy(dev);
1737 	if (ret < 0)
1738 		goto done;
1739 
1740 	/* set the address, index & direction (read from PHY) */
1741 	addr = mii_access(phy_id, idx, MII_READ);
1742 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
1743 
1744 	ret = lan78xx_phy_wait_not_busy(dev);
1745 	if (ret < 0)
1746 		goto done;
1747 
1748 	ret = lan78xx_read_reg(dev, MII_DATA, &val);
1749 
1750 	ret = (int)(val & 0xFFFF);
1751 
1752 done:
1753 	mutex_unlock(&dev->phy_mutex);
1754 	usb_autopm_put_interface(dev->intf);
1755 
1756 	return ret;
1757 }
1758 
1759 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1760 				 u16 regval)
1761 {
1762 	struct lan78xx_net *dev = bus->priv;
1763 	u32 val, addr;
1764 	int ret;
1765 
1766 	ret = usb_autopm_get_interface(dev->intf);
1767 	if (ret < 0)
1768 		return ret;
1769 
1770 	mutex_lock(&dev->phy_mutex);
1771 
1772 	/* confirm MII not busy */
1773 	ret = lan78xx_phy_wait_not_busy(dev);
1774 	if (ret < 0)
1775 		goto done;
1776 
1777 	val = (u32)regval;
1778 	ret = lan78xx_write_reg(dev, MII_DATA, val);
1779 
1780 	/* set the address, index & direction (write to PHY) */
1781 	addr = mii_access(phy_id, idx, MII_WRITE);
1782 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
1783 
1784 	ret = lan78xx_phy_wait_not_busy(dev);
1785 	if (ret < 0)
1786 		goto done;
1787 
1788 done:
1789 	mutex_unlock(&dev->phy_mutex);
1790 	usb_autopm_put_interface(dev->intf);
1791 	return 0;
1792 }
1793 
1794 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1795 {
1796 	struct device_node *node;
1797 	int ret;
1798 
1799 	dev->mdiobus = mdiobus_alloc();
1800 	if (!dev->mdiobus) {
1801 		netdev_err(dev->net, "can't allocate MDIO bus\n");
1802 		return -ENOMEM;
1803 	}
1804 
1805 	dev->mdiobus->priv = (void *)dev;
1806 	dev->mdiobus->read = lan78xx_mdiobus_read;
1807 	dev->mdiobus->write = lan78xx_mdiobus_write;
1808 	dev->mdiobus->name = "lan78xx-mdiobus";
1809 
1810 	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1811 		 dev->udev->bus->busnum, dev->udev->devnum);
1812 
1813 	switch (dev->chipid) {
1814 	case ID_REV_CHIP_ID_7800_:
1815 	case ID_REV_CHIP_ID_7850_:
1816 		/* set to internal PHY id */
1817 		dev->mdiobus->phy_mask = ~(1 << 1);
1818 		break;
1819 	case ID_REV_CHIP_ID_7801_:
1820 		/* scan through PHYAD[2..0] */
1821 		dev->mdiobus->phy_mask = ~(0xFF);
1822 		break;
1823 	}
1824 
1825 	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
1826 	ret = of_mdiobus_register(dev->mdiobus, node);
1827 	of_node_put(node);
1828 	if (ret) {
1829 		netdev_err(dev->net, "can't register MDIO bus\n");
1830 		goto exit1;
1831 	}
1832 
1833 	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1834 	return 0;
1835 exit1:
1836 	mdiobus_free(dev->mdiobus);
1837 	return ret;
1838 }
1839 
1840 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1841 {
1842 	mdiobus_unregister(dev->mdiobus);
1843 	mdiobus_free(dev->mdiobus);
1844 }
1845 
1846 static void lan78xx_link_status_change(struct net_device *net)
1847 {
1848 	struct phy_device *phydev = net->phydev;
1849 	int ret, temp;
1850 
1851 	/* In forced 100 Full/Half duplex mode, the chip may fail to set the
1852 	 * speed correctly when the cable is switched between a long (~50+ m)
1853 	 * one and a short one.
1854 	 * As a workaround, set the speed to 10 before setting it to 100.
1855 	 */
1856 	if (!phydev->autoneg && (phydev->speed == 100)) {
1857 		/* disable phy interrupt */
1858 		temp = phy_read(phydev, LAN88XX_INT_MASK);
1859 		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1860 		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1861 
1862 		temp = phy_read(phydev, MII_BMCR);
1863 		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1864 		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1865 		temp |= BMCR_SPEED100;
1866 		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1867 
1868 		/* clear the pending interrupt generated during the workaround */
1869 		temp = phy_read(phydev, LAN88XX_INT_STS);
1870 
1871 		/* re-enable the phy interrupt */
1872 		temp = phy_read(phydev, LAN88XX_INT_MASK);
1873 		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1874 		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1875 	}
1876 }
1877 
1878 static int irq_map(struct irq_domain *d, unsigned int irq,
1879 		   irq_hw_number_t hwirq)
1880 {
1881 	struct irq_domain_data *data = d->host_data;
1882 
1883 	irq_set_chip_data(irq, data);
1884 	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1885 	irq_set_noprobe(irq);
1886 
1887 	return 0;
1888 }
1889 
1890 static void irq_unmap(struct irq_domain *d, unsigned int irq)
1891 {
1892 	irq_set_chip_and_handler(irq, NULL, NULL);
1893 	irq_set_chip_data(irq, NULL);
1894 }
1895 
1896 static const struct irq_domain_ops chip_domain_ops = {
1897 	.map	= irq_map,
1898 	.unmap	= irq_unmap,
1899 };
1900 
1901 static void lan78xx_irq_mask(struct irq_data *irqd)
1902 {
1903 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1904 
1905 	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1906 }
1907 
1908 static void lan78xx_irq_unmask(struct irq_data *irqd)
1909 {
1910 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1911 
1912 	data->irqenable |= BIT(irqd_to_hwirq(irqd));
1913 }
1914 
1915 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1916 {
1917 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1918 
1919 	mutex_lock(&data->irq_lock);
1920 }
1921 
1922 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1923 {
1924 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1925 	struct lan78xx_net *dev =
1926 			container_of(data, struct lan78xx_net, domain_data);
1927 	u32 buf;
1928 	int ret;
1929 
1930 	/* do the register access here because irq_bus_lock & irq_bus_sync_unlock
1931 	 * are the only two callbacks executed in a non-atomic context.
1932 	 */
1933 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1934 	if (buf != data->irqenable)
1935 		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1936 
1937 	mutex_unlock(&data->irq_lock);
1938 }
1939 
1940 static struct irq_chip lan78xx_irqchip = {
1941 	.name			= "lan78xx-irqs",
1942 	.irq_mask		= lan78xx_irq_mask,
1943 	.irq_unmask		= lan78xx_irq_unmask,
1944 	.irq_bus_lock		= lan78xx_irq_bus_lock,
1945 	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
1946 };
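/* Usage sketch (illustrative only; example_phy_thread_fn is a
 * hypothetical handler name): INT_EP_CTL can only be reached over USB,
 * so this is a "slow bus" irqchip. irq_mask/irq_unmask merely update
 * data->irqenable, and irq_bus_sync_unlock() flushes that value to the
 * hardware. A consumer therefore takes the mapped interrupt with a
 * threaded handler:
 *
 *	int irq = dev->domain_data.phyirq;
 *	int ret = request_threaded_irq(irq, NULL, example_phy_thread_fn,
 *				       IRQF_ONESHOT, "lan78xx-phy", phydev);
 */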
1947 
1948 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1949 {
1950 	struct device_node *of_node;
1951 	struct irq_domain *irqdomain;
1952 	unsigned int irqmap = 0;
1953 	u32 buf;
1954 	int ret = 0;
1955 
1956 	of_node = dev->udev->dev.parent->of_node;
1957 
1958 	mutex_init(&dev->domain_data.irq_lock);
1959 
1960 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1961 	dev->domain_data.irqenable = buf;
1962 
1963 	dev->domain_data.irqchip = &lan78xx_irqchip;
1964 	dev->domain_data.irq_handler = handle_simple_irq;
1965 
1966 	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1967 					  &chip_domain_ops, &dev->domain_data);
1968 	if (irqdomain) {
1969 		/* create mapping for PHY interrupt */
1970 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1971 		if (!irqmap) {
1972 			irq_domain_remove(irqdomain);
1973 
1974 			irqdomain = NULL;
1975 			ret = -EINVAL;
1976 		}
1977 	} else {
1978 		ret = -EINVAL;
1979 	}
1980 
1981 	dev->domain_data.irqdomain = irqdomain;
1982 	dev->domain_data.phyirq = irqmap;
1983 
1984 	return ret;
1985 }
1986 
1987 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1988 {
1989 	if (dev->domain_data.phyirq > 0) {
1990 		irq_dispose_mapping(dev->domain_data.phyirq);
1991 
1992 		if (dev->domain_data.irqdomain)
1993 			irq_domain_remove(dev->domain_data.irqdomain);
1994 	}
1995 	dev->domain_data.phyirq = 0;
1996 	dev->domain_data.irqdomain = NULL;
1997 }
1998 
1999 static int lan8835_fixup(struct phy_device *phydev)
2000 {
2001 	int buf;
2002 	int ret;
2003 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2004 
2005 	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2006 	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2007 	buf &= ~0x1800;
2008 	buf |= 0x0800;
2009 	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2010 
2011 	/* RGMII MAC TXC Delay Enable */
2012 	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2013 				MAC_RGMII_ID_TXC_DELAY_EN_);
2014 
2015 	/* RGMII TX DLL Tune Adjust */
2016 	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2017 
2018 	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2019 
2020 	return 1;
2021 }
2022 
2023 static int ksz9031rnx_fixup(struct phy_device *phydev)
2024 {
2025 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2026 
2027 	/* Micrel KSZ9031RNX PHY configuration */
2028 	/* RGMII Control Signal Pad Skew */
2029 	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2030 	/* RGMII RX Data Pad Skew */
2031 	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2032 	/* RGMII RX Clock Pad Skew */
2033 	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2034 
2035 	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2036 
2037 	return 1;
2038 }
2039 
2040 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2041 {
2042 	u32 buf;
2043 	int ret;
2044 	struct fixed_phy_status fphy_status = {
2045 		.link = 1,
2046 		.speed = SPEED_1000,
2047 		.duplex = DUPLEX_FULL,
2048 	};
2049 	struct phy_device *phydev;
2050 
2051 	phydev = phy_find_first(dev->mdiobus);
2052 	if (!phydev) {
2053 		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2054 		phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1,
2055 					    NULL);
2056 		if (IS_ERR(phydev)) {
2057 			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2058 			return NULL;
2059 		}
2060 		netdev_dbg(dev->net, "Registered FIXED PHY\n");
2061 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2062 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2063 					MAC_RGMII_ID_TXC_DELAY_EN_);
2064 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2065 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2066 		buf |= HW_CFG_CLK125_EN_;
2067 		buf |= HW_CFG_REFCLK25_EN_;
2068 		ret = lan78xx_write_reg(dev, HW_CFG, buf);
2069 	} else {
2070 		if (!phydev->drv) {
2071 			netdev_err(dev->net, "no PHY driver found\n");
2072 			return NULL;
2073 		}
2074 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2075 		/* external PHY fixup for KSZ9031RNX */
2076 		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2077 						 ksz9031rnx_fixup);
2078 		if (ret < 0) {
2079 			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2080 			return NULL;
2081 		}
2082 		/* external PHY fixup for LAN8835 */
2083 		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2084 						 lan8835_fixup);
2085 		if (ret < 0) {
2086 			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2087 			return NULL;
2088 		}
2089 		/* add more external PHY fixup here if needed */
2090 
2091 		phydev->is_internal = false;
2092 	}
2093 	return phydev;
2094 }
2095 
2096 static int lan78xx_phy_init(struct lan78xx_net *dev)
2097 {
2098 	int ret;
2099 	u32 mii_adv;
2100 	struct phy_device *phydev;
2101 
2102 	switch (dev->chipid) {
2103 	case ID_REV_CHIP_ID_7801_:
2104 		phydev = lan7801_phy_init(dev);
2105 		if (!phydev) {
2106 			netdev_err(dev->net, "lan7801: PHY Init Failed\n");
2107 			return -EIO;
2108 		}
2109 		break;
2110 
2111 	case ID_REV_CHIP_ID_7800_:
2112 	case ID_REV_CHIP_ID_7850_:
2113 		phydev = phy_find_first(dev->mdiobus);
2114 		if (!phydev) {
2115 			netdev_err(dev->net, "no PHY found\n");
2116 			return -EIO;
2117 		}
2118 		phydev->is_internal = true;
2119 		dev->interface = PHY_INTERFACE_MODE_GMII;
2120 		break;
2121 
2122 	default:
2123 		netdev_err(dev->net, "Unknown CHIP ID found\n");
2124 		return -EIO;
2125 	}
2126 
2127 	/* if phyirq is not set, use polling mode in phylib */
2128 	if (dev->domain_data.phyirq > 0)
2129 		phydev->irq = dev->domain_data.phyirq;
2130 	else
2131 		phydev->irq = 0;
2132 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2133 
2134 	/* set to AUTOMDIX */
2135 	phydev->mdix = ETH_TP_MDI_AUTO;
2136 
2137 	ret = phy_connect_direct(dev->net, phydev,
2138 				 lan78xx_link_status_change,
2139 				 dev->interface);
2140 	if (ret) {
2141 		netdev_err(dev->net, "can't attach PHY to %s\n",
2142 			   dev->mdiobus->id);
2143 		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2144 			if (phy_is_pseudo_fixed_link(phydev)) {
2145 				fixed_phy_unregister(phydev);
2146 			} else {
2147 				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2148 							     0xfffffff0);
2149 				phy_unregister_fixup_for_uid(PHY_LAN8835,
2150 							     0xfffffff0);
2151 			}
2152 		}
2153 		return -EIO;
2154 	}
2155 
2156 	/* MAC doesn't support 1000T Half */
2157 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2158 
2159 	/* support both flow controls */
2160 	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2161 	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2162 	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2163 	phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2164 
2165 	if (phydev->mdio.dev.of_node) {
2166 		u32 reg;
2167 		int len;
2168 
2169 		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2170 						      "microchip,led-modes",
2171 						      sizeof(u32));
2172 		if (len >= 0) {
2173 			/* Ensure the appropriate LEDs are enabled */
2174 			lan78xx_read_reg(dev, HW_CFG, &reg);
2175 			reg &= ~(HW_CFG_LED0_EN_ |
2176 				 HW_CFG_LED1_EN_ |
2177 				 HW_CFG_LED2_EN_ |
2178 				 HW_CFG_LED3_EN_);
2179 			reg |= (len > 0) * HW_CFG_LED0_EN_ |
2180 				(len > 1) * HW_CFG_LED1_EN_ |
2181 				(len > 2) * HW_CFG_LED2_EN_ |
2182 				(len > 3) * HW_CFG_LED3_EN_;
2183 			lan78xx_write_reg(dev, HW_CFG, reg);
2184 		}
2185 	}
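	/* Worked example: a two-entry "microchip,led-modes" property gives
	 * len == 2, so the expression above reduces to
	 *	1 * HW_CFG_LED0_EN_ | 1 * HW_CFG_LED1_EN_ |
	 *	0 * HW_CFG_LED2_EN_ | 0 * HW_CFG_LED3_EN_
	 * i.e. only LED0 and LED1 stay enabled.
	 */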
2186 
2187 	genphy_config_aneg(phydev);
2188 
2189 	dev->fc_autoneg = phydev->autoneg;
2190 
2191 	return 0;
2192 }
2193 
2194 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2195 {
2196 	int ret = 0;
2197 	u32 buf;
2198 	bool rxenabled;
2199 
2200 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2201 
2202 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2203 
2204 	if (rxenabled) {
2205 		buf &= ~MAC_RX_RXEN_;
2206 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2207 	}
2208 
2209 	/* add 4 to size for FCS */
2210 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2211 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2212 
2213 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2214 
2215 	if (rxenabled) {
2216 		buf |= MAC_RX_RXEN_;
2217 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2218 	}
2219 
2220 	return 0;
2221 }
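/* Worked example: for the default MTU of 1500 the callers pass
 * size = 1500 + VLAN_ETH_HLEN = 1518, so 1518 + 4 = 1522 bytes is
 * programmed as the maximum frame length, leaving room for a
 * maximum-size VLAN-tagged frame plus its 4-byte FCS.
 */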
2222 
2223 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2224 {
2225 	struct sk_buff *skb;
2226 	unsigned long flags;
2227 	int count = 0;
2228 
2229 	spin_lock_irqsave(&q->lock, flags);
2230 	while (!skb_queue_empty(q)) {
2231 		struct skb_data	*entry;
2232 		struct urb *urb;
2233 		int ret;
2234 
2235 		skb_queue_walk(q, skb) {
2236 			entry = (struct skb_data *)skb->cb;
2237 			if (entry->state != unlink_start)
2238 				goto found;
2239 		}
2240 		break;
2241 found:
2242 		entry->state = unlink_start;
2243 		urb = entry->urb;
2244 
2245 		/* Take a reference on the URB so that it cannot be
2246 		 * freed while usb_unlink_urb() runs; otherwise a
2247 		 * use-after-free could be triggered inside
2248 		 * usb_unlink_urb(), which always races with the
2249 		 * .complete handler (including defer_bh).
2250 		 */
2251 		usb_get_urb(urb);
2252 		spin_unlock_irqrestore(&q->lock, flags);
2253 		/* during some PM-driven resume scenarios,
2254 		 * these (async) unlinks complete immediately
2255 		 */
2256 		ret = usb_unlink_urb(urb);
2257 		if (ret != -EINPROGRESS && ret != 0)
2258 			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2259 		else
2260 			count++;
2261 		usb_put_urb(urb);
2262 		spin_lock_irqsave(&q->lock, flags);
2263 	}
2264 	spin_unlock_irqrestore(&q->lock, flags);
2265 	return count;
2266 }
2267 
2268 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2269 {
2270 	struct lan78xx_net *dev = netdev_priv(netdev);
2271 	int ll_mtu = new_mtu + netdev->hard_header_len;
2272 	int old_hard_mtu = dev->hard_mtu;
2273 	int old_rx_urb_size = dev->rx_urb_size;
2274 	int ret;
2275 
2276 	/* no second zero-length packet read wanted after mtu-sized packets */
2277 	if ((ll_mtu % dev->maxpacket) == 0)
2278 		return -EDOM;
2279 
2280 	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2281 
2282 	netdev->mtu = new_mtu;
2283 
2284 	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2285 	if (dev->rx_urb_size == old_hard_mtu) {
2286 		dev->rx_urb_size = dev->hard_mtu;
2287 		if (dev->rx_urb_size > old_rx_urb_size) {
2288 			if (netif_running(dev->net)) {
2289 				unlink_urbs(dev, &dev->rxq);
2290 				tasklet_schedule(&dev->bh);
2291 			}
2292 		}
2293 	}
2294 
2295 	return 0;
2296 }
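/* Worked example: the -EDOM check rejects MTUs whose link-layer size
 * is an exact multiple of the bulk endpoint's packet size, since such
 * a transfer would require a trailing zero-length packet. Assuming
 * the usual high-speed maxpacket of 512 and this driver's
 * hard_header_len of ETH_HLEN + TX_OVERHEAD = 22, an MTU of 1514
 * (ll_mtu = 1536 = 3 * 512) is refused while 1500 is accepted.
 */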
2297 
2298 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2299 {
2300 	struct lan78xx_net *dev = netdev_priv(netdev);
2301 	struct sockaddr *addr = p;
2302 	u32 addr_lo, addr_hi;
2303 	int ret;
2304 
2305 	if (netif_running(netdev))
2306 		return -EBUSY;
2307 
2308 	if (!is_valid_ether_addr(addr->sa_data))
2309 		return -EADDRNOTAVAIL;
2310 
2311 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
2312 
2313 	addr_lo = netdev->dev_addr[0] |
2314 		  netdev->dev_addr[1] << 8 |
2315 		  netdev->dev_addr[2] << 16 |
2316 		  netdev->dev_addr[3] << 24;
2317 	addr_hi = netdev->dev_addr[4] |
2318 		  netdev->dev_addr[5] << 8;
2319 
2320 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2321 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2322 
2323 	/* Added to support MAC address changes */
2324 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2325 	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2326 
2327 	return 0;
2328 }
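/* Worked example: for the address 00:80:0f:12:34:56 the packing
 * above yields
 *	addr_lo = 0x120f8000	(bytes 0..3)
 *	addr_hi = 0x00005634	(bytes 4..5)
 * and MAF_HI(0) also gets MAF_HI_VALID_ set, so perfect-filter
 * slot 0 matches the new address.
 */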
2329 
2330 /* Enable or disable Rx checksum offload engine */
2331 static int lan78xx_set_features(struct net_device *netdev,
2332 				netdev_features_t features)
2333 {
2334 	struct lan78xx_net *dev = netdev_priv(netdev);
2335 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2336 	unsigned long flags;
2337 	int ret;
2338 
2339 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2340 
2341 	if (features & NETIF_F_RXCSUM) {
2342 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2343 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2344 	} else {
2345 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2346 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2347 	}
2348 
2349 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2350 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2351 	else
2352 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2353 
2354 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2355 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2356 	else
2357 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2358 
2359 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2360 
2361 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2362 
2363 	return 0;
2364 }
2365 
2366 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2367 {
2368 	struct lan78xx_priv *pdata =
2369 			container_of(param, struct lan78xx_priv, set_vlan);
2370 	struct lan78xx_net *dev = pdata->dev;
2371 
2372 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2373 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2374 }
2375 
2376 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2377 				   __be16 proto, u16 vid)
2378 {
2379 	struct lan78xx_net *dev = netdev_priv(netdev);
2380 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2381 	u16 vid_bit_index;
2382 	u16 vid_dword_index;
2383 
2384 	vid_dword_index = (vid >> 5) & 0x7F;
2385 	vid_bit_index = vid & 0x1F;
2386 
2387 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2388 
2389 	/* defer register writes to a sleepable context */
2390 	schedule_work(&pdata->set_vlan);
2391 
2392 	return 0;
2393 }
2394 
2395 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2396 				    __be16 proto, u16 vid)
2397 {
2398 	struct lan78xx_net *dev = netdev_priv(netdev);
2399 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2400 	u16 vid_bit_index;
2401 	u16 vid_dword_index;
2402 
2403 	vid_dword_index = (vid >> 5) & 0x7F;
2404 	vid_bit_index = vid & 0x1F;
2405 
2406 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2407 
2408 	/* defer register writes to a sleepable context */
2409 	schedule_work(&pdata->set_vlan);
2410 
2411 	return 0;
2412 }
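/* Worked example: the 4096 possible VIDs live in a 128-entry table
 * of 32-bit words. For vid = 100:
 *	vid_dword_index = (100 >> 5) & 0x7F = 3
 *	vid_bit_index   = 100 & 0x1F       = 4
 * so adding the VID sets bit 4 of vlan_table[3] and killing it
 * clears the same bit; the deferred work then writes the table to
 * the device's VLAN filter.
 */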
2413 
2414 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2415 {
2416 	int ret;
2417 	u32 buf;
2418 	u32 regs[6] = { 0 };
2419 
2420 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2421 	if (buf & USB_CFG1_LTM_ENABLE_) {
2422 		u8 temp[2];
2423 		/* Get values from EEPROM first */
2424 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2425 			if (temp[0] == 24) {
2426 				ret = lan78xx_read_raw_eeprom(dev,
2427 							      temp[1] * 2,
2428 							      24,
2429 							      (u8 *)regs);
2430 				if (ret < 0)
2431 					return;
2432 			}
2433 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2434 			if (temp[0] == 24) {
2435 				ret = lan78xx_read_raw_otp(dev,
2436 							   temp[1] * 2,
2437 							   24,
2438 							   (u8 *)regs);
2439 				if (ret < 0)
2440 					return;
2441 			}
2442 		}
2443 	}
2444 
2445 	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2446 	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2447 	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2448 	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2449 	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2450 	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2451 }
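/* NOTE: the two bytes read at EEPROM/OTP offset 0x3F are a
 * length/pointer pair: temp[0] must equal 24 (six 32-bit LTM
 * registers) and temp[1] is a word offset, hence the temp[1] * 2
 * byte address for the raw read. If no valid configuration is
 * found, the six LTM registers are simply cleared to zero.
 */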
2452 
2453 static int lan78xx_reset(struct lan78xx_net *dev)
2454 {
2455 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2456 	u32 buf;
2457 	int ret = 0;
2458 	unsigned long timeout;
2459 	u8 sig;
2460 
2461 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2462 	buf |= HW_CFG_LRST_;
2463 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2464 
2465 	timeout = jiffies + HZ;
2466 	do {
2467 		mdelay(1);
2468 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2469 		if (time_after(jiffies, timeout)) {
2470 			netdev_warn(dev->net,
2471 				    "timeout on completion of LiteReset");
2472 			return -EIO;
2473 		}
2474 	} while (buf & HW_CFG_LRST_);
2475 
2476 	lan78xx_init_mac_address(dev);
2477 
2478 	/* save the chip ID and revision for later use */
2479 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
2480 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2481 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2482 
2483 	/* Respond to the IN token with a NAK */
2484 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2485 	buf |= USB_CFG_BIR_;
2486 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2487 
2488 	/* Init LTM */
2489 	lan78xx_init_ltm(dev);
2490 
2491 	if (dev->udev->speed == USB_SPEED_SUPER) {
2492 		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2493 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2494 		dev->rx_qlen = 4;
2495 		dev->tx_qlen = 4;
2496 	} else if (dev->udev->speed == USB_SPEED_HIGH) {
2497 		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2498 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2499 		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2500 		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2501 	} else {
2502 		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2503 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2504 		dev->rx_qlen = 4;
2505 		dev->tx_qlen = 4;
2506 	}
2507 
2508 	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2509 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2510 
2511 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2512 	buf |= HW_CFG_MEF_;
2513 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2514 
2515 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2516 	buf |= USB_CFG_BCE_;
2517 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2518 
2519 	/* set FIFO sizes */
2520 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2521 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2522 
2523 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2524 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2525 
2526 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2527 	ret = lan78xx_write_reg(dev, FLOW, 0);
2528 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2529 
2530 	/* Don't need rfe_ctl_lock during initialisation */
2531 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2532 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2533 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2534 
2535 	/* Enable or disable checksum offload engines */
2536 	lan78xx_set_features(dev->net, dev->net->features);
2537 
2538 	lan78xx_set_multicast(dev->net);
2539 
2540 	/* reset PHY */
2541 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2542 	buf |= PMT_CTL_PHY_RST_;
2543 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2544 
2545 	timeout = jiffies + HZ;
2546 	do {
2547 		mdelay(1);
2548 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2549 		if (time_after(jiffies, timeout)) {
2550 			netdev_warn(dev->net, "timeout waiting for PHY Reset");
2551 			return -EIO;
2552 		}
2553 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2554 
2555 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2556 	/* LAN7801 only has RGMII mode */
2557 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
2558 		buf &= ~MAC_CR_GMII_EN_;
2559 
2560 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2561 		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2562 		if (!ret && sig != EEPROM_INDICATOR) {
2563 			/* No external EEPROM; let the MAC auto-detect speed and duplex */
2564 			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2565 			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2566 		}
2567 	}
2568 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
2569 
2570 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2571 	buf |= MAC_TX_TXEN_;
2572 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
2573 
2574 	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2575 	buf |= FCT_TX_CTL_EN_;
2576 	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2577 
2578 	ret = lan78xx_set_rx_max_frame_length(dev,
2579 					      dev->net->mtu + VLAN_ETH_HLEN);
2580 
2581 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2582 	buf |= MAC_RX_RXEN_;
2583 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2584 
2585 	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2586 	buf |= FCT_RX_CTL_EN_;
2587 	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2588 
2589 	return 0;
2590 }
2591 
2592 static void lan78xx_init_stats(struct lan78xx_net *dev)
2593 {
2594 	u32 *p;
2595 	int i;
2596 
2597 	/* initialize the rollover limits for the stats update;
2598 	 * some hardware counters are 20 bits wide and some are 32 bits
2599 	 */
2600 	p = (u32 *)&dev->stats.rollover_max;
2601 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2602 		p[i] = 0xFFFFF;
2603 
2604 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2605 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2606 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2607 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2608 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2609 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2610 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2611 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2612 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2613 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2614 
2615 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
2616 }
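/* NOTE: most hardware counters are 20 bits wide and wrap at 0xFFFFF;
 * the byte and EEE-time counters overridden above are full 32-bit
 * counters wrapping at 0xFFFFFFFF. The statistics update path is
 * expected to compare each counter against these rollover_max limits
 * to detect wraparound (the accumulation code lives elsewhere in
 * this file).
 */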
2617 
2618 static int lan78xx_open(struct net_device *net)
2619 {
2620 	struct lan78xx_net *dev = netdev_priv(net);
2621 	int ret;
2622 
2623 	ret = usb_autopm_get_interface(dev->intf);
2624 	if (ret < 0)
2625 		goto out;
2626 
2627 	phy_start(net->phydev);
2628 
2629 	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2630 
2631 	/* submit the interrupt URB for link status checking */
2632 	if (dev->urb_intr) {
2633 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2634 		if (ret < 0) {
2635 			netif_err(dev, ifup, dev->net,
2636 				  "intr submit %d\n", ret);
2637 			goto done;
2638 		}
2639 	}
2640 
2641 	lan78xx_init_stats(dev);
2642 
2643 	set_bit(EVENT_DEV_OPEN, &dev->flags);
2644 
2645 	netif_start_queue(net);
2646 
2647 	dev->link_on = false;
2648 
2649 	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2650 done:
2651 	usb_autopm_put_interface(dev->intf);
2652 
2653 out:
2654 	return ret;
2655 }
2656 
2657 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2658 {
2659 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2660 	DECLARE_WAITQUEUE(wait, current);
2661 	int temp;
2662 
2663 	/* ensure there are no more active urbs */
2664 	add_wait_queue(&unlink_wakeup, &wait);
2665 	set_current_state(TASK_UNINTERRUPTIBLE);
2666 	dev->wait = &unlink_wakeup;
2667 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2668 
2669 	/* maybe wait for deletions to finish. */
2670 	while (!skb_queue_empty(&dev->rxq) &&
2671 	       !skb_queue_empty(&dev->txq) &&
2672 	       !skb_queue_empty(&dev->done)) {
2673 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2674 		set_current_state(TASK_UNINTERRUPTIBLE);
2675 		netif_dbg(dev, ifdown, dev->net,
2676 			  "waited for %d urb completions\n", temp);
2677 	}
2678 	set_current_state(TASK_RUNNING);
2679 	dev->wait = NULL;
2680 	remove_wait_queue(&unlink_wakeup, &wait);
2681 }
2682 
2683 static int lan78xx_stop(struct net_device *net)
2684 {
2685 	struct lan78xx_net *dev = netdev_priv(net);
2686 
2687 	if (timer_pending(&dev->stat_monitor))
2688 		del_timer_sync(&dev->stat_monitor);
2689 
2690 	if (net->phydev)
2691 		phy_stop(net->phydev);
2692 
2693 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
2694 	netif_stop_queue(net);
2695 
2696 	netif_info(dev, ifdown, dev->net,
2697 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2698 		   net->stats.rx_packets, net->stats.tx_packets,
2699 		   net->stats.rx_errors, net->stats.tx_errors);
2700 
2701 	lan78xx_terminate_urbs(dev);
2702 
2703 	usb_kill_urb(dev->urb_intr);
2704 
2705 	skb_queue_purge(&dev->rxq_pause);
2706 
2707 	/* deferred work (task, timer, softirq) must also stop.
2708 	 * can't flush_scheduled_work() until we drop rtnl (later),
2709 	 * else workers could deadlock; so make workers a NOP.
2710 	 */
2711 	dev->flags = 0;
2712 	cancel_delayed_work_sync(&dev->wq);
2713 	tasklet_kill(&dev->bh);
2714 
2715 	usb_autopm_put_interface(dev->intf);
2716 
2717 	return 0;
2718 }
2719 
2720 static int lan78xx_linearize(struct sk_buff *skb)
2721 {
2722 	return skb_linearize(skb);
2723 }
2724 
2725 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2726 				       struct sk_buff *skb, gfp_t flags)
2727 {
2728 	u32 tx_cmd_a, tx_cmd_b;
2729 
2730 	if (skb_cow_head(skb, TX_OVERHEAD)) {
2731 		dev_kfree_skb_any(skb);
2732 		return NULL;
2733 	}
2734 
2735 	if (lan78xx_linearize(skb) < 0)
2736 		return NULL;
2737 
2738 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2739 
2740 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2741 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2742 
2743 	tx_cmd_b = 0;
2744 	if (skb_is_gso(skb)) {
2745 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2746 
2747 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2748 
2749 		tx_cmd_a |= TX_CMD_A_LSO_;
2750 	}
2751 
2752 	if (skb_vlan_tag_present(skb)) {
2753 		tx_cmd_a |= TX_CMD_A_IVTG_;
2754 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2755 	}
2756 
2757 	skb_push(skb, 4);
2758 	cpu_to_le32s(&tx_cmd_b);
2759 	memcpy(skb->data, &tx_cmd_b, 4);
2760 
2761 	skb_push(skb, 4);
2762 	cpu_to_le32s(&tx_cmd_a);
2763 	memcpy(skb->data, &tx_cmd_a, 4);
2764 
2765 	return skb;
2766 }
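/* Frame layout after lan78xx_tx_prep(): two little-endian command
 * words are pushed in front of the Ethernet frame:
 *
 *	+----------+----------+------------------------+
 *	| tx_cmd_a | tx_cmd_b | Ethernet frame ...     |
 *	| 4 bytes  | 4 bytes  |                        |
 *	+----------+----------+------------------------+
 *
 * Worked example: a plain 60-byte frame with no checksum offload,
 * GSO or VLAN tag gets tx_cmd_a = 60 | TX_CMD_A_FCS_ and
 * tx_cmd_b = 0.
 */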
2767 
2768 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2769 			       struct sk_buff_head *list, enum skb_state state)
2770 {
2771 	unsigned long flags;
2772 	enum skb_state old_state;
2773 	struct skb_data *entry = (struct skb_data *)skb->cb;
2774 
2775 	spin_lock_irqsave(&list->lock, flags);
2776 	old_state = entry->state;
2777 	entry->state = state;
2778 
2779 	__skb_unlink(skb, list);
2780 	spin_unlock(&list->lock);
2781 	spin_lock(&dev->done.lock);
2782 
2783 	__skb_queue_tail(&dev->done, skb);
2784 	if (skb_queue_len(&dev->done) == 1)
2785 		tasklet_schedule(&dev->bh);
2786 	spin_unlock_irqrestore(&dev->done.lock, flags);
2787 
2788 	return old_state;
2789 }
2790 
2791 static void tx_complete(struct urb *urb)
2792 {
2793 	struct sk_buff *skb = (struct sk_buff *)urb->context;
2794 	struct skb_data *entry = (struct skb_data *)skb->cb;
2795 	struct lan78xx_net *dev = entry->dev;
2796 
2797 	if (urb->status == 0) {
2798 		dev->net->stats.tx_packets += entry->num_of_packet;
2799 		dev->net->stats.tx_bytes += entry->length;
2800 	} else {
2801 		dev->net->stats.tx_errors++;
2802 
2803 		switch (urb->status) {
2804 		case -EPIPE:
2805 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2806 			break;
2807 
2808 		/* software-driven interface shutdown */
2809 		case -ECONNRESET:
2810 		case -ESHUTDOWN:
2811 			break;
2812 
2813 		case -EPROTO:
2814 		case -ETIME:
2815 		case -EILSEQ:
2816 			netif_stop_queue(dev->net);
2817 			break;
2818 		default:
2819 			netif_dbg(dev, tx_err, dev->net,
2820 				  "tx err %d\n", entry->urb->status);
2821 			break;
2822 		}
2823 	}
2824 
2825 	usb_autopm_put_interface_async(dev->intf);
2826 
2827 	defer_bh(dev, skb, &dev->txq, tx_done);
2828 }
2829 
2830 static void lan78xx_queue_skb(struct sk_buff_head *list,
2831 			      struct sk_buff *newsk, enum skb_state state)
2832 {
2833 	struct skb_data *entry = (struct skb_data *)newsk->cb;
2834 
2835 	__skb_queue_tail(list, newsk);
2836 	entry->state = state;
2837 }
2838 
2839 static netdev_tx_t
2840 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2841 {
2842 	struct lan78xx_net *dev = netdev_priv(net);
2843 	struct sk_buff *skb2 = NULL;
2844 
2845 	if (skb) {
2846 		skb_tx_timestamp(skb);
2847 		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2848 	}
2849 
2850 	if (skb2) {
2851 		skb_queue_tail(&dev->txq_pend, skb2);
2852 
2853 		/* throttle the TX path at speeds below SuperSpeed USB */
2854 		if ((dev->udev->speed < USB_SPEED_SUPER) &&
2855 		    (skb_queue_len(&dev->txq_pend) > 10))
2856 			netif_stop_queue(net);
2857 	} else {
2858 		netif_dbg(dev, tx_err, dev->net,
2859 			  "lan78xx_tx_prep return NULL\n");
2860 		dev->net->stats.tx_errors++;
2861 		dev->net->stats.tx_dropped++;
2862 	}
2863 
2864 	tasklet_schedule(&dev->bh);
2865 
2866 	return NETDEV_TX_OK;
2867 }
2868 
2869 static int
2870 lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2871 {
2872 	int tmp;
2873 	struct usb_host_interface *alt = NULL;
2874 	struct usb_host_endpoint *in = NULL, *out = NULL;
2875 	struct usb_host_endpoint *status = NULL;
2876 
2877 	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2878 		unsigned ep;
2879 
2880 		in = NULL;
2881 		out = NULL;
2882 		status = NULL;
2883 		alt = intf->altsetting + tmp;
2884 
2885 		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2886 			struct usb_host_endpoint *e;
2887 			int intr = 0;
2888 
2889 			e = alt->endpoint + ep;
2890 			switch (e->desc.bmAttributes) {
2891 			case USB_ENDPOINT_XFER_INT:
2892 				if (!usb_endpoint_dir_in(&e->desc))
2893 					continue;
2894 				intr = 1;
2895 				/* FALLTHROUGH */
2896 			case USB_ENDPOINT_XFER_BULK:
2897 				break;
2898 			default:
2899 				continue;
2900 			}
2901 			if (usb_endpoint_dir_in(&e->desc)) {
2902 				if (!intr && !in)
2903 					in = e;
2904 				else if (intr && !status)
2905 					status = e;
2906 			} else {
2907 				if (!out)
2908 					out = e;
2909 			}
2910 		}
2911 		if (in && out)
2912 			break;
2913 	}
2914 	if (!alt || !in || !out)
2915 		return -EINVAL;
2916 
2917 	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2918 				       in->desc.bEndpointAddress &
2919 				       USB_ENDPOINT_NUMBER_MASK);
2920 	dev->pipe_out = usb_sndbulkpipe(dev->udev,
2921 					out->desc.bEndpointAddress &
2922 					USB_ENDPOINT_NUMBER_MASK);
2923 	dev->ep_intr = status;
2924 
2925 	return 0;
2926 }
2927 
2928 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2929 {
2930 	struct lan78xx_priv *pdata = NULL;
2931 	int ret;
2932 	int i;
2933 
2934 	ret = lan78xx_get_endpoints(dev, intf);
2935 	if (ret) {
2936 		netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
2937 			    ret);
2938 		return ret;
2939 	}
2940 
2941 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2942 
2943 	pdata = (struct lan78xx_priv *)(dev->data[0]);
2944 	if (!pdata) {
2945 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2946 		return -ENOMEM;
2947 	}
2948 
2949 	pdata->dev = dev;
2950 
2951 	spin_lock_init(&pdata->rfe_ctl_lock);
2952 	mutex_init(&pdata->dataport_mutex);
2953 
2954 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2955 
2956 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2957 		pdata->vlan_table[i] = 0;
2958 
2959 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2960 
2961 	dev->net->features = 0;
2962 
2963 	if (DEFAULT_TX_CSUM_ENABLE)
2964 		dev->net->features |= NETIF_F_HW_CSUM;
2965 
2966 	if (DEFAULT_RX_CSUM_ENABLE)
2967 		dev->net->features |= NETIF_F_RXCSUM;
2968 
2969 	if (DEFAULT_TSO_CSUM_ENABLE)
2970 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2971 
2972 	if (DEFAULT_VLAN_RX_OFFLOAD)
2973 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
2974 
2975 	if (DEFAULT_VLAN_FILTER_ENABLE)
2976 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2977 
2978 	dev->net->hw_features = dev->net->features;
2979 
2980 	ret = lan78xx_setup_irq_domain(dev);
2981 	if (ret < 0) {
2982 		netdev_warn(dev->net,
2983 			    "lan78xx_setup_irq_domain() failed : %d", ret);
2984 		goto out1;
2985 	}
2986 
2987 	dev->net->hard_header_len += TX_OVERHEAD;
2988 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2989 
2990 	/* Init all registers */
2991 	ret = lan78xx_reset(dev);
2992 	if (ret) {
2993 		netdev_warn(dev->net, "Registers INIT FAILED....");
2994 		goto out2;
2995 	}
2996 
2997 	ret = lan78xx_mdio_init(dev);
2998 	if (ret) {
2999 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
3000 		goto out2;
3001 	}
3002 
3003 	dev->net->flags |= IFF_MULTICAST;
3004 
3005 	pdata->wol = WAKE_MAGIC;
3006 
3007 	return ret;
3008 
3009 out2:
3010 	lan78xx_remove_irq_domain(dev);
3011 
3012 out1:
3013 	netdev_warn(dev->net, "Bind routine FAILED");
3014 	cancel_work_sync(&pdata->set_multicast);
3015 	cancel_work_sync(&pdata->set_vlan);
3016 	kfree(pdata);
3017 	return ret;
3018 }
3019 
3020 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3021 {
3022 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3023 
3024 	lan78xx_remove_irq_domain(dev);
3025 
3026 	lan78xx_remove_mdio(dev);
3027 
3028 	if (pdata) {
3029 		cancel_work_sync(&pdata->set_multicast);
3030 		cancel_work_sync(&pdata->set_vlan);
3031 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3032 		kfree(pdata);
3033 		pdata = NULL;
3034 		dev->data[0] = 0;
3035 	}
3036 }
3037 
3038 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3039 				    struct sk_buff *skb,
3040 				    u32 rx_cmd_a, u32 rx_cmd_b)
3041 {
3042 	/* HW Checksum offload appears to be flawed if used when not stripping
3043 	 * VLAN headers. Drop back to S/W checksums under these conditions.
3044 	 */
3045 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
3046 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3047 	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3048 	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3049 		skb->ip_summed = CHECKSUM_NONE;
3050 	} else {
3051 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3052 		skb->ip_summed = CHECKSUM_COMPLETE;
3053 	}
3054 }
3055 
3056 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3057 				    struct sk_buff *skb,
3058 				    u32 rx_cmd_a, u32 rx_cmd_b)
3059 {
3060 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3061 	    (rx_cmd_a & RX_CMD_A_FVTG_))
3062 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3063 				       (rx_cmd_b & 0xffff));
3064 }
3065 
3066 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3067 {
3068 	int status;
3069 
3070 	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
3071 		skb_queue_tail(&dev->rxq_pause, skb);
3072 		return;
3073 	}
3074 
3075 	dev->net->stats.rx_packets++;
3076 	dev->net->stats.rx_bytes += skb->len;
3077 
3078 	skb->protocol = eth_type_trans(skb, dev->net);
3079 
3080 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3081 		  skb->len + sizeof(struct ethhdr), skb->protocol);
3082 	memset(skb->cb, 0, sizeof(struct skb_data));
3083 
3084 	if (skb_defer_rx_timestamp(skb))
3085 		return;
3086 
3087 	status = netif_rx(skb);
3088 	if (status != NET_RX_SUCCESS)
3089 		netif_dbg(dev, rx_err, dev->net,
3090 			  "netif_rx status %d\n", status);
3091 }
3092 
3093 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3094 {
3095 	if (skb->len < dev->net->hard_header_len)
3096 		return 0;
3097 
3098 	while (skb->len > 0) {
3099 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
3100 		u16 rx_cmd_c;
3101 		struct sk_buff *skb2;
3102 		unsigned char *packet;
3103 
3104 		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
3105 		le32_to_cpus(&rx_cmd_a);
3106 		skb_pull(skb, sizeof(rx_cmd_a));
3107 
3108 		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
3109 		le32_to_cpus(&rx_cmd_b);
3110 		skb_pull(skb, sizeof(rx_cmd_b));
3111 
3112 		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
3113 		le16_to_cpus(&rx_cmd_c);
3114 		skb_pull(skb, sizeof(rx_cmd_c));
3115 
3116 		packet = skb->data;
3117 
3118 		/* get the packet length */
3119 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3120 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3121 
3122 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3123 			netif_dbg(dev, rx_err, dev->net,
3124 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
3125 		} else {
3126 			/* last frame in this batch */
3127 			if (skb->len == size) {
3128 				lan78xx_rx_csum_offload(dev, skb,
3129 							rx_cmd_a, rx_cmd_b);
3130 				lan78xx_rx_vlan_offload(dev, skb,
3131 							rx_cmd_a, rx_cmd_b);
3132 
3133 				skb_trim(skb, skb->len - 4); /* remove fcs */
3134 				skb->truesize = size + sizeof(struct sk_buff);
3135 
3136 				return 1;
3137 			}
3138 
3139 			skb2 = skb_clone(skb, GFP_ATOMIC);
3140 			if (unlikely(!skb2)) {
3141 				netdev_warn(dev->net, "Error allocating skb");
3142 				return 0;
3143 			}
3144 
3145 			skb2->len = size;
3146 			skb2->data = packet;
3147 			skb_set_tail_pointer(skb2, size);
3148 
3149 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3150 			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3151 
3152 			skb_trim(skb2, skb2->len - 4); /* remove fcs */
3153 			skb2->truesize = size + sizeof(struct sk_buff);
3154 
3155 			lan78xx_skb_return(dev, skb2);
3156 		}
3157 
3158 		skb_pull(skb, size);
3159 
3160 		/* padding bytes before the next frame starts */
3161 		if (skb->len)
3162 			skb_pull(skb, align_count);
3163 	}
3164 
3165 	return 1;
3166 }
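/* RX batch layout: one URB may carry several frames, each preceded
 * by a 10-byte header and padded so the next header starts on a
 * 4-byte boundary:
 *
 *	+----------+----------+----------+--------------+---------+
 *	| rx_cmd_a | rx_cmd_b | rx_cmd_c | packet + FCS | padding |
 *	| 4 bytes  | 4 bytes  | 2 bytes  | size bytes   | 0..3 B  |
 *	+----------+----------+----------+--------------+---------+
 *
 * Worked example: size = 64 gives
 * align_count = (4 - ((64 + RXW_PADDING) % 4)) % 4 = 2 padding bytes
 * before the next frame's rx_cmd_a.
 */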
3167 
3168 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3169 {
3170 	if (!lan78xx_rx(dev, skb)) {
3171 		dev->net->stats.rx_errors++;
3172 		goto done;
3173 	}
3174 
3175 	if (skb->len) {
3176 		lan78xx_skb_return(dev, skb);
3177 		return;
3178 	}
3179 
3180 	netif_dbg(dev, rx_err, dev->net, "drop\n");
3181 	dev->net->stats.rx_errors++;
3182 done:
3183 	skb_queue_tail(&dev->done, skb);
3184 }
3185 
3186 static void rx_complete(struct urb *urb);
3187 
3188 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3189 {
3190 	struct sk_buff *skb;
3191 	struct skb_data *entry;
3192 	unsigned long lockflags;
3193 	size_t size = dev->rx_urb_size;
3194 	int ret = 0;
3195 
3196 	skb = netdev_alloc_skb_ip_align(dev->net, size);
3197 	if (!skb) {
3198 		usb_free_urb(urb);
3199 		return -ENOMEM;
3200 	}
3201 
3202 	entry = (struct skb_data *)skb->cb;
3203 	entry->urb = urb;
3204 	entry->dev = dev;
3205 	entry->length = 0;
3206 
3207 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3208 			  skb->data, size, rx_complete, skb);
3209 
3210 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
3211 
3212 	if (netif_device_present(dev->net) &&
3213 	    netif_running(dev->net) &&
3214 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3215 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3216 		ret = usb_submit_urb(urb, GFP_ATOMIC);
3217 		switch (ret) {
3218 		case 0:
3219 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3220 			break;
3221 		case -EPIPE:
3222 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3223 			break;
3224 		case -ENODEV:
3225 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
3226 			netif_device_detach(dev->net);
3227 			break;
3228 		case -EHOSTUNREACH:
3229 			ret = -ENOLINK;
3230 			break;
3231 		default:
3232 			netif_dbg(dev, rx_err, dev->net,
3233 				  "rx submit, %d\n", ret);
3234 			tasklet_schedule(&dev->bh);
3235 		}
3236 	} else {
3237 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3238 		ret = -ENOLINK;
3239 	}
3240 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3241 	if (ret) {
3242 		dev_kfree_skb_any(skb);
3243 		usb_free_urb(urb);
3244 	}
3245 	return ret;
3246 }
3247 
3248 static void rx_complete(struct urb *urb)
3249 {
3250 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
3251 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3252 	struct lan78xx_net *dev = entry->dev;
3253 	int urb_status = urb->status;
3254 	enum skb_state state;
3255 
3256 	skb_put(skb, urb->actual_length);
3257 	state = rx_done;
3258 	entry->urb = NULL;
3259 
3260 	switch (urb_status) {
3261 	case 0:
3262 		if (skb->len < dev->net->hard_header_len) {
3263 			state = rx_cleanup;
3264 			dev->net->stats.rx_errors++;
3265 			dev->net->stats.rx_length_errors++;
3266 			netif_dbg(dev, rx_err, dev->net,
3267 				  "rx length %d\n", skb->len);
3268 		}
3269 		usb_mark_last_busy(dev->udev);
3270 		break;
3271 	case -EPIPE:
3272 		dev->net->stats.rx_errors++;
3273 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3274 		/* FALLTHROUGH */
3275 	case -ECONNRESET:				/* async unlink */
3276 	case -ESHUTDOWN:				/* hardware gone */
3277 		netif_dbg(dev, ifdown, dev->net,
3278 			  "rx shutdown, code %d\n", urb_status);
3279 		state = rx_cleanup;
3280 		entry->urb = urb;
3281 		urb = NULL;
3282 		break;
3283 	case -EPROTO:
3284 	case -ETIME:
3285 	case -EILSEQ:
3286 		dev->net->stats.rx_errors++;
3287 		state = rx_cleanup;
3288 		entry->urb = urb;
3289 		urb = NULL;
3290 		break;
3291 
3292 	/* data overrun ... flush fifo? */
3293 	case -EOVERFLOW:
3294 		dev->net->stats.rx_over_errors++;
3295 		/* FALLTHROUGH */
3296 
3297 	default:
3298 		state = rx_cleanup;
3299 		dev->net->stats.rx_errors++;
3300 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3301 		break;
3302 	}
3303 
3304 	state = defer_bh(dev, skb, &dev->rxq, state);
3305 
3306 	if (urb) {
3307 		if (netif_running(dev->net) &&
3308 		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3309 		    state != unlink_start) {
3310 			rx_submit(dev, urb, GFP_ATOMIC);
3311 			return;
3312 		}
3313 		usb_free_urb(urb);
3314 	}
3315 	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
3316 }
3317 
3318 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3319 {
3320 	int length;
3321 	struct urb *urb = NULL;
3322 	struct skb_data *entry;
3323 	unsigned long flags;
3324 	struct sk_buff_head *tqp = &dev->txq_pend;
3325 	struct sk_buff *skb, *skb2;
3326 	int ret;
3327 	int count, pos;
3328 	int skb_totallen, pkt_cnt;
3329 
3330 	skb_totallen = 0;
3331 	pkt_cnt = 0;
3332 	count = 0;
3333 	length = 0;
3334 	spin_lock_irqsave(&tqp->lock, flags);
3335 	skb_queue_walk(tqp, skb) {
3336 		if (skb_is_gso(skb)) {
3337 			if (!skb_queue_is_first(tqp, skb)) {
3338 				/* handle previous packets first */
3339 				break;
3340 			}
3341 			count = 1;
3342 			length = skb->len - TX_OVERHEAD;
3343 			__skb_unlink(skb, tqp);
3344 			spin_unlock_irqrestore(&tqp->lock, flags);
3345 			goto gso_skb;
3346 		}
3347 
3348 		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3349 			break;
3350 		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3351 		pkt_cnt++;
3352 	}
3353 	spin_unlock_irqrestore(&tqp->lock, flags);
3354 
3355 	/* copy to a single skb */
3356 	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3357 	if (!skb)
3358 		goto drop;
3359 
3360 	skb_put(skb, skb_totallen);
3361 
3362 	for (count = pos = 0; count < pkt_cnt; count++) {
3363 		skb2 = skb_dequeue(tqp);
3364 		if (skb2) {
3365 			length += (skb2->len - TX_OVERHEAD);
3366 			memcpy(skb->data + pos, skb2->data, skb2->len);
3367 			pos += roundup(skb2->len, sizeof(u32));
3368 			dev_kfree_skb(skb2);
3369 		}
3370 	}
3371 
3372 gso_skb:
3373 	urb = usb_alloc_urb(0, GFP_ATOMIC);
3374 	if (!urb)
3375 		goto drop;
3376 
3377 	entry = (struct skb_data *)skb->cb;
3378 	entry->urb = urb;
3379 	entry->dev = dev;
3380 	entry->length = length;
3381 	entry->num_of_packet = count;
3382 
3383 	spin_lock_irqsave(&dev->txq.lock, flags);
3384 	ret = usb_autopm_get_interface_async(dev->intf);
3385 	if (ret < 0) {
3386 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3387 		goto drop;
3388 	}
3389 
3390 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3391 			  skb->data, skb->len, tx_complete, skb);
3392 
3393 	if (length % dev->maxpacket == 0) {
3394 		/* send USB_ZERO_PACKET */
3395 		urb->transfer_flags |= URB_ZERO_PACKET;
3396 	}
3397 
3398 #ifdef CONFIG_PM
3399 	/* if this triggers, the device is still asleep */
3400 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3401 		/* transmission will be done in resume */
3402 		usb_anchor_urb(urb, &dev->deferred);
3403 		/* no use to process more packets */
3404 		netif_stop_queue(dev->net);
3405 		usb_put_urb(urb);
3406 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3407 		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3408 		return;
3409 	}
3410 #endif
3411 
3412 	ret = usb_submit_urb(urb, GFP_ATOMIC);
3413 	switch (ret) {
3414 	case 0:
3415 		netif_trans_update(dev->net);
3416 		lan78xx_queue_skb(&dev->txq, skb, tx_start);
3417 		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3418 			netif_stop_queue(dev->net);
3419 		break;
3420 	case -EPIPE:
3421 		netif_stop_queue(dev->net);
3422 		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3423 		usb_autopm_put_interface_async(dev->intf);
3424 		break;
3425 	default:
3426 		usb_autopm_put_interface_async(dev->intf);
3427 		netif_dbg(dev, tx_err, dev->net,
3428 			  "tx: submit urb err %d\n", ret);
3429 		break;
3430 	}
3431 
3432 	spin_unlock_irqrestore(&dev->txq.lock, flags);
3433 
3434 	if (ret) {
3435 		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3436 drop:
3437 		dev->net->stats.tx_dropped++;
3438 		if (skb)
3439 			dev_kfree_skb_any(skb);
3440 		usb_free_urb(urb);
3441 	} else
3442 		netif_dbg(dev, tx_queued, dev->net,
3443 			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
3444 }
3445 
3446 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3447 {
3448 	struct urb *urb;
3449 	int i;
3450 
3451 	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3452 		for (i = 0; i < 10; i++) {
3453 			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3454 				break;
3455 			urb = usb_alloc_urb(0, GFP_ATOMIC);
3456 			if (urb)
3457 				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3458 					return;
3459 		}
3460 
3461 		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3462 			tasklet_schedule(&dev->bh);
3463 	}
3464 	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3465 		netif_wake_queue(dev->net);
3466 }
3467 
3468 static void lan78xx_bh(unsigned long param)
3469 {
3470 	struct lan78xx_net *dev = (struct lan78xx_net *)param;
3471 	struct sk_buff *skb;
3472 	struct skb_data *entry;
3473 
3474 	while ((skb = skb_dequeue(&dev->done))) {
3475 		entry = (struct skb_data *)(skb->cb);
3476 		switch (entry->state) {
3477 		case rx_done:
3478 			entry->state = rx_cleanup;
3479 			rx_process(dev, skb);
3480 			continue;
3481 		case tx_done:
3482 			usb_free_urb(entry->urb);
3483 			dev_kfree_skb(skb);
3484 			continue;
3485 		case rx_cleanup:
3486 			usb_free_urb(entry->urb);
3487 			dev_kfree_skb(skb);
3488 			continue;
3489 		default:
3490 			netdev_dbg(dev->net, "skb state %d\n", entry->state);
3491 			return;
3492 		}
3493 	}
3494 
3495 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
3496 		/* reset update timer delta */
3497 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3498 			dev->delta = 1;
3499 			mod_timer(&dev->stat_monitor,
3500 				  jiffies + STAT_UPDATE_TIMER);
3501 		}
3502 
3503 		if (!skb_queue_empty(&dev->txq_pend))
3504 			lan78xx_tx_bh(dev);
3505 
3506 		if (!timer_pending(&dev->delay) &&
3507 		    !test_bit(EVENT_RX_HALT, &dev->flags))
3508 			lan78xx_rx_bh(dev);
3509 	}
3510 }
3511 
3512 static void lan78xx_delayedwork(struct work_struct *work)
3513 {
3514 	int status;
3515 	struct lan78xx_net *dev;
3516 
3517 	dev = container_of(work, struct lan78xx_net, wq.work);
3518 
3519 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3520 		unlink_urbs(dev, &dev->txq);
3521 		status = usb_autopm_get_interface(dev->intf);
3522 		if (status < 0)
3523 			goto fail_pipe;
3524 		status = usb_clear_halt(dev->udev, dev->pipe_out);
3525 		usb_autopm_put_interface(dev->intf);
3526 		if (status < 0 &&
3527 		    status != -EPIPE &&
3528 		    status != -ESHUTDOWN) {
3529 			if (netif_msg_tx_err(dev))
3530 fail_pipe:
3531 				netdev_err(dev->net,
3532 					   "can't clear tx halt, status %d\n",
3533 					   status);
3534 		} else {
3535 			clear_bit(EVENT_TX_HALT, &dev->flags);
3536 			if (status != -ESHUTDOWN)
3537 				netif_wake_queue(dev->net);
3538 		}
3539 	}
3540 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3541 		unlink_urbs(dev, &dev->rxq);
3542 		status = usb_autopm_get_interface(dev->intf);
3543 			goto fail_halt;
3544 				goto fail_halt;
3545 		status = usb_clear_halt(dev->udev, dev->pipe_in);
3546 		usb_autopm_put_interface(dev->intf);
3547 		if (status < 0 &&
3548 		    status != -EPIPE &&
3549 		    status != -ESHUTDOWN) {
3550 			if (netif_msg_rx_err(dev))
3551 fail_halt:
3552 				netdev_err(dev->net,
3553 					   "can't clear rx halt, status %d\n",
3554 					   status);
3555 		} else {
3556 			clear_bit(EVENT_RX_HALT, &dev->flags);
3557 			tasklet_schedule(&dev->bh);
3558 		}
3559 	}
3560 
3561 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3562 		int ret = 0;
3563 
3564 		clear_bit(EVENT_LINK_RESET, &dev->flags);
3565 		status = usb_autopm_get_interface(dev->intf);
3566 		if (status < 0)
3567 			goto skip_reset;
3568 		if (lan78xx_link_reset(dev) < 0) {
3569 			usb_autopm_put_interface(dev->intf);
3570 skip_reset:
3571 			netdev_info(dev->net, "link reset failed (%d)\n",
3572 				    ret);
3573 		} else {
3574 			usb_autopm_put_interface(dev->intf);
3575 		}
3576 	}
3577 
3578 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3579 		lan78xx_update_stats(dev);
3580 
3581 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3582 
3583 		mod_timer(&dev->stat_monitor,
3584 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
3585 
3586 		dev->delta = min((dev->delta * 2), 50);
3587 	}
3588 }
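/* NOTE: dev->delta doubles on every EVENT_STAT_UPDATE until clamped
 * at 50, so the statistics interval backs off as 1 s, 2 s, 4 s, ...
 * up to a steady 50 s (with STAT_UPDATE_TIMER of 1000 ms);
 * lan78xx_bh() resets delta to 1 while the interface is active so a
 * busy link is sampled every second.
 */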
3589 
3590 static void intr_complete(struct urb *urb)
3591 {
3592 	struct lan78xx_net *dev = urb->context;
3593 	int status = urb->status;
3594 
3595 	switch (status) {
3596 	/* success */
3597 	case 0:
3598 		lan78xx_status(dev, urb);
3599 		break;
3600 
3601 	/* software-driven interface shutdown */
3602 	case -ENOENT:			/* urb killed */
3603 	case -ESHUTDOWN:		/* hardware gone */
3604 		netif_dbg(dev, ifdown, dev->net,
3605 			  "intr shutdown, code %d\n", status);
3606 		return;
3607 
3608 	/* NOTE:  not throttling like RX/TX, since this endpoint
3609 	 * already polls infrequently
3610 	 */
3611 	default:
3612 		netdev_dbg(dev->net, "intr status %d\n", status);
3613 		break;
3614 	}
3615 
3616 	if (!netif_running(dev->net))
3617 		return;
3618 
3619 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3620 	status = usb_submit_urb(urb, GFP_ATOMIC);
3621 	if (status != 0)
3622 		netif_err(dev, timer, dev->net,
3623 			  "intr resubmit --> %d\n", status);
3624 }
3625 
3626 static void lan78xx_disconnect(struct usb_interface *intf)
3627 {
3628 	struct lan78xx_net *dev;
3629 	struct usb_device *udev;
3630 	struct net_device *net;
3631 	struct phy_device *phydev;
3632 
3633 	dev = usb_get_intfdata(intf);
3634 	usb_set_intfdata(intf, NULL);
3635 	if (!dev)
3636 		return;
3637 
3638 	udev = interface_to_usbdev(intf);
3639 	net = dev->net;
3640 	phydev = net->phydev;
3641 
3642 	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3643 	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3644 
3645 	phy_disconnect(net->phydev);
3646 
3647 	if (phy_is_pseudo_fixed_link(phydev))
3648 		fixed_phy_unregister(phydev);
3649 
3650 	unregister_netdev(net);
3651 
3652 	cancel_delayed_work_sync(&dev->wq);
3653 
3654 	usb_scuttle_anchored_urbs(&dev->deferred);
3655 
3656 	lan78xx_unbind(dev, intf);
3657 
3658 	usb_kill_urb(dev->urb_intr);
3659 	usb_free_urb(dev->urb_intr);
3660 
3661 	free_netdev(net);
3662 	usb_put_dev(udev);
3663 }
3664 
3665 static void lan78xx_tx_timeout(struct net_device *net)
3666 {
3667 	struct lan78xx_net *dev = netdev_priv(net);
3668 
3669 	unlink_urbs(dev, &dev->txq);
3670 	tasklet_schedule(&dev->bh);
3671 }
3672 
3673 static const struct net_device_ops lan78xx_netdev_ops = {
3674 	.ndo_open		= lan78xx_open,
3675 	.ndo_stop		= lan78xx_stop,
3676 	.ndo_start_xmit		= lan78xx_start_xmit,
3677 	.ndo_tx_timeout		= lan78xx_tx_timeout,
3678 	.ndo_change_mtu		= lan78xx_change_mtu,
3679 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
3680 	.ndo_validate_addr	= eth_validate_addr,
3681 	.ndo_do_ioctl		= lan78xx_ioctl,
3682 	.ndo_set_rx_mode	= lan78xx_set_multicast,
3683 	.ndo_set_features	= lan78xx_set_features,
3684 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
3685 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
3686 };
3687 
3688 static void lan78xx_stat_monitor(struct timer_list *t)
3689 {
3690 	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3691 
3692 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3693 }
3694 
3695 static int lan78xx_probe(struct usb_interface *intf,
3696 			 const struct usb_device_id *id)
3697 {
3698 	struct lan78xx_net *dev;
3699 	struct net_device *netdev;
3700 	struct usb_device *udev;
3701 	int ret;
3702 	unsigned maxp;
3703 	unsigned period;
3704 	u8 *buf = NULL;
3705 
3706 	udev = interface_to_usbdev(intf);
3707 	udev = usb_get_dev(udev);
3708 
3709 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3710 	if (!netdev) {
3711 		dev_err(&intf->dev, "Error: OOM\n");
3712 		ret = -ENOMEM;
3713 		goto out1;
3714 	}
3715 
3716 	/* netdev_printk() needs this */
3717 	SET_NETDEV_DEV(netdev, &intf->dev);
3718 
3719 	dev = netdev_priv(netdev);
3720 	dev->udev = udev;
3721 	dev->intf = intf;
3722 	dev->net = netdev;
3723 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3724 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
3725 
3726 	skb_queue_head_init(&dev->rxq);
3727 	skb_queue_head_init(&dev->txq);
3728 	skb_queue_head_init(&dev->done);
3729 	skb_queue_head_init(&dev->rxq_pause);
3730 	skb_queue_head_init(&dev->txq_pend);
3731 	mutex_init(&dev->phy_mutex);
3732 
3733 	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3734 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3735 	init_usb_anchor(&dev->deferred);
3736 
3737 	netdev->netdev_ops = &lan78xx_netdev_ops;
3738 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3739 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
3740 
3741 	dev->delta = 1;
3742 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3743 
3744 	mutex_init(&dev->stats.access_lock);
3745 
3746 	ret = lan78xx_bind(dev, intf);
3747 	if (ret < 0)
3748 		goto out2;
3749 
3750 	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3751 		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3752 
3753 	/* MTU range: 68 - 9000 */
3754 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3755 
3756 	dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3757 	dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3758 	dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3759 
3760 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3761 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3762 
3763 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
3764 					dev->ep_intr->desc.bEndpointAddress &
3765 					USB_ENDPOINT_NUMBER_MASK);
3766 	period = dev->ep_intr->desc.bInterval;
3767 
3768 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3769 	buf = kmalloc(maxp, GFP_KERNEL);
3770 	if (buf) {
3771 		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3772 		if (!dev->urb_intr) {
3773 			ret = -ENOMEM;
3774 			kfree(buf);
3775 			goto out3;
3776 		} else {
3777 			usb_fill_int_urb(dev->urb_intr, dev->udev,
3778 					 dev->pipe_intr, buf, maxp,
3779 					 intr_complete, dev, period);
3780 		}
3781 	}
3782 
3783 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3784 
3785 	/* driver requires remote-wakeup capability during autosuspend. */
3786 	intf->needs_remote_wakeup = 1;
3787 
3788 	ret = register_netdev(netdev);
3789 	if (ret != 0) {
3790 		netif_err(dev, probe, netdev, "couldn't register the device\n");
3791 		goto out3;
3792 	}
3793 
3794 	usb_set_intfdata(intf, dev);
3795 
3796 	ret = device_set_wakeup_enable(&udev->dev, true);
3797 
3798 	/* The default autosuspend delay of 2 s has more overhead than
3799 	 * advantage; use 10 s as the default instead.
3800 	 */
3801 	pm_runtime_set_autosuspend_delay(&udev->dev,
3802 					 DEFAULT_AUTOSUSPEND_DELAY);
3803 
3804 	ret = lan78xx_phy_init(dev);
3805 	if (ret < 0)
3806 		goto out4;
3807 
3808 	return 0;
3809 
3810 out4:
3811 	unregister_netdev(netdev);
3812 out3:
3813 	lan78xx_unbind(dev, intf);
3814 out2:
3815 	free_netdev(netdev);
3816 out1:
3817 	usb_put_dev(udev);
3818 
3819 	return ret;
3820 }
3821 
3822 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3823 {
3824 	const u16 crc16poly = 0x8005;
3825 	int i;
3826 	u16 bit, crc, msb;
3827 	u8 data;
3828 
3829 	crc = 0xFFFF;
3830 	for (i = 0; i < len; i++) {
3831 		data = *buf++;
3832 		for (bit = 0; bit < 8; bit++) {
3833 			msb = crc >> 15;
3834 			crc <<= 1;
3835 
3836 			if (msb ^ (u16)(data & 1)) {
3837 				crc ^= crc16poly;
3838 				crc |= (u16)0x0001U;
3839 			}
3840 			data >>= 1;
3841 		}
3842 	}
3843 
3844 	return crc;
3845 }
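/* Usage sketch: this is a bitwise CRC-16 with polynomial 0x8005 and
 * initial value 0xFFFF, presumably matching the CRC the wakeup-frame
 * filter hardware computes over the bytes selected by a WUF mask.
 * The suspend path below uses it, e.g., to match the 3-byte IPv4
 * multicast prefix 01:00:5e:
 *
 *	crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
 */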
3846 
3847 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3848 {
3849 	u32 buf;
3850 	int ret;
3851 	int mask_index;
3852 	u16 crc;
3853 	u32 temp_wucsr;
3854 	u32 temp_pmt_ctl;
3855 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[2] = { 0x33, 0x33 };
3857 	const u8 arp_type[2] = { 0x08, 0x06 };
3858 
3859 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3860 	buf &= ~MAC_TX_TXEN_;
3861 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
3862 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3863 	buf &= ~MAC_RX_RXEN_;
3864 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
3865 
3866 	ret = lan78xx_write_reg(dev, WUCSR, 0);
3867 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
3868 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3869 
3870 	temp_wucsr = 0;
3871 
3872 	temp_pmt_ctl = 0;
3873 	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3874 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3875 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3876 
3877 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3878 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3879 
3880 	mask_index = 0;
3881 	if (wol & WAKE_PHY) {
3882 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3883 
3884 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3885 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3886 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3887 	}
3888 	if (wol & WAKE_MAGIC) {
3889 		temp_wucsr |= WUCSR_MPEN_;
3890 
3891 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3892 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3893 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3894 	}
3895 	if (wol & WAKE_BCAST) {
3896 		temp_wucsr |= WUCSR_BCST_EN_;
3897 
3898 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3899 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3900 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3901 	}
3902 	if (wol & WAKE_MCAST) {
3903 		temp_wucsr |= WUCSR_WAKE_EN_;
3904 
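		/* WUF_MASKn is a byte-enable bitmap over the frame starting
		 * at the configured offset: a mask of 7 feeds the first
		 * three bytes (the IPv4 multicast OUI 01:00:5E) into the
		 * CRC match, a mask of 3 the first two.
		 */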
3905 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3906 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3907 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3908 					WUF_CFGX_EN_ |
3909 					WUF_CFGX_TYPE_MCAST_ |
3910 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3911 					(crc & WUF_CFGX_CRC16_MASK_));
3912 
3913 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3914 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3915 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3916 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3917 		mask_index++;
3918 
3919 		/* for IPv6 Multicast */
3920 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3921 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3922 					WUF_CFGX_EN_ |
3923 					WUF_CFGX_TYPE_MCAST_ |
3924 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3925 					(crc & WUF_CFGX_CRC16_MASK_));
3926 
3927 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3928 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3929 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3930 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3931 		mask_index++;
3932 
3933 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3934 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3935 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3936 	}
3937 	if (wol & WAKE_UCAST) {
3938 		temp_wucsr |= WUCSR_PFDA_EN_;
3939 
3940 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3941 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3942 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3943 	}
3944 	if (wol & WAKE_ARP) {
3945 		temp_wucsr |= WUCSR_WAKE_EN_;
3946 
3947 		/* set WUF_CFG & WUF_MASK
3948 		 * for packettype (offset 12,13) = ARP (0x0806)
3949 		 */
3950 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
3951 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3952 					WUF_CFGX_EN_ |
3953 					WUF_CFGX_TYPE_ALL_ |
3954 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3955 					(crc & WUF_CFGX_CRC16_MASK_));
3956 
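		/* Mask 0x3000 selects frame bytes 12 and 13, i.e. the
		 * EtherType field matched against the ARP pattern above.
		 */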
3957 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3958 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3959 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3960 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3961 		mask_index++;
3962 
3963 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3964 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3965 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3966 	}
3967 
3968 	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3969 
3970 	/* when multiple WOL bits are set */
3971 	if (hweight_long((unsigned long)wol) > 1) {
3972 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3973 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3974 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3975 	}
3976 	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3977 
3978 	/* clear WUPS */
3979 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3980 	buf |= PMT_CTL_WUPS_MASK_;
3981 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3982 
3983 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3984 	buf |= MAC_RX_RXEN_;
3985 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
3986 
3987 	return 0;
3988 }
3989 
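/* USB suspend callback.  On the first suspend the MAC is stopped and all
 * URBs are torn down; runtime (auto) suspend then arms good-frame and PHY
 * wakeup with SUS_MODE_3, while system suspend hands the user's WoL
 * configuration to lan78xx_set_suspend().
 */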
3990 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3991 {
3992 	struct lan78xx_net *dev = usb_get_intfdata(intf);
3993 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3994 	u32 buf;
3995 	int ret;
3999 
4000 	if (!dev->suspend_count++) {
4001 		spin_lock_irq(&dev->txq.lock);
4002 		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		}

		set_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);
4013 
4014 		/* stop TX & RX */
4015 		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4016 		buf &= ~MAC_TX_TXEN_;
4017 		ret = lan78xx_write_reg(dev, MAC_TX, buf);
4018 		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4019 		buf &= ~MAC_RX_RXEN_;
4020 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
4021 
		/* empty out the Rx and Tx queues */
4023 		netif_device_detach(dev->net);
4024 		lan78xx_terminate_urbs(dev);
4025 		usb_kill_urb(dev->urb_intr);
4026 
4027 		/* reattach */
4028 		netif_device_attach(dev->net);
4029 	}
4030 
4031 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4032 		del_timer(&dev->stat_monitor);
4033 
4034 		if (PMSG_IS_AUTO(message)) {
4035 			/* auto suspend (selective suspend) */
4036 			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4037 			buf &= ~MAC_TX_TXEN_;
4038 			ret = lan78xx_write_reg(dev, MAC_TX, buf);
4039 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4040 			buf &= ~MAC_RX_RXEN_;
4041 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
4042 
4043 			ret = lan78xx_write_reg(dev, WUCSR, 0);
4044 			ret = lan78xx_write_reg(dev, WUCSR2, 0);
4045 			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4046 
4047 			/* set goodframe wakeup */
4048 			ret = lan78xx_read_reg(dev, WUCSR, &buf);
4049 
4050 			buf |= WUCSR_RFE_WAKE_EN_;
4051 			buf |= WUCSR_STORE_WAKE_;
4052 
4053 			ret = lan78xx_write_reg(dev, WUCSR, buf);
4054 
4055 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4056 
4057 			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4058 			buf |= PMT_CTL_RES_CLR_WKP_STS_;
4059 
4060 			buf |= PMT_CTL_PHY_WAKE_EN_;
4061 			buf |= PMT_CTL_WOL_EN_;
4062 			buf &= ~PMT_CTL_SUS_MODE_MASK_;
4063 			buf |= PMT_CTL_SUS_MODE_3_;
4064 
4065 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4066 
4067 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4068 
4069 			buf |= PMT_CTL_WUPS_MASK_;
4070 
4071 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4072 
4073 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4074 			buf |= MAC_RX_RXEN_;
4075 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
4076 		} else {
4077 			lan78xx_set_suspend(dev, pdata->wol);
4078 		}
4079 	}
4080 
4081 	ret = 0;
4082 out:
4083 	return ret;
4084 }
4085 
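/* USB resume callback: restart the statistics timer, resubmit the
 * interrupt URB, replay TX URBs deferred while asleep, clear latched
 * wake-source status and re-enable the transmitter.
 */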
4086 static int lan78xx_resume(struct usb_interface *intf)
4087 {
4088 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4089 	struct sk_buff *skb;
4090 	struct urb *res;
4091 	int ret;
4092 	u32 buf;
4093 
4094 	if (!timer_pending(&dev->stat_monitor)) {
4095 		dev->delta = 1;
4096 		mod_timer(&dev->stat_monitor,
4097 			  jiffies + STAT_UPDATE_TIMER);
4098 	}
4099 
4100 	if (!--dev->suspend_count) {
4101 		/* resume interrupt URBs */
4102 		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->urb_intr, GFP_NOIO);
4104 
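		/* Replay TX URBs that were parked on dev->deferred while
		 * the device was asleep.
		 */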
4105 		spin_lock_irq(&dev->txq.lock);
4106 		while ((res = usb_get_from_anchor(&dev->deferred))) {
4107 			skb = (struct sk_buff *)res->context;
4108 			ret = usb_submit_urb(res, GFP_ATOMIC);
4109 			if (ret < 0) {
4110 				dev_kfree_skb_any(skb);
4111 				usb_free_urb(res);
4112 				usb_autopm_put_interface_async(dev->intf);
4113 			} else {
4114 				netif_trans_update(dev->net);
4115 				lan78xx_queue_skb(&dev->txq, skb, tx_start);
4116 			}
4117 		}
4118 
4119 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4120 		spin_unlock_irq(&dev->txq.lock);
4121 
4122 		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (skb_queue_len(&dev->txq) < dev->tx_qlen)
4124 				netif_start_queue(dev->net);
4125 			tasklet_schedule(&dev->bh);
4126 		}
4127 	}
4128 
4129 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4130 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4131 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4132 
4133 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4134 					     WUCSR2_ARP_RCD_ |
4135 					     WUCSR2_IPV6_TCPSYN_RCD_ |
4136 					     WUCSR2_IPV4_TCPSYN_RCD_);
4137 
4138 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
4139 					    WUCSR_EEE_RX_WAKE_ |
4140 					    WUCSR_PFDA_FR_ |
4141 					    WUCSR_RFE_WAKE_FR_ |
4142 					    WUCSR_WUFR_ |
4143 					    WUCSR_MPR_ |
4144 					    WUCSR_BCST_FR_);
4145 
4146 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4147 	buf |= MAC_TX_TXEN_;
4148 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
4149 
4150 	return 0;
4151 }
4152 
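/* After a reset-resume the hardware has lost its register state, so run
 * a full lan78xx_reset() and restart the PHY before taking the normal
 * resume path.
 */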
4153 static int lan78xx_reset_resume(struct usb_interface *intf)
4154 {
4155 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4156 
4157 	lan78xx_reset(dev);
4158 
4159 	phy_start(dev->net->phydev);
4160 
4161 	return lan78xx_resume(intf);
4162 }
4163 
4164 static const struct usb_device_id products[] = {
4165 	{
4166 	/* LAN7800 USB Gigabit Ethernet Device */
4167 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4168 	},
4169 	{
4170 	/* LAN7850 USB Gigabit Ethernet Device */
4171 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4172 	},
4173 	{
4174 	/* LAN7801 USB Gigabit Ethernet Device */
4175 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
4176 	},
4177 	{},
4178 };
4179 MODULE_DEVICE_TABLE(usb, products);
4180 
4181 static struct usb_driver lan78xx_driver = {
4182 	.name			= DRIVER_NAME,
4183 	.id_table		= products,
4184 	.probe			= lan78xx_probe,
4185 	.disconnect		= lan78xx_disconnect,
4186 	.suspend		= lan78xx_suspend,
4187 	.resume			= lan78xx_resume,
4188 	.reset_resume		= lan78xx_reset_resume,
4189 	.supports_autosuspend	= 1,
4190 	.disable_hub_initiated_lpm = 1,
4191 };
4192 
4193 module_usb_driver(lan78xx_driver);
4194 
4195 MODULE_AUTHOR(DRIVER_AUTHOR);
4196 MODULE_DESCRIPTION(DRIVER_DESC);
4197 MODULE_LICENSE("GPL");
4198